Lucene Search Engine

The following jars need to be on the classpath:

IKAnalyzer2012_FF.jar
lucene-analyzers-common-4.0.0.jar
lucene-core-4.0.0.jar
lucene-highlighter-4.0.0.jar
lucene-queryparser-4.0.0.jar
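
Before wiring up the utility class below, it can help to confirm the IK analyzer jar is loaded correctly. Here is a minimal, hypothetical smoke test (the class name and sample text are invented for illustration) that prints the tokens IKAnalyzer produces for a short Chinese string:

import java.io.StringReader;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class IKAnalyzerSmokeTest {
    public static void main(String[] args) throws Exception {
        Analyzer analyzer = new IKAnalyzer(); // default constructor: fine-grained segmentation
        TokenStream ts = analyzer.tokenStream("content", new StringReader("lucene搜索引擎"));
        CharTermAttribute term = ts.addAttribute(CharTermAttribute.class);
        ts.reset(); // mandatory before incrementToken() in Lucene 4.x
        while (ts.incrementToken()) {
            System.out.println(term.toString());
        }
        ts.end();
        ts.close();
    }
}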


The utility class below builds an index with IKAnalyzer and supports paged search via searchAfter. (Compared with the original paste: addDocument is moved inside the loop so every row is indexed, not just the last one; the entry-point search closes its IndexReader; and getLastScoreDoc guards against pages beyond the result set.)

import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

import org.apache.commons.lang.StringUtils;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.document.StoredField;
import org.apache.lucene.document.TextField;
import org.apache.lucene.index.DirectoryReader;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriterConfig;
import org.apache.lucene.index.IndexWriterConfig.OpenMode;
import org.apache.lucene.index.IndexableField;
import org.apache.lucene.queryparser.classic.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;
import org.wltea.analyzer.lucene.IKAnalyzer;

public class LuceneUtil {

    private static final Log logger = LogFactory.getLog(LuceneUtil.class);

    /**
     * Create (or append to) the index.
     *
     * @param indexDir    directory that holds the index files
     * @param list        rows to index; each map is one document (field name -> value)
     * @param indexKeyStr name of the key field, which is stored but not indexed
     * @return the number of documents in the index after the write
     */
    public static int createFile(String indexDir, List<Map<String, String>> list, String indexKeyStr) throws Exception {
        File file = new File(indexDir);
        if (!file.exists()) {
            file.mkdirs();
        }
        Directory directory = FSDirectory.open(file);
        IndexWriterConfig config = new IndexWriterConfig(Version.LUCENE_40, new IKAnalyzer());
        // CREATE_OR_APPEND creates a new index when none exists and appends otherwise,
        // which is what the original if/else on file.exists() did by hand.
        config.setOpenMode(OpenMode.CREATE_OR_APPEND);
        IndexWriter writer = new IndexWriter(directory, config);
        for (Map<String, String> entity : list) {
            Document document = new Document();
            for (Map.Entry<String, String> e : entity.entrySet()) {
                String key = e.getKey();
                String value = String.valueOf(e.getValue());
                if (indexKeyStr.equals(key)) {
                    document.add(new StoredField(key, value));                // stored, not indexed
                } else if (!StringUtils.isEmpty(key)) {
                    document.add(new TextField(key, value, Field.Store.YES)); // analyzed and stored
                }
            }
            writer.addDocument(document); // must be inside the loop, or only the last row gets indexed
        }
        int numberIndex = writer.numDocs();
        logger.info("writer.numDocs() -> " + numberIndex);
        writer.commit();
        writer.close();
        return numberIndex;
    }

    public static List<Map<String, String>> search(IndexSearcher searcher, String searchWord,
            int pageNo, int pageSize, String analyzerStr) throws Exception {
        List<Map<String, String>> ret = new ArrayList<Map<String, String>>();
        Analyzer analyzer = new IKAnalyzer(); // Chinese (IK) analyzer
        // Parse the search text into a Query against the given default field.
        QueryParser queryParser = new QueryParser(Version.LUCENE_40, analyzerStr, analyzer);
        queryParser.setDefaultOperator(QueryParser.Operator.AND);
        Query query = queryParser.parse(searchWord);
        // Fetch the last ScoreDoc of the previous page, then continue after it.
        ScoreDoc lastSd = getLastScoreDoc(pageNo, pageSize, query, searcher);
        TopDocs docs = searcher.searchAfter(lastSd, query, pageSize);
        for (ScoreDoc sd : docs.scoreDocs) {
            Map<String, String> entity = new HashMap<String, String>();
            Document doc = searcher.doc(sd.doc);
            List<IndexableField> fls = doc.getFields();
            for (IndexableField fl : fls) {
                entity.put(fl.name(), fl.stringValue());
            }
            ret.add(entity);
        }
        return ret;
    }

    /**
     * Entry point.
     *
     * @param searchWord  the keyword to search for
     * @param indexDir    path of the index directory
     * @param pageNo      page number (1-based)
     * @param pageSize    number of hits per page
     * @param analyzerStr the default field the keyword is searched in
     */
    public static List<Map<String, String>> search(String searchWord, String indexDir,
            int pageNo, int pageSize, String analyzerStr) throws Exception {
        IndexReader reader = DirectoryReader.open(FSDirectory.open(new File(indexDir)));
        try {
            IndexSearcher searcher = new IndexSearcher(reader);
            return search(searcher, searchWord, pageNo, pageSize, analyzerStr);
        } finally {
            reader.close(); // the original never closed the reader
        }
    }

    /**
     * Get the last ScoreDoc of the previous page.
     */
    private static ScoreDoc getLastScoreDoc(int pageNo, int pageSize, Query query, IndexSearcher searcher)
            throws IOException {
        if (pageNo == 1) {
            return null; // the first page has no predecessor
        }
        int num = pageSize * (pageNo - 1); // number of hits up to the end of the previous page
        TopDocs tds = searcher.search(query, num);
        if (tds.scoreDocs.length == 0) {
            return null; // no hits at all; the original indexed past the end of the array here
        }
        // If the requested page starts beyond the last hit, continue after the final hit
        // so searchAfter simply returns an empty page.
        return tds.scoreDocs[Math.min(num, tds.scoreDocs.length) - 1];
    }
}
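
A hypothetical usage sketch of the class above (the index path /tmp/lucene-index and the field names id/title/content are made up for illustration): index one row, then fetch the first page of hits on the content field.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class LuceneUtilDemo {
    public static void main(String[] args) throws Exception {
        List<Map<String, String>> rows = new ArrayList<Map<String, String>>();
        Map<String, String> row = new HashMap<String, String>();
        row.put("id", "1");                       // matches indexKeyStr below: stored only
        row.put("title", "lucene搜索引擎");
        row.put("content", "基于IK分词的全文检索示例");
        rows.add(row);

        // Index the rows; the "id" field is stored but not indexed.
        int count = LuceneUtil.createFile("/tmp/lucene-index", rows, "id");
        System.out.println("docs in index: " + count);

        // Search page 1 (10 hits per page) against the "content" field.
        List<Map<String, String>> hits =
                LuceneUtil.search("全文检索", "/tmp/lucene-index", 1, 10, "content");
        for (Map<String, String> hit : hits) {
            System.out.println(hit.get("title"));
        }
    }
}

Note the cost profile of this paging scheme: getLastScoreDoc re-runs the query to re-collect everything up to the previous page's last hit, so deep pages get progressively more expensive. That is the usual trade-off when driving searchAfter statelessly, without caching ScoreDocs between requests.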

