Lucene搜索实例

来源:互联网 发布:淘客程序源码一键部署 编辑:程序博客网 时间:2024/06/06 12:49

下面直接给出完整代码。对于一般企业级的综合搜索(多字段、多条件组合查询)来说,这套实现完全够用。

添加个效果图


[java] view plain copy
  1. package com.lucene.util;  
  2.   
  3. import java.io.File;  
  4. import java.io.IOException;  
  5. import java.util.ArrayList;  
  6.   
  7. import org.apache.log4j.Logger;  
  8. import org.apache.lucene.analysis.TokenStream;  
  9. import org.apache.lucene.document.Document;  
  10. import org.apache.lucene.document.Field.Index;  
  11. import org.apache.lucene.document.Field.Store;  
  12. import org.apache.lucene.index.CorruptIndexException;  
  13. import org.apache.lucene.index.IndexReader;  
  14. import org.apache.lucene.index.IndexWriter;  
  15. import org.apache.lucene.index.IndexWriterConfig;  
  16. import org.apache.lucene.index.LogByteSizeMergePolicy;  
  17. import org.apache.lucene.index.LogMergePolicy;  
  18. import org.apache.lucene.index.Term;  
  19. import org.apache.lucene.search.BooleanQuery;  
  20. import org.apache.lucene.search.IndexSearcher;  
  21. import org.apache.lucene.search.NumericRangeQuery;  
  22. import org.apache.lucene.search.Query;  
  23. import org.apache.lucene.search.ScoreDoc;  
  24. import org.apache.lucene.search.TopScoreDocCollector;  
  25. import org.apache.lucene.search.BooleanClause.Occur;  
  26. import org.apache.lucene.search.highlight.Highlighter;  
  27. import org.apache.lucene.search.highlight.QueryScorer;  
  28. import org.apache.lucene.search.highlight.SimpleFragmenter;  
  29. import org.apache.lucene.search.highlight.SimpleHTMLFormatter;  
  30. import org.apache.lucene.search.highlight.TokenSources;  
  31. import org.apache.lucene.store.Directory;  
  32. import org.apache.lucene.store.FSDirectory;  
  33. import org.apache.lucene.store.LockObtainFailedException;  
  34. import org.apache.lucene.util.Version;  
  35. import org.apache.lucene.document.Field;  
  36. import org.springframework.context.ApplicationContext;  
  37. import org.springframework.context.support.ClassPathXmlApplicationContext;  
  38. import org.wltea.analyzer.lucene.IKQueryParser;  
  39. import org.wltea.analyzer.lucene.IKSimilarity;  
  40.   
  41. import com.lucene.LuceneConfig;  
  42. import com.lucene.data.LuceneData;  
  43. import com.model.Model;  
  44. import com.model.Novel;  
  45. import com.service.NovelService;  
  46.   
  47. /** 
  48.  * lucene工具类 
  49.  *  
  50.  * @author Administrator 
  51.  *  
  52.  */  
  53. public class LuceneUtil {  
  54.     /** 
  55.      * 日志 
  56.      */  
  57.     static Logger logger = Logger.getLogger(LuceneUtil.class);  
  58.       
  59.     public static Integer totalNum=0;  
  60.       
  61.   
  62.     /** 
  63.      * 创建索引 
  64.      * @param data 要放入索引的一条记录 
  65.      * @return 
  66.      */  
  67.     public static synchronized boolean createIndex(LuceneData data) {  
  68.         IndexWriter indexWriter = null;  
  69.         Directory d = null;  
  70.         try {  
  71.             d = FSDirectory.open(new File(LuceneConfig.INDEX_PATH));  
  72.             IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_36,  
  73.                     AnalyzerUtil.getIkAnalyzer());  
  74.             // 3.6以后不推荐用optimize,使用LogMergePolicy优化策略  
  75.             conf.setMergePolicy(optimizeIndex());  
  76.             // 创建索引模式:CREATE,覆盖模式; APPEND,追加模式  
  77.             File file = new File(LuceneConfig.INDEX_PATH);  
  78.             File[] f = file .listFiles();  
  79.             if(f.length==0)      
  80.                 conf.setOpenMode(IndexWriterConfig.OpenMode.CREATE);  
  81.             else  
  82.                 conf.setOpenMode(IndexWriterConfig.OpenMode.APPEND);  
  83.   
  84.             indexWriter = new IndexWriter(d, conf);  
  85.             //因为id是唯一的,如果之前存在就先删除原来的,在创建新的  
  86.             Term term = new Term("id", data.getId());  
  87.             indexWriter.deleteDocuments(term);  
  88.               
  89.             Document doc = getDocument(data);  
  90.             indexWriter.addDocument(doc);  
  91.   
  92.             logger.debug("索引结束,共有索引{}个" + indexWriter.numDocs());  
  93.             //System.out.println("索引结束,共有索引{}个" + indexWriter.numDocs()+":"+doc.get("id")+":"+doc.get("author"));  
  94.             // 自动优化合并索引文件,3.6以后不推荐用optimize,使用LogMergePolicy优化策略  
  95.             // indexWriter.optimize();  
  96.             indexWriter.commit();  
  97.             return true;  
  98.         } catch (CorruptIndexException e) {  
  99.             e.printStackTrace();  
  100.             logger.error("索引添加异常", e);  
  101.         } catch (LockObtainFailedException e) {  
  102.             e.printStackTrace();  
  103.             logger.error("索引添加异常", e);  
  104.         } catch (IOException e) {  
  105.             e.printStackTrace();  
  106.             logger.error("索引不存在", e);  
  107.         } catch (Exception e) {  
  108.             e.printStackTrace();  
  109.             logger.error("索引添加异常", e);  
  110.         } finally {  
  111.             if (indexWriter != null) {  
  112.                 try {  
  113.                     indexWriter.close();  
  114.                 } catch (CorruptIndexException e) {  
  115.                     e.printStackTrace();  
  116.                     logger.error("索引关闭异常", e);  
  117.                 } catch (IOException e) {  
  118.                     e.printStackTrace();  
  119.                     logger.error("索引关闭异常", e);  
  120.                 } finally {  
  121.                     try {  
  122.                         if (d != null && IndexWriter.isLocked(d)) {  
  123.                             IndexWriter.unlock(d);  
  124.                         }  
  125.                     } catch (IOException e) {  
  126.                         e.printStackTrace();  
  127.                         logger.error("解锁异常", e);  
  128.                     }  
  129.                 }  
  130.             }  
  131.         }  
  132.         return false;  
  133.     }  
  134.   
  135.     /** 
  136.      * 更新索引 
  137.      *  
  138.      * @param data 
  139.      * @return 
  140.      */  
  141.     public static boolean updateIndex(LuceneData data) {  
  142.         IndexWriter indexWriter = null;  
  143.         Directory d = null;  
  144.         try {  
  145.             d = FSDirectory.open(new File(LuceneConfig.INDEX_PATH));  
  146.             while (d != null && IndexWriter.isLocked(d)) {// 如果文件锁住,等待解锁  
  147.                 Thread.sleep(1000);  
  148.                 logger.error("索引已经锁住,正在等待....");  
  149.             }  
  150.             IndexWriterConfig conf = new IndexWriterConfig(Version.LUCENE_31,  
  151.                     AnalyzerUtil.getIkAnalyzer());  
  152.             // 3.6以后不推荐用optimize,使用LogMergePolicy优化策略  
  153.             conf.setMergePolicy(optimizeIndex());  
  154.   
  155.             indexWriter = new IndexWriter(d, conf);  
  156.             Term term = new Term("id", data.getId());  
  157.             // 不管更新与否,先删除原来的  
  158.             indexWriter.deleteDocuments(term);  
  159.   
  160.             Document doc = getDocument(data);  
  161.             indexWriter.addDocument(doc);  
  162.             // indexWriter.optimize();  
  163.   
  164.             indexWriter.commit();  
  165.             logger.debug("更新索引,文章ID为{}" + data.getId());  
  166.             logger.debug("共有索引{}个" + indexWriter.numDocs());  
  167.             return true;  
  168.         } catch (CorruptIndexException e) {  
  169.             e.printStackTrace();  
  170.             logger.error("索引添加异常", e);  
  171.         } catch (LockObtainFailedException e) {  
  172.             e.printStackTrace();  
  173.             logger.error("索引添加异常", e);  
  174.         } catch (IOException e) {  
  175.             e.printStackTrace();  
  176.             logger.error("索引不存在", e);  
  177.         } catch (Exception e) {  
  178.             e.printStackTrace();  
  179.             logger.error("索引添加异常", e);  
  180.         } finally {  
  181.             if (indexWriter != null) {  
  182.                 try {  
  183.                     indexWriter.close();  
  184.                 } catch (CorruptIndexException e) {  
  185.                     e.printStackTrace();  
  186.                     logger.error("索引关闭异常", e);  
  187.                 } catch (IOException e) {  
  188.                     e.printStackTrace();  
  189.                     logger.error("索引关闭异常", e);  
  190.                 } finally {  
  191.                     try {  
  192.                         if (d != null && IndexWriter.isLocked(d)) {  
  193.                             IndexWriter.unlock(d);  
  194.                         }  
  195.                     } catch (IOException e) {  
  196.                         e.printStackTrace();  
  197.                         logger.error("解锁异常", e);  
  198.                     }  
  199.                 }  
  200.             }  
  201.         }  
  202.         return false;  
  203.     }  
  204.   
  205.     /** 
  206.      * 根据id删除索引(id对应的那条document) 
  207.      *  
  208.      * @param id 
  209.      *            document的id 
  210.      * @return 
  211.      */  
  212.     public static boolean deleteIndex(String id) {  
  213.         IndexWriter indexWriter = null;  
  214.         Directory d = null;  
  215.         try {  
  216.             d = FSDirectory.open(new File(LuceneConfig.INDEX_PATH));  
  217.             while (d != null && IndexWriter.isLocked(d)) {// 如果文件锁住,等待解锁  
  218.                 Thread.sleep(1000);  
  219.                 logger.error("索引已经锁住,正在等待....");  
  220.             }  
  221.   
  222.             IndexWriterConfig indexWriterConfig = new IndexWriterConfig(  
  223.                     Version.LUCENE_36, AnalyzerUtil.getIkAnalyzer());  
  224.             indexWriter = new IndexWriter(d, indexWriterConfig);  
  225.             Term term = new Term("id", id);  
  226.             indexWriter.deleteDocuments(term);  
  227.             indexWriter.optimize();  
  228.             indexWriter.commit();  
  229.             logger.debug("删除文章ID:{}的索引..." + id);  
  230.             logger.debug("共有索引{}个" + indexWriter.numDocs());  
  231.             indexWriter.close();  
  232.             return true;  
  233.         } catch (CorruptIndexException e) {  
  234.             e.printStackTrace();  
  235.             logger.error("索引删除异常", e);  
  236.         } catch (LockObtainFailedException e) {  
  237.             e.printStackTrace();  
  238.             logger.error("索引删除异常", e);  
  239.         } catch (IOException e) {  
  240.             e.printStackTrace();  
  241.             logger.error("索引不存在", e);  
  242.         } catch (Exception e) {  
  243.             e.printStackTrace();  
  244.             logger.error("索引删除异常", e);  
  245.         } finally {  
  246.             if (indexWriter != null) {  
  247.                 try {  
  248.                     indexWriter.close();  
  249.                 } catch (CorruptIndexException e) {  
  250.                     e.printStackTrace();  
  251.                     logger.error("索引关闭异常", e);  
  252.                 } catch (IOException e) {  
  253.                     e.printStackTrace();  
  254.                     logger.error("索引关闭异常", e);  
  255.                 } finally {  
  256.                     try {  
  257.                         if (d != null && IndexWriter.isLocked(d)) {  
  258.                             IndexWriter.unlock(d);  
  259.                         }  
  260.                     } catch (IOException e) {  
  261.                         e.printStackTrace();  
  262.                         logger.error("解锁异常", e);  
  263.                     }  
  264.                 }  
  265.             }  
  266.         }  
  267.         return false;  
  268.     }  
  269.       
  270.     /** 
  271.      * @param fileds 要查询的综合字段 ex【 new String[]{ "contentTitle", "contentContext","keywords"};】 
  272.      * @param occurs 要查询的字段出现可能 ex【new Occur[] { Occur.SHOULD, Occur.SHOULD,Occur.SHOULD };】 
  273.      * @param keyWord 要查询的关键字 
  274.      * @param page 当前页 
  275.      * @param pageSize 分页数 
  276.      * @return 
  277.      */  
  278.     public static ArrayList<LuceneData> search(String[] fileds, Occur[] occurs,String keyWord,Integer page,Integer pageSize) {  
  279.         return search(fileds, occurs, keyWord,"","", page, pageSize);  
  280.     }  
  281.   
  282.     /** 
  283.      * @param fileds 要查询的综合字段 ex【 new String[]{ "contentTitle", "contentContext","keywords"};】 
  284.      * @param occurs 要查询的字段出现可能 ex【new Occur[] { Occur.SHOULD, Occur.SHOULD,Occur.SHOULD };】 
  285.      * @param keyWord 要查询的关键字 
  286.      * @param subType 主类型 
  287.      * @param type 主类型下的子类型 
  288.      * @param page  当前页 
  289.      * @param pageSize 分页数 
  290.      * @return 
  291.      */  
  292.     public static ArrayList<LuceneData> search(String[] fileds, Occur[] occurs,String keyWord,String bigtype,String subType,Integer page,Integer pageSize) {  
  293.         try {  
  294.             // ---------初始化---------------------------------------------------  
  295.             IndexReader reader = IndexReader.open(FSDirectory.open(new File(LuceneConfig.INDEX_PATH)));  
  296.             IndexSearcher searcher = new IndexSearcher(reader);  
  297.             // 在索引器中使用IKSimilarity相似度评估器  
  298.             searcher.setSimilarity(new IKSimilarity());  
  299.   
  300.             // ----------设置过滤器------------------------------------------------  
  301.             BooleanQuery booleanquery = new BooleanQuery();  
  302.             // 综合查询   (查询条件1)  
  303.             Query likequery = IKQueryParser.parseMultiField(fileds, keyWord,occurs);  
  304.             booleanquery.add(likequery, Occur.MUST);  
  305.               
  306.             //主类型过滤 (查询条件2)  
  307.             if(bigtype.length()>0)  
  308.             {  
  309.                 Query subquery = IKQueryParser.parse("bigtype", bigtype);  
  310.                 booleanquery.add(subquery, Occur.MUST);  
  311.             }  
  312.             //从类型过滤 (查询条件3)  
  313.             if(subType.length()>0)  
  314.             {  
  315.                 Query subquery = IKQueryParser.parse("type", subType);  
  316.                 booleanquery.add(subquery, Occur.MUST);  
  317.             }  
  318.               
  319.             //过滤数字区间  
  320.             //NumericRangeQuery<Integer> spanquery = NumericRangeQuery.newIntRange("id", begin, end, true, true);  
  321.             //booleanquery.add(spanquery, Occur.MUST);  
  322.               
  323.             //过滤时间区间(时间的getTime比大小)  
  324.             //NumericRangeQuery<Integer> spanquery = NumericRangeQuery.newLongRange("id", begin, end, true, true);  
  325.             //booleanquery.add(spanquery, Occur.MUST);  
  326.               
  327.             //-------------过滤filter--------------------------------------------------  
  328.               
  329.             //-------------设置权值(其中一个方法在doc创建Field时field.setBoost)--------------------  
  330.               
  331.             //-------------排序--------------------------------------------------------  
  332.             /*多字段排序,设置在前面的会优先排序 //true:降序 false:升序 
  333.              * SortField[] sortFields = new SortField[3]; 
  334.              * SortField top = new SortField("isTop", SortField.INT, true); 
  335.              * SortField hits = new SortField("contentHits", SortField.INT,true);  
  336.              * SortField pubtime = new SortField("publishTime",SortField.LONG, true);  
  337.              * sortFields[0] = top;  
  338.              * sortFields[1] = hits; 
  339.              * sortFields[2] = pubtime;  
  340.              * Sort sort = new Sort(sortFields); 
  341.              */  
  342.               
  343.             //-------------搜索--------------------------------------------------------  
  344.             //分页查询,lucene不支持分页查询,因为查询速度很快,所以我们就设置查询上限  
  345.             TopScoreDocCollector topCollector = TopScoreDocCollector.create(page*pageSize, false);//上限  
  346.             searcher.search(booleanquery, topCollector);  
  347.             //查询结果的总数量  
  348.             totalNum=topCollector.getTotalHits();  
  349.             ScoreDoc[] docs = topCollector.topDocs((page - 1) * pageSize, pageSize).scoreDocs;//返回所需数据  
  350.               
  351.             //高亮显示  
  352.             SimpleHTMLFormatter simpleHtmlFormatter = new SimpleHTMLFormatter("<font color='red'>""</font>");  
  353.             Highlighter highlighter = new Highlighter(simpleHtmlFormatter, new QueryScorer(booleanquery));  
  354.             highlighter.setTextFragmenter(new SimpleFragmenter(100));  
  355.               
  356.             ArrayList<LuceneData> list = new ArrayList<LuceneData>();  
  357.             LuceneData data=null;  
  358.             for (ScoreDoc scdoc : docs) {  
  359.                 Document document = searcher.doc(scdoc.doc);  
  360.                 data=new LuceneData();  
  361.                 //设置高壳  
  362.                 TokenStream tokenStream=null;  
  363.                 String name = document.get("name");  
  364.                 tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), scdoc.doc, "name", AnalyzerUtil.getIkAnalyzer());  
  365.                 name = highlighter.getBestFragment(tokenStream, name);  
  366.                 if(name==null)  
  367.                     name=document.get("name");  
  368.                   
  369.                 String author = document.get("author");  
  370.                 tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), scdoc.doc, "author", AnalyzerUtil.getIkAnalyzer());  
  371.                 author = highlighter.getBestFragment(tokenStream, author);            
  372.                 if(author==null)  
  373.                     author=document.get("author");  
  374.                   
  375.                 String outline = document.get("outline");  
  376.                 tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), scdoc.doc, "outline", AnalyzerUtil.getIkAnalyzer());  
  377.                 outline = highlighter.getBestFragment(tokenStream, outline);                  
  378.                 if(outline==null)  
  379.                     outline=document.get("outline");  
  380.                   
  381.                 String type = document.get("type");  
  382.                 tokenStream = TokenSources.getAnyTokenStream(searcher.getIndexReader(), scdoc.doc, "type", AnalyzerUtil.getIkAnalyzer());  
  383.                 type = highlighter.getBestFragment(tokenStream, type);        
  384.                 if(type==null)  
  385.                     type=document.get("type");  
  386.   
  387.                 data.setId(document.get("id"));  
  388.                 data.setName(name);  
  389.                 data.setAuthor(author);  
  390.                 data.setOutline(outline);  
  391.                 data.setType(type);  
  392.                 data.setTypeid(document.get("typeid")) ;  
  393.                 data.setBigtype(document.get("bigtype"));  
  394.                 data.setUpdateTime(document.get("updateTime"));  
  395.                 data.setImgPath(document.get("imgPath"));  
  396.                 data.setImgUrlPath(document.get("imgUrlPath"));  
  397.                 data.setContent(document.get("content"));  
  398.                 data.setLink_url(document.get("link_url"));  
  399.                 data.setHot(Long.parseLong(document.get("hot")));  
  400.                 data.setClickPoint(Long.parseLong(document.get("clickPoint")));  
  401.                   
  402.                 list.add(data);  
  403.             }  
  404.             return list;  
  405.         } catch (Exception e) {  
  406.             e.printStackTrace();  
  407.             logger.error("搜索异常", e);  
  408.             return new ArrayList<LuceneData>();  
  409.         }  
  410.     }  
  411.   
  412.     /** 
  413.      * 把传入的数据类型转换成Document 
  414.      *  
  415.      * @param data 
  416.      * @return 
  417.      */  
  418.     private static Document getDocument(LuceneData data) {  
  419.         Document doc = new Document();  
  420.         doc.add(new Field("id", data.getId(), Store.YES, Index.NOT_ANALYZED));  
  421.         doc.add(new Field("name", data.getName(), Store.YES, Index.ANALYZED));  
  422.         doc.add(new Field("author", data.getAuthor(), Store.YES,Index.ANALYZED));  
  423.         doc.add(new Field("outline", data.getOutline(), Store.YES,Index.ANALYZED));  
  424.         doc.add(new Field("type", data.getType(), Store.YES, Index.ANALYZED));  
  425.         doc.add(new Field("updateTime", data.getUpdateTime(), Store.YES,Index.NOT_ANALYZED));  
  426.         doc.add(new Field("imgPath", data.getImgPath(), Store.YES,Index.NOT_ANALYZED));  
  427.         doc.add(new Field("imgUrlPath", data.getImgUrlPath()==null?"":data.getImgUrlPath(), Store.YES,Index.NOT_ANALYZED));  
  428.         doc.add(new Field("content", data.getContent()==null?"":data.getContent(), Store.YES,Index.ANALYZED));  
  429.         doc.add(new Field("link_url", data.getLink_url(), Store.YES,Index.NOT_ANALYZED));  
  430.   
  431.         doc.add(new Field("hot", Long.toString(data.getHot()), Store.YES,Index.NOT_ANALYZED));  
  432.         doc.add(new Field("clickPoint", Long.toString(data.getClickPoint()),Store.YES, Index.NOT_ANALYZED));  
  433.           
  434.         doc.add(new Field("bigtype", data.getBigtype(), Store.YES,Index.NOT_ANALYZED));  
  435.         doc.add(new Field("typeid", data.getTypeid(), Store.YES,Index.NOT_ANALYZED));  
  436.         return doc;  
  437.     }  
  438.   
  439.     /** 
  440.      * 优化索引,返回优化策略 
  441.      *  
  442.      * @return 
  443.      */  
  444.     private static LogMergePolicy optimizeIndex() {  
  445.         LogMergePolicy mergePolicy = new LogByteSizeMergePolicy();  
  446.   
  447.         // 设置segment添加文档(Document)时的合并频率  
  448.         // 值较小,建立索引的速度就较慢  
  449.         // 值较大,建立索引的速度就较快,>10适合批量建立索引  
  450.         // 达到50个文件时就和合并  
  451.         mergePolicy.setMergeFactor(50);  
  452.   
  453.         // 设置segment最大合并文档(Document)数  
  454.         // 值较小有利于追加索引的速度  
  455.         // 值较大,适合批量建立索引和更快的搜索  
  456.         mergePolicy.setMaxMergeDocs(5000);  
  457.   
  458.         // 启用复合式索引文件格式,合并多个segment  
  459.         mergePolicy.setUseCompoundFile(true);  
  460.         return mergePolicy;  
  461.     }  
  462.       
  463.     /** 
  464.      * 转换类型成lucene的data类型 
  465.      * @param list 
  466.      * @return 
  467.      */  
  468.     public static ArrayList<LuceneData> transformation_Novel(ArrayList<Novel> list){  
  469.         ArrayList<LuceneData> transforlist=new ArrayList<LuceneData>();  
  470.         LuceneData data=new LuceneData();  
  471.         for(Model model : list)  
  472.         {  
  473.             if(model instanceof Novel)  
  474.             {  
  475.                 data=new LuceneData();  
  476.                 Novel novel=(Novel)model;  
  477.                 data.setId(novel.getId()+"");  
  478.                 data.setName(novel.getName());  
  479.                 data.setAuthor(novel.getAuthor());  
  480.                 data.setOutline(novel.getOutline());  
  481.                 data.setType(novel.getNovelType().getName());  
  482.                 data.setTypeid(novel.getNovelType().getId()+"");  
  483.                 data.setBigtype("小说");  
  484.                 data.setUpdateTime(novel.getUpdateTime()+"");  
  485.                 data.setImgPath(novel.getImgPath());  
  486.                 data.setImgUrlPath(novel.getImgUrlPath());  
  487.                 data.setContent(novel.getContent());  
  488.                 data.setLink_url(novel.getLink_url());  
  489.                 data.setHot(novel.getHot());  
  490.                 data.setClickPoint(novel.getClickPoint());  
  491.                 transforlist.add(data);  
  492.             }  
  493.         }  
  494.         return transforlist;  
  495.     }  
  496.     /** 
  497.      * 测试 
  498.      * @param args 
  499.      */  
  500.     public static void main(String[] args)  
  501.     {  
  502. //---------------------创建  
  503. //      ApplicationContext springContext = new ClassPathXmlApplicationContext(new String[]{"classpath:com/springResource/*.xml"});  
  504. //      NovelService novelService = (NovelService)springContext.getBean("novelService");   
  505. //      System.out.println("novelService"+novelService);  
  506. //        
  507. //      ArrayList<Novel> list=novelService.getNovelList(21, 100);  
  508. //      ArrayList<LuceneData> transforlist=LuceneService.transformation(list);  
  509. //      for(LuceneData data : transforlist)  
  510. //      {  
  511. //          System.out.println("in"+LuceneService.createIndex(data));  
  512. //      }  
  513.           
  514. //---------------------搜索  
  515.         String[] fileds=new String[]{ "name""author","outline","type"};  
  516.         Occur[] occurs=new Occur[] { Occur.SHOULD, Occur.SHOULD,Occur.SHOULD ,Occur.SHOULD };  
  517.         ArrayList<LuceneData> list=LuceneUtil.search(fileds, occurs, "初雪"110);  
  518.           
  519.         for(LuceneData data:list)  
  520.         {  
  521.             System.out.println(data);  
  522.             System.out.println(data.getId()+":"+data.getAuthor());  
  523.         }  
  524.         System.out.println(list.size());  
  525.     }  
  526. }  

[java] view plain copy
  1. package com.lucene.util;  
  2.   
  3. import org.apache.lucene.analysis.Analyzer;  
  4. import org.wltea.analyzer.lucene.IKAnalyzer;  
  5.   
  6. /** 
  7.  * 分词器工具,设定分词器 
  8.  * @author Administrator 
  9.  * 
  10.  */  
  11. public class AnalyzerUtil {  
  12.     private static Analyzer analyzer;  
  13.   
  14.     public static Analyzer getIkAnalyzer() {  
  15.         if (analyzer == null) {  
  16.             // 当为true时,分词器迚行最大词长切分 ;当为false时,分词器迚行最细粒度切  
  17.             analyzer = new IKAnalyzer(true);  
  18.         }  
  19.         return analyzer;  
  20.     }  
  21. }  

[java] view plain copy
  1. package com.lucene.data;  
  2.   
  3.   
  4. /** 
  5.  * 数据类 
  6.  * @author Administrator 
  7.  * 
  8.  */  
  9. public class LuceneData {  
  10.     private String id;  
  11.     private String name;  
  12.     private String author;  
  13.     private String imgPath;  
  14.     private String outline; //描述  
  15.     private String type; //类型  
  16.     private String typeid;//类型 id  
  17.     private String bigtype; // 总类型  
  18.       
  19.     private String updateTime;  
  20.     private String imgUrlPath;  
  21.     private String content;  
  22.     private String link_url;  
  23.       
  24.     private Long hot=0l;  
  25.        
  26.     private Long clickPoint=0l;  
  27.   
  28.     public String getId() {  
  29.         return id;  
  30.     }  
  31.   
  32.     public void setId(String id) {  
  33.         this.id = id;  
  34.     }  
  35.   
  36.     public String getName() {  
  37.         return name;  
  38.     }  
  39.   
  40.     public void setName(String name) {  
  41.         this.name = name;  
  42.     }  
  43.   
  44.     public String getAuthor() {  
  45.         return author;  
  46.     }  
  47.   
  48.     public void setAuthor(String author) {  
  49.         this.author = author;  
  50.     }  
  51.   
  52.     public String getImgPath() {  
  53.         return imgPath;  
  54.     }  
  55.   
  56.     public void setImgPath(String imgPath) {  
  57.         this.imgPath = imgPath;  
  58.     }  
  59.   
  60.     public String getOutline() {  
  61.         return outline;  
  62.     }  
  63.   
  64.     public void setOutline(String outline) {  
  65.         this.outline = outline;  
  66.     }  
  67.   
  68.     public String getType() {  
  69.         return type;  
  70.     }  
  71.   
  72.     public void setType(String type) {  
  73.         this.type = type;  
  74.     }  
  75.   
  76.     public String getUpdateTime() {  
  77.         return updateTime;  
  78.     }  
  79.   
  80.     public void setUpdateTime(String updateTime) {  
  81.         this.updateTime = updateTime;  
  82.     }  
  83.   
  84.     public String getImgUrlPath() {  
  85.         return imgUrlPath;  
  86.     }  
  87.   
  88.     public void setImgUrlPath(String imgUrlPath) {  
  89.         this.imgUrlPath = imgUrlPath;  
  90.     }  
  91.   
  92.     public String getContent() {  
  93.         return content;  
  94.     }  
  95.   
  96.     public void setContent(String content) {  
  97.         this.content = content;  
  98.     }  
  99.   
  100.     public String getLink_url() {  
  101.         return link_url;  
  102.     }  
  103.   
  104.     public void setLink_url(String linkUrl) {  
  105.         link_url = linkUrl;  
  106.     }  
  107.   
  108.     public Long getHot() {  
  109.         return hot;  
  110.     }  
  111.   
  112.     public void setHot(Long hot) {  
  113.         this.hot = hot;  
  114.     }  
  115.   
  116.     public Long getClickPoint() {  
  117.         return clickPoint;  
  118.     }  
  119.   
  120.     public void setClickPoint(Long clickPoint) {  
  121.         this.clickPoint = clickPoint;  
  122.     }  
  123.   
  124.       
  125.   
  126.     public String getBigtype() {  
  127.         return bigtype;  
  128.     }  
  129.   
  130.     public void setBigtype(String bigtype) {  
  131.         this.bigtype = bigtype;  
  132.     }  
  133.   
  134.     @Override  
  135.     public String toString() {  
  136.         return "LuceneData [author=" + author + ", bigtype=" + bigtype  
  137.                 + ", clickPoint=" + clickPoint + ", content=" + content  
  138.                 + ", hot=" + hot + ", id=" + id + ", imgPath=" + imgPath  
  139.                 + ", imgUrlPath=" + imgUrlPath + ", link_url=" + link_url  
  140.                 + ", name=" + name + ", outline=" + outline + ", type=" + type  
  141.                 + ", updateTime=" + updateTime + "]";  
  142.     }  
  143.   
  144.     public String getTypeid() {  
  145.         return typeid;  
  146.     }  
  147.   
  148.     public void setTypeid(String typeid) {  
  149.         this.typeid = typeid;  
  150.     }  
  151.       
  152.       

0 0
原创粉丝点击