JAVA_WEB项目之Lucene检索框架实现增删查改的代码优化以及分页功能实现

来源:互联网 发布:网络单机游戏下载 编辑:程序博客网 时间:2024/06/15 22:04

上一篇《JAVA_WEB项目之Lucene检索框架入门案例》初步给出了一个入门的例子。接下来是对上一篇例子的优化,增加删除、修改 document 的功能,以及通过更新索引库和删除索引库实现同步的代码。

首先还是实体类不变:Goods

package com.shop.demo;public class Goods {private Integer id;private String name;private Double price;private String pic;private String remark;public Integer getId() {return id;}public void setId(Integer id) {this.id = id;}public String getName() {return name;}public void setName(String name) {this.name = name;}public Double getPrice() {return price;}public void setPrice(Double price) {this.price = price;}public String getPic() {return pic;}public void setPic(String pic) {this.pic = pic;}public String getRemark() {return remark;}public void setRemark(String remark) {this.remark = remark;}}


Lucene的配置类(ConfigureLucene):涉及到单例设计模式

package com.shop.demo;

import java.io.File;
import java.io.IOException;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FSDirectory;
import org.apache.lucene.util.Version;

/**
 * Application-wide holder for the shared Lucene resources: the index
 * {@link Directory} and the {@link Analyzer}.
 * <p>
 * Both are created exactly once in a static initializer (eager singleton);
 * the private constructor prevents instantiation.
 */
public class ConfigureLucene {

    private ConfigureLucene() {
        // static holder class - never instantiated
    }

    // Shared index directory, opened once for the whole application.
    private static Directory dir = null;

    // Shared analyzer. NOTE: StandardAnalyzer is only suitable for English text.
    private static Analyzer ana = null;

    static {
        try {
            // Opens the index at the given path; the directory is created on demand.
            dir = FSDirectory.open(new File("c:/demo"));
            // Tokenization behaviour depends on the Lucene version passed here.
            ana = new StandardAnalyzer(Version.LUCENE_30);
        } catch (Exception e) {
            // Fail fast: without these resources nothing else can work.
            throw new RuntimeException(e);
        }
    }

    public static Directory getDir() {
        return dir;
    }

    public static Analyzer getAna() {
        return ana;
    }
}
Lucene的工具类DocumentUtil:

package com.shop.demo;import org.apache.lucene.document.Document;import org.apache.lucene.document.Field;import org.apache.lucene.document.Field.Index;import org.apache.lucene.document.Field.Store;public class DocumentUtil {private DocumentUtil(){}/** * 把goods对象转为document对象 */public static Document goodsToDocument(Goods goods){//把goods对象转为documentDocument doc=new Document();doc.add(new Field("id", goods.getId().toString(), Store.YES, Index.NOT_ANALYZED));doc.add(new Field("name", goods.getName(), Store.YES, Index.ANALYZED));doc.add(new Field("price", goods.getPrice().toString(), Store.YES, Index.NOT_ANALYZED));doc.add(new Field("remark", goods.getRemark(), Store.NO, Index.ANALYZED));return doc;}/** * 把document对象转为goods对象 */public static Goods documentToGoods(Document doc){Goods goods=new Goods();goods.setId(Integer.parseInt(doc.get("id")));goods.setName(doc.get("name"));goods.setPrice(Double.parseDouble(doc.get("price")));goods.setRemark(doc.get("remark"));return goods;}}


接下来是Lucene的实现类HelloWordLucene:

package com.shop.demo;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import org.apache.lucene.document.Document;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.IndexWriter.MaxFieldLength;
import org.apache.lucene.index.Term;
import org.apache.lucene.queryParser.QueryParser;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.Query;
import org.apache.lucene.search.ScoreDoc;
import org.apache.lucene.search.TopDocs;
import org.apache.lucene.util.Version;

/**
 * CRUD operations against the Lucene index for {@link Goods} objects.
 * <p>
 * All methods obtain the shared {@code Directory} and {@code Analyzer} from
 * {@link ConfigureLucene} and convert between beans and documents via
 * {@link DocumentUtil}.
 */
public class HelloWordLucene {

    /** Maximum number of hits fetched by the simple (non-paged) query. */
    private static final int MAX_RESULTS = 5;

    /**
     * Adds one goods object to the index.
     * On failure the pending changes are rolled back and the cause is rethrown.
     *
     * @param goods the product to index
     */
    public void addDocument(Goods goods) {
        IndexWriter indexWriter = null;
        try {
            // MaxFieldLength.LIMITED caps the number of indexed terms per field (10000).
            indexWriter = new IndexWriter(ConfigureLucene.getDir(),
                    ConfigureLucene.getAna(), MaxFieldLength.LIMITED);
            indexWriter.addDocument(DocumentUtil.goodsToDocument(goods));
            // Explicit commit; close() would also auto-commit on the success path.
            indexWriter.commit();
        } catch (Exception e) {
            // FIX: the original called indexwriter.rollback() without a null
            // check - if the IndexWriter constructor itself failed this threw
            // a NullPointerException that masked the real cause.
            rollbackQuietly(indexWriter);
            throw new RuntimeException(e);
        } finally {
            closeWriter(indexWriter);
        }
    }

    /**
     * Searches the "name" field for the given keyword.
     *
     * @param name the query keyword(s); analyzed before matching against terms
     * @return up to {@value #MAX_RESULTS} matching goods, best hits first
     */
    public List<Goods> queryGoods(String name) {
        List<Goods> goodsList = new ArrayList<Goods>();
        IndexSearcher searcher = null;
        try {
            // FIX: reuse the shared directory/analyzer from ConfigureLucene
            // instead of reopening them (the original reopened FSDirectory and
            // created a new StandardAnalyzer here, inconsistent with the other
            // methods and leaking the extra Directory).
            searcher = new IndexSearcher(ConfigureLucene.getDir());
            // "name" is the default field the parsed query runs against.
            QueryParser queryParser = new QueryParser(Version.LUCENE_30, "name",
                    ConfigureLucene.getAna());
            Query query = queryParser.parse(name);
            // First pass returns only doc ids + scores (TopDocs), not documents.
            TopDocs topDocs = searcher.search(query, MAX_RESULTS);
            for (ScoreDoc scoreDoc : topDocs.scoreDocs) {
                // Second pass: load the stored fields of each hit by doc id.
                Document doc = searcher.doc(scoreDoc.doc);
                goodsList.add(DocumentUtil.documentToGoods(doc));
            }
        } catch (Exception e) {
            throw new RuntimeException(e);
        } finally {
            closeSearcher(searcher);
        }
        return goodsList;
    }

    /**
     * Deletes all documents whose "id" field equals the given id.
     * This is why "id" must be indexed (NOT_ANALYZED) even though searches
     * normally go through "name".
     *
     * @param id the goods id to delete
     */
    public void deleteDocument(int id) {
        IndexWriter indexWriter = null;
        try {
            indexWriter = new IndexWriter(ConfigureLucene.getDir(),
                    ConfigureLucene.getAna(), MaxFieldLength.LIMITED);
            // Removes matching documents; orphaned terms remain until optimize().
            indexWriter.deleteDocuments(new Term("id", Integer.toString(id)));
            // Physically purges the deleted entries from the index segments.
            indexWriter.optimize();
            indexWriter.commit();
        } catch (Exception e) {
            rollbackQuietly(indexWriter);  // FIX: null-safe (see addDocument)
            throw new RuntimeException(e);
        } finally {
            closeWriter(indexWriter);
        }
    }

    /**
     * Replaces the document whose "id" matches {@code goods.getId()} with a
     * freshly built document (Lucene update = delete + add).
     *
     * @param goods the new state of the product; id selects the old document
     */
    public void updateDocument(Goods goods) {
        IndexWriter indexWriter = null;
        try {
            indexWriter = new IndexWriter(ConfigureLucene.getDir(),
                    ConfigureLucene.getAna(), MaxFieldLength.LIMITED);
            indexWriter.updateDocument(new Term("id", goods.getId().toString()),
                    DocumentUtil.goodsToDocument(goods), ConfigureLucene.getAna());
            indexWriter.optimize();
            indexWriter.commit();
        } catch (Exception e) {
            rollbackQuietly(indexWriter);  // FIX: null-safe (see addDocument)
            throw new RuntimeException(e);
        } finally {
            closeWriter(indexWriter);
        }
    }

    /** Best-effort rollback; a rollback failure must not mask the original error. */
    private static void rollbackQuietly(IndexWriter writer) {
        if (writer != null) {
            try {
                writer.rollback();
            } catch (IOException ignored) {
                // deliberately swallowed - the caller rethrows the original cause
            }
        }
    }

    /** Null-safe close for the writer; wraps checked exceptions. */
    private static void closeWriter(IndexWriter writer) {
        if (writer != null) {
            try {
                writer.close();
            } catch (Exception e) {
                throw new RuntimeException(e);
            }
        }
    }

    /** Null-safe close for the searcher; wraps checked exceptions. */
    private static void closeSearcher(IndexSearcher searcher) {
        if (searcher != null) {
            try {
                searcher.close();
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
    }
}

下面我们给出一个测试类对上面的功能进行单元测试HelloWordLuceneTest:

package com.shop.demo;

import java.util.List;
import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

/**
 * JUnit 4 tests exercising the CRUD operations of {@link HelloWordLucene}.
 * A single instance is shared across all tests via the class-level fixture.
 */
public class HelloWordLuceneTest {

    private static HelloWordLucene hellowod;

    @BeforeClass
    public static void setUpBeforeClass() throws Exception {
        hellowod = new HelloWordLucene();
    }

    @AfterClass
    public static void tearDownAfterClass() throws Exception {
        hellowod = null;
    }

    /** Indexes one sample product. */
    @Test
    public void testAddDocument() {
        Goods sample = new Goods();
        sample.setId(12);
        sample.setName("IBM Computer12 ");
        sample.setPrice(2333.9);
        sample.setRemark("IBM Computer is good");
        hellowod.addDocument(sample);
    }

    /** Searches by keyword and dumps the hits to stdout. */
    @Test
    public void testquery() {
        List<Goods> hits = hellowod.queryGoods("ibm");
        for (Goods good : hits) {
            System.out.println("商品编号:" + good.getId()
                    + ",商品名称:" + good.getName()
                    + ",商品价格:" + good.getPrice()
                    + ",商品的详细信息:" + good.getRemark());
        }
    }

    /** Deletes the sample product by id. */
    @Test
    public void testdelete() {
        hellowod.deleteDocument(12);
    }

    /** Replaces the sample product's document with new field values. */
    @Test
    public void testupdate() {
        Goods replacement = new Goods();
        replacement.setId(12);
        replacement.setName("HP Com");
        replacement.setPrice(3000.0);
        replacement.setRemark("HP very goods");
        hellowod.updateDocument(replacement);
    }
}


分页的代码:

/** * 实现分页的功能   * @param name 查询的关键字 * @param currentPage 当前的页数 * @return 记录数 */public List<Goods> queryByPage(String name,int currentPage){int number=5; // 每页显示5条List<Goods> goodsList=new ArrayList<Goods>();IndexSearcher indexSearcher=null;Directory directory=null;Analyzer analyzer=null;try {directory=FSDirectory.open(new File("C:/demo"));// 创建查询对象indexSearcher=new IndexSearcher(directory);// 创建分词器,给查询的关键字先做分词操作,然后在到索引库中匹配Termanalyzer=new StandardAnalyzer(Version.LUCENE_30);// 查询数据的解析器, name到那个Term(key value)中去查询QueryParser parser=new QueryParser(Version.LUCENE_30,"name",analyzer);// 指定查询的关键字Query query=parser.parse(name);TopDocs topDocs=indexSearcher.search(query,currentPage*number);// 此变量/每页显示的记录数就是总页数System.out.println("真正命中的结果数:" + topDocs.totalHits);int totalPage=0;if(topDocs.totalHits%number!=0){totalPage=topDocs.totalHits/number+1;}else{totalPage=topDocs.totalHits/number;}System.out.println("通过系统的总结果数/每页显示的数量=总页数" + totalPage);// 返回的是符合条件的文档编号,并不是文档本事ScoreDoc[] scoreDocs = topDocs.scoreDocs;// 去期望值  和实际值的 最小值System.out.println("真正查询出来的数组的长度:" + scoreDocs.length);for(int i=(currentPage-1)*number;i<scoreDocs.length;i++){ScoreDoc scoreDoc=scoreDocs[i];System.out.println("存储了命中率积分:" + scoreDoc.score);System.out.println("存储的是文档编号:" + scoreDoc.doc);// 第二次查询: 通过文档的编号,查询真正的文档信息Document document=indexSearcher.doc(scoreDoc.doc);goodsList.add(DocumentUtil.documentToGoods(document));}} catch (Exception e) {throw new RuntimeException(e);}finally{try {indexSearcher.close();} catch (IOException e) {throw new RuntimeException(e);}}return goodsList;}

测试方法:

/** Fetches page 1 of the "ibm" search and prints every hit. */
@Test
public void testqueryByPage() {
    List<Goods> firstPage = hellowod.queryByPage("ibm", 1);
    for (Goods good : firstPage) {
        System.out.println("商品编号:" + good.getId()
                + ",商品名称:" + good.getName()
                + ",商品价格:" + good.getPrice()
                + ",商品的详细信息:" + good.getRemark());
    }
}




0 1