一个简单的Filter:安全级别与过滤器

来源:互联网 发布:微信一夜暴富软件 编辑:程序博客网 时间:2024/06/11 05:42
/*
 * A simple Lucene Filter demo: security levels and filters.
 * This code uses a Lucene Filter to exclude documents at a
 * designated security level from the search results so they are
 * never displayed; here, documents at level 0 are filtered out.
 */



如图:



package filter;


import java.io.IOException;
import java.util.BitSet;


import org.apache.lucene.analysis.standard.StandardAnalyzer;
import org.apache.lucene.document.Document;
import org.apache.lucene.document.Field;
import org.apache.lucene.index.IndexReader;
import org.apache.lucene.index.IndexWriter;
import org.apache.lucene.index.Term;
import org.apache.lucene.index.TermDocs;
import org.apache.lucene.search.Filter;
import org.apache.lucene.search.Hits;
import org.apache.lucene.search.IndexSearcher;
import org.apache.lucene.search.RangeQuery;


public class FilterTwo {


private final int SECURITY_ADVANCED = 0;
private final int SECURITY_MIDDLE = 1;
private final int SECURITY_NORMAL = 2;

public FilterTwo(String INDEX_STORE_PATH) {
// TODO Auto-generated constructor stub
try{
IndexWriter writer = new IndexWriter(INDEX_STORE_PATH, new StandardAnalyzer(), true);
writer.setUseCompoundFile(false);

Document doc1 = new Document();
Field f1 = new Field("bookNumber", "0000003", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field f2 = new Field("bookname", "论宇称非对称模型", Field.Store.YES, Field.Index.TOKENIZED);
Field f3 = new Field("publishdate", "1999-01-01", Field.Store.YES, Field.Index.UN_TOKENIZED);
Field f4 = new Field("securitylevel", SECURITY_ADVANCED + "", Field.Store.YES, Field.Index.UN_TOKENIZED);
doc1.add(f1);
doc1.add(f2);
doc1.add(f3);
doc1.add(f4);

Document doc2 = new Document();
f1 = new Field("bookNumber", "0000005", Field.Store.YES, Field.Index.UN_TOKENIZED);
f2 = new Field("bookname", "钢铁战士", Field.Store.YES, Field.Index.TOKENIZED);
f3 = new Field("publishdate", "1995-07-15", Field.Store.YES, Field.Index.UN_TOKENIZED);
f4 = new Field("securitylevel", SECURITY_MIDDLE + "", Field.Store.YES, Field.Index.UN_TOKENIZED);
doc2.add(f1);
doc2.add(f2);
doc2.add(f3);
doc2.add(f4);

Document doc3 = new Document();
f1 = new Field("bookNumber", "0000001", Field.Store.YES, Field.Index.UN_TOKENIZED);
f2 = new Field("bookname", "相对论", Field.Store.YES, Field.Index.TOKENIZED);
f3 = new Field("publishdate", "1963-02-14", Field.Store.YES, Field.Index.UN_TOKENIZED);
f4 = new Field("securitylevel", SECURITY_ADVANCED + "", Field.Store.YES, Field.Index.UN_TOKENIZED);
doc3.add(f1);
doc3.add(f2);
doc3.add(f3);
doc3.add(f4);


Document doc4 = new Document();
f1 = new Field("bookNumber", "0000006", Field.Store.YES, Field.Index.UN_TOKENIZED);
f2 = new Field("bookname", "黑猫警长", Field.Store.YES, Field.Index.TOKENIZED);
f3 = new Field("publishdate", "1988-05-01", Field.Store.YES, Field.Index.UN_TOKENIZED);
f4 = new Field("securitylevel", SECURITY_ADVANCED + "", Field.Store.YES, Field.Index.UN_TOKENIZED);
doc4.add(f1);
doc4.add(f2);
doc4.add(f3);
doc4.add(f4);

Document doc5 = new Document();
f1 = new Field("bookNumber", "0000004", Field.Store.YES, Field.Index.UN_TOKENIZED);
f2 = new Field("bookname", "原子弹的爆破过程", Field.Store.YES, Field.Index.TOKENIZED);
f3 = new Field("publishdate", "1959-10-21", Field.Store.YES, Field.Index.UN_TOKENIZED);
f4 = new Field("securitylevel", SECURITY_ADVANCED + "", Field.Store.YES, Field.Index.UN_TOKENIZED);
doc5.add(f1);
doc5.add(f2);
doc5.add(f3);
doc5.add(f4);

Document doc6 = new Document();
f1 = new Field("bookNumber", "0000007", Field.Store.YES, Field.Index.UN_TOKENIZED);
f2 = new Field("bookname", "钢铁是怎样炼成的", Field.Store.YES, Field.Index.TOKENIZED);
f3 = new Field("publishdate", "1970-01-11", Field.Store.YES, Field.Index.UN_TOKENIZED);
f4 = new Field("securitylevel", SECURITY_MIDDLE + "", Field.Store.YES, Field.Index.UN_TOKENIZED);
doc6.add(f1);
doc6.add(f2);
doc6.add(f3);
doc6.add(f4);


Document doc7 = new Document();
f1 = new Field("bookNumber", "0000002", Field.Store.YES, Field.Index.UN_TOKENIZED);
f2 = new Field("bookname", "白毛女", Field.Store.YES, Field.Index.TOKENIZED);
f3 = new Field("publishdate", "1977-09-07", Field.Store.YES, Field.Index.UN_TOKENIZED);
f4 = new Field("securitylevel", SECURITY_NORMAL + "", Field.Store.YES, Field.Index.UN_TOKENIZED);
doc7.add(f1);
doc7.add(f2);
doc7.add(f3);
doc7.add(f4);
writer.addDocument(doc1);
writer.addDocument(doc2);
writer.addDocument(doc3);
writer.addDocument(doc4);
writer.addDocument(doc5);
writer.addDocument(doc6);
writer.addDocument(doc7);
writer.close();

//搜索时启动过滤器
Term begin = new Term("publishdate", "1900-01-01");
Term end = new Term("publishdate", "2000-01-01");

RangeQuery q = new RangeQuery(begin, end, true);

IndexSearcher searcher = new IndexSearcher(INDEX_STORE_PATH);

//使用了AdvancedSecurityFilter来作为检索结果的过滤器
Hits hits = searcher.search(q, new AdvancedSecurityFilter());

for(int i = 0; i < hits.length(); i++){
Document doc = hits.doc(i);
System.out.print("书号:");
System.out.println(doc.get("bookNumber"));
System.out.print("书名:");
System.out.println(doc.get("bookname"));
System.out.print("发布日期:");
System.out.println(doc.get("publishdate"));
System.out.print("安全级别:");
System.out.print(doc.get("securitylevel"));
int level = Integer.parseInt(doc.get("securitylevel"));
switch(level){
case SECURITY_ADVANCED:
System.out.println("高级");
break;
case SECURITY_MIDDLE:
System.out.println("中级");
break;

case SECURITY_NORMAL:
System.out.println("一般");
break;
}
System.out.println("========================");
}
}catch(IOException e){
e.printStackTrace();
}


}

//级别过滤器
public class AdvancedSecurityFilter extends Filter{
//安全级别常量
public static final int SECURITY_ADVANCED = 0;

@Override
public BitSet bits(IndexReader reader) throws IOException {
// TODO Auto-generated method stub

//首先初始化一个BigSet对象
final BitSet bits = new BitSet(reader.maxDoc());

//先将整个集合置为true
//表示当前集合内的所有文档都可以被检索到
bits.set(0, bits.size() - 1);

//构造一个Term对象,代表最高安全级别
Term term = new Term("securitylevel", SECURITY_ADVANCED + "");

//从索引中取出具有最高安全级别的文档
TermDocs termDocs = reader.termDocs(term);


//方法一

//遍历每个文档
while(termDocs.next()){
bits.set(termDocs.doc(), false);

}


/*

//方法二,打印出工作过程
//初始化一个IndexSearcher对象
IndexSearcher searcher = new IndexSearcher(reader);

Hits hits = searcher.search(new TermQuery(term));
for(int i = 0; i < hits.length(); i++){
//遍历结果,并将bits中相应的值赋给false
bits.set(hits.id(i), false);
}


*/

return bits;

}



}
public static void main(String[] args) {
// TODO Auto-generated method stub
FilterTwo ft = new FilterTwo("F:\\Lucene项目\\索引文件");
}


}
0 0
原创粉丝点击