import java.io.*;
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
//import java.util.logging.Level;
//import java.util.logging.Logger;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
/**
 * A simple single-threaded web crawler. Starting from {@code startUrl} it
 * downloads pages, extracts anchor links, follows them (breadth-first via a
 * LinkedHashSet frontier) up to {@code maxUrl} pages, and records every URL
 * whose page contains all whitespace-separated terms of {@code searchString}.
 * Downloaded pages, discovered links and matching URLs are saved as text
 * files under {@code outputdir}.
 *
 * <p>Not thread-safe: one crawl per instance.
 */
public class SearchCrawler implements Runnable {

    // Anchor-tag matcher used to pull href values out of downloaded pages.
    // Compiled once (regex compilation is expensive in a loop).
    // NOTE(review): the pattern in the received source was corrupted ("]");
    // reconstructed as the conventional <a href=...> extractor — confirm intent.
    private static final Pattern LINK_PATTERN =
            Pattern.compile("<a\\s+href\\s*=\\s*\"?(.*?)[\"|>]", Pattern.CASE_INSENSITIVE);

    // Splits the search string into individual terms on runs of whitespace.
    private static final Pattern WHITESPACE = Pattern.compile("[\\s]+");

    // Per-host robots.txt disallow lists. The robots check in crawl() is
    // commented out, so this cache is currently unused but kept for when it
    // is re-enabled.
    private HashMap<String, ArrayList<String>> disallowListCache =
            new HashMap<String, ArrayList<String>>();

    ArrayList<String> errorList = new ArrayList<String>(); // accumulated error messages
    ArrayList<String> result = new ArrayList<String>();    // URLs whose pages matched
    String startUrl;      // URL the crawl starts from
    int maxUrl;           // maximum number of URLs to crawl
    String searchString;  // whitespace-separated terms to search for
    boolean caseSensitive = false; // whether term matching is case sensitive
    boolean limitHost = false;     // whether to stay on the start URL's host
    String outputdir;     // base directory all output files are written under

    /**
     * @param startUrl     URL the crawl starts from (must be http://)
     * @param maxUrl       maximum number of URLs to visit
     * @param searchString whitespace-separated search terms
     * @param outputdir    directory output files are written under
     */
    public SearchCrawler(String startUrl, int maxUrl, String searchString, String outputdir) {
        this.startUrl = startUrl;
        this.maxUrl = maxUrl;
        this.searchString = searchString;
        this.outputdir = outputdir;
    }

    /** Returns the matching URLs collected so far (the live list, not a copy). */
    public ArrayList<String> getResult() {
        return result;
    }

    /** Thread entry point: runs the crawl with this instance's settings. */
    public void run() {
        crawl(startUrl, maxUrl, searchString, limitHost, caseSensitive);
    }

    /**
     * Validates a URL string. Only {@code http://} URLs are accepted.
     *
     * @return the parsed URL, or null when the string is not a valid http URL
     */
    private URL verifyUrl(String url) {
        if (!url.toLowerCase().startsWith("http://")) {
            return null;
        }
        try {
            return new URL(url);
        } catch (Exception e) {
            return null; // malformed URL — treat as invalid rather than propagate
        }
    }

    /**
     * Downloads the page at {@code pageUrl} and returns its content with line
     * breaks removed (lines are concatenated).
     *
     * @return the page text, or null on any I/O failure (best-effort download)
     */
    private String downloadPage(URL pageUrl) {
        // try-with-resources: the original leaked the reader on error paths.
        try (BufferedReader reader =
                new BufferedReader(new InputStreamReader(pageUrl.openStream()))) {
            StringBuffer pageBuffer = new StringBuffer();
            String line;
            while ((line = reader.readLine()) != null) {
                pageBuffer.append(line);
            }
            return pageBuffer.toString();
        } catch (Exception e) {
            // Report instead of silently swallowing; the caller treats null
            // as "page unavailable" and moves on.
            System.out.println("download failed: " + pageUrl + " (" + e + ")");
            return null;
        }
    }

    /**
     * Strips a leading "www." from the host part so that equivalent URLs
     * (with and without www) compare equal in the crawled/frontier sets.
     */
    private String removeWwwFromUrl(String url) {
        int index = url.indexOf("://www.");
        if (index != -1) {
            return url.substring(0, index + 3) + url.substring(index + 7);
        }
        return url;
    }

    /**
     * Extracts all crawlable links from a page: resolves relative links
     * against {@code pageUrl}, drops anchors/mailto/javascript links and
     * fragments, normalizes away "www.", and skips already-crawled URLs.
     * Each accepted link is also saved to a numbered file under outputdir.
     *
     * @param pageUrl      URL the page was downloaded from (base for relatives)
     * @param pageContents the page text
     * @param crawledList  URLs already visited (accepted links must not be in it)
     * @param limitHost    when true, only links on pageUrl's host are accepted
     * @return the accepted links, in document order
     */
    private ArrayList<String> retrieveLinks(URL pageUrl, String pageContents,
            HashSet<String> crawledList, boolean limitHost) {
        int filepointer = 0; // sequence number for the saved link files
        Matcher m = LINK_PATTERN.matcher(pageContents);
        ArrayList<String> linkList = new ArrayList<String>();
        while (m.find()) {
            String link = m.group(1).trim();
            if (link.length() < 1) {
                continue;
            }
            // Skip in-page anchors, mail links and javascript pseudo-links.
            if (link.charAt(0) == '#') {
                continue;
            }
            if (link.indexOf("mailto:") != -1) {
                continue;
            }
            if (link.toLowerCase().indexOf("javascript") != -1) {
                continue;
            }
            // Relative link: rebuild an absolute http URL from the page's host.
            if (link.indexOf("://") == -1) {
                if (link.charAt(0) == '/') {
                    // Host-relative path.
                    link = "http://" + pageUrl.getHost() + ":"
                            + pageUrl.getPort() + link;
                } else {
                    String file = pageUrl.getFile();
                    if (file.indexOf('/') == -1) {
                        // Page is at the site root.
                        link = "http://" + pageUrl.getHost() + ":"
                                + pageUrl.getPort() + "/" + link;
                    } else {
                        // Resolve against the page's directory.
                        String path = file.substring(0, file.lastIndexOf('/') + 1);
                        link = "http://" + pageUrl.getHost() + ":"
                                + pageUrl.getPort() + path + link;
                    }
                }
            }
            // Drop any fragment part.
            int index = link.indexOf('#');
            if (index != -1) {
                link = link.substring(0, index);
            }
            link = removeWwwFromUrl(link);
            URL verifiedLink = verifyUrl(link);
            if (verifiedLink == null) {
                continue;
            }
            // When restricted to one host, skip links pointing elsewhere.
            if (limitHost
                    && !pageUrl.getHost().toLowerCase().equals(
                            verifiedLink.getHost().toLowerCase())) {
                continue;
            }
            // Skip URLs we have already visited.
            if (crawledList.contains(link)) {
                continue;
            }
            linkList.add(link);
            System.out.println("检索到的URL:" + link);
            try {
                savetofile(link, "//爬取到的URL" + "//" + filepointer + ".txt");
            } catch (Exception ex) {
                System.out.println(ex.getMessage());
            }
            filepointer++;
        }
        return linkList;
    }

    /**
     * Returns true when the page contains every whitespace-separated term of
     * {@code searchString} (case-insensitively unless {@code caseSensitive}).
     */
    private boolean searchStringMatches(String pageContents,
            String searchString, boolean caseSensitive) {
        String searchContents = pageContents;
        if (!caseSensitive) {
            // Compare everything in lower case.
            searchContents = pageContents.toLowerCase();
        }
        String[] terms = WHITESPACE.split(searchString);
        // Fix: the original printed the array reference, not its contents.
        System.out.println("需要检索的词为:" + Arrays.toString(terms));
        for (int i = 0; i < terms.length; i++) {
            // All terms must be present; bail out on the first missing one.
            if (caseSensitive) {
                if (searchContents.indexOf(terms[i]) == -1) {
                    return false;
                }
            } else {
                if (searchContents.indexOf(terms[i].toLowerCase()) == -1) {
                    return false;
                }
            }
        }
        return true;
    }

    /**
     * Main crawl loop. Validates arguments, then repeatedly takes the oldest
     * URL off the frontier, downloads it, saves it, enqueues its links and
     * records it when its page matches the search string.
     *
     * @param startUrl     crawl entry point
     * @param maxUrls      crawl budget; -1 would mean unlimited but is
     *                     rejected by the {@code < 1} validation below
     * @param searchString whitespace-separated terms
     * @param limithost    restrict the crawl to startUrl's host
     * @param caseSensitive case-sensitive term matching
     * @return the error list when validation fails, otherwise the matching URLs
     */
    public ArrayList<String> crawl(String startUrl, int maxUrls,
            String searchString, boolean limithost, boolean caseSensitive) {
        int file_pointer = 0; // sequence number for saved page files
        HashSet<String> crawledList = new HashSet<String>();           // visited URLs
        LinkedHashSet<String> toCrawlList = new LinkedHashSet<String>(); // frontier (FIFO)
        if (maxUrls < 1) {
            errorList.add("Invalid Max URLs value.");
            System.out.println("Invalid Max URLs value.");
        }
        if (searchString.length() < 1) {
            errorList.add("Missing Search String.");
            System.out.println("Missing search String");
        }
        if (errorList.size() > 0) {
            System.out.println("err!!!");
            return errorList;
        }
        // Normalize the start URL and seed the frontier.
        startUrl = removeWwwFromUrl(startUrl);
        toCrawlList.add(startUrl);
        while (toCrawlList.size() > 0) {
            // Stop once the crawl budget is exhausted.
            if (maxUrls != -1) {
                if (crawledList.size() == maxUrls) {
                    break;
                }
            }
            // Oldest entry of the LinkedHashSet frontier.
            String url = toCrawlList.iterator().next();
            toCrawlList.remove(url);
            URL verifiedUrl = verifyUrl(url);
            if (verifiedUrl == null) {
                // Fix: the original passed null on to downloadPage.
                continue;
            }
            //if(!isRobotAllowed(verifiedUrl)) {
            // continue;
            //}// robots.txt check (disabled)
            crawledList.add(url);
            String pageContents = downloadPage(verifiedUrl);
            if (pageContents != null && pageContents.length() > 0) {
                // Fix: only save non-null content (the original wrote null and
                // swallowed the resulting NullPointerException).
                try {
                    savetofile(pageContents, "//抓取的网页内容//" + file_pointer);
                } catch (Exception e) {
                    System.out.println(e.getMessage());
                }
                file_pointer++;
                // Fix: honor the limithost parameter (the original ignored it
                // and used the field).
                ArrayList<String> links = retrieveLinks(verifiedUrl,
                        pageContents, crawledList, limithost);
                toCrawlList.addAll(links);
                if (searchStringMatches(pageContents, searchString,
                        caseSensitive)) {
                    result.add(url); // page matched — record its URL
                    System.out.println("网页中检索到的包含检索词的有效URL:" + url);
                    try {
                        savetofile(url, "//含有检索词的URL//result.txt");
                    } catch (Exception ex) {
                        System.out.println(ex.getMessage());
                    }
                }
            }
        }
        return result;
    }

    /**
     * Writes {@code content} to {@code outputdir + filename}, appending ".txt"
     * unless the name already ends with it (the original appended
     * unconditionally, producing names like "0.txt.txt"). Missing parent
     * directories are created so writes into subfolders don't fail.
     *
     * @throws Exception on any I/O error
     */
    public void savetofile(String content, String filename) throws Exception {
        String name = filename.endsWith(".txt") ? filename : filename + ".txt";
        File out = new File(outputdir + name);
        if (out.getParentFile() != null) {
            out.getParentFile().mkdirs();
        }
        // try-with-resources: the original leaked the writer on exception.
        try (BufferedWriter writer = new BufferedWriter(new FileWriter(out))) {
            writer.write(content);
        }
    }

    /** Demo entry point: crawls one page and waits for the crawl thread. */
    public static void main(String[] args) throws Exception {
        File filepath = new File("D:\\spider\\");
        String path = filepath.getAbsolutePath();
        SearchCrawler crawler = new SearchCrawler(
                "http://bbs.rednet.cn/forum-69-1.html", 1, "abc", path);
        Thread search = new Thread(crawler);
        System.out.println("正在查找...");
        System.out.println("结果:");
        search.start();
        try {
            search.join(); // wait for the crawl thread to finish
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore interrupt status
        }
    }
}