Implementing a Simple Web Crawler with HttpClient


Approach: fetch the page content -> parse out the addresses of the target resources -> download the target resources.
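The class below uses the legacy Apache Commons HttpClient 3.x API (the org.apache.commons.httpclient package). Before reading it in full, here is a minimal, self-contained sketch of the parsing step on its own: a regular expression pulls every .jpg URL out of a page string. The HTML fragment and host names here are made up purely for illustration.

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class JpgUrlExtractorDemo {
    public static void main(String[] args) {
        // hypothetical page fragment, for illustration only
        String page = "<img src=\"http://img.example.com/a.jpg\">"
                + "<img src=\"http://img.example.com/b.jpg\">";
        // the character class excludes whitespace, quotes and angle brackets
        // so a match cannot run across several HTML attributes
        Pattern pattern = Pattern.compile("http://[^\\s\"'<>]*?\\.jpg");
        Matcher matcher = pattern.matcher(page);
        List<String> urls = new ArrayList<String>();
        while (matcher.find()) {
            urls.add(matcher.group()); // each match is one image URL
        }
        System.out.println(urls); // [http://img.example.com/a.jpg, http://img.example.com/b.jpg]
    }
}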

package com.zhiwei.common.httpClient;

import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URL;
import java.net.URLConnection;
import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

import org.apache.commons.httpclient.HttpClient;
import org.apache.commons.httpclient.HttpStatus;
import org.apache.commons.httpclient.methods.GetMethod;

public class ReptileThread implements Runnable {

    private String destinationUrl; // address of the target site
    private String fileReg;        // regex that extracts the file name from a URL (default: .jpg)
    private String fileDir;        // local directory for the downloaded files

    public ReptileThread(String destinationUrl, String fileReg, String fileDir) {
        this.destinationUrl = destinationUrl;
        this.fileReg = fileReg;
        this.fileDir = fileDir;
    }

    // default configuration
    public ReptileThread() {
        this("http://www.qq.com", "[^/]*\\.jpg", "d:/temp");
    }

    // extract the file name from a URL with a regex
    public static String getFileName(String fileReg, String url) {
        Matcher matcher = Pattern.compile(fileReg).matcher(url);
        if (!matcher.find()) { // guard against IllegalStateException on group()
            throw new IllegalArgumentException("no file name found in URL: " + url);
        }
        return matcher.group();
    }

    // collect the download addresses of all images on the page
    public List<String> getImgAddressList(String url) throws IOException {
        String page = getNetPageContent(url);
        // exclude whitespace, quotes and angle brackets so a match
        // cannot run across several HTML attributes
        Pattern pattern = Pattern.compile("http://[^\\s\"'<>]*?\\.jpg");
        Matcher matcher = pattern.matcher(page);
        List<String> lists = new ArrayList<String>();
        while (matcher.find()) {        // match every occurrence
            lists.add(matcher.group()); // record the matched URL
        }
        return lists;
    }

    // fetch the raw page content
    public String getNetPageContent(String url) throws IOException {
        HttpClient httpClient = new HttpClient();
        GetMethod httpGet = new GetMethod(url);
        try {
            int httpStatus = httpClient.executeMethod(httpGet);
            if (httpStatus == HttpStatus.SC_OK) {
                // decode the response bytes explicitly instead of
                // round-tripping through the platform default charset
                return new String(httpGet.getResponseBody(), "UTF-8");
            }
            return "";
        } finally {
            httpGet.releaseConnection();
        }
    }

    // download the images one by one
    public int downImgFromNet(String fileReg, List<String> urlList, String fileDir) throws Exception {
        File dir = new File(fileDir);
        if (!dir.exists()) {
            dir.mkdirs();
        }
        int count = 0;
        for (String url : urlList) {
            String fileName = getFileName(fileReg, url);
            URLConnection urlConnection = new URL(url).openConnection();
            BufferedInputStream bis = new BufferedInputStream(urlConnection.getInputStream());
            FileOutputStream fos = new FileOutputStream(new File(dir, fileName));
            try {
                byte[] buf = new byte[1024];
                int len;
                // write only the bytes actually read, not the whole buffer
                while ((len = bis.read(buf)) != -1) {
                    fos.write(buf, 0, len);
                }
            } finally {
                // close the streams per file, not once after the loop
                bis.close();
                fos.close();
            }
            count++;
            System.out.println("image " + count + " downloaded: " + fileName);
        }
        return count;
    }

    @Override
    public void run() {
        try {
            List<String> lists = getImgAddressList(destinationUrl);
            int count = downImgFromNet(fileReg, lists, fileDir);
            System.out.println(count + " file(s) downloaded to " + fileDir);
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    public static void main(String[] args) {
        // start the crawler on its own thread rather than calling run() directly
        new Thread(new ReptileThread()).start();
    }
}
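To crawl a different site, pass your own parameters to the three-argument constructor. The URL and directory below are placeholders, not addresses from the original article:

// placeholder target site, file-name regex, and output directory
Runnable crawler = new ReptileThread("http://www.example.com/photos", "[^/]*\\.jpg", "d:/temp/images");
new Thread(crawler).start();

Because ReptileThread implements Runnable, each instance can be handed to its own Thread, so several sites can be crawled concurrently.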