A web crawler demo in Java


How a web crawler works:

A web crawler, also called a spider, works roughly like this. Step 1: the spider finds pages by following link addresses, starting from some page of a site (usually the home page); it reads that page's content and collects the other link addresses it contains. Step 2: it follows those links to the next pages and keeps looping until every page of the site has been fetched. Step 3: from the fetched pages it extracts the information you want according to rules specific to those pages. If you treat the whole Internet as one site, a spider can in principle fetch every page on it. So to harvest data from the web you need not only the crawler itself but also a server that can receive and filter the data the crawler sends back; the more data the crawler fetches, the higher the demands on that server's performance.
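Put as code, the principle above is just a loop over a frontier of URLs plus a set of pages already seen. Here is a minimal single-threaded sketch using plain java.util collections; fetch(url) and extractLinks(html) stand in for the download and parsing steps, unlike the multithreaded demo below:

import java.util.*;

// Minimal single-threaded sketch of the crawl loop described above.
public abstract class CrawlSketch {
    abstract String fetch(String url);
    abstract List<String> extractLinks(String html);

    public void crawl(String seed) {
        Deque<String> frontier = new ArrayDeque<>();   // step 1: start from one page
        Set<String> visited = new HashSet<>();
        frontier.add(seed);
        while (!frontier.isEmpty()) {                  // step 2: follow links until the site is exhausted
            String url = frontier.poll();
            if (!visited.add(url)) continue;           // skip pages already fetched
            String html = fetch(url);
            for (String link : extractLinks(html)) {   // step 3: extract what you need, here the links
                if (!visited.contains(link)) frontier.add(link);
            }
        }
    }
}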

Based on this principle, we can write a simple crawler program. The demo fetches the data a website returns and extracts the URLs it contains; the URLs we collect go into a queue, and the matching pages can optionally be written out to a folder on disk. How to keep looping over those URLs to fetch further pages and pull other data out of them is not elaborated here; the point is only to model the simplest form of the principle. Real crawlers are far more complex than this, and a full discussion would go too deep. Besides URLs, we could of course extract any other information we are interested in.



Project structure: the com.sun.crawl package contains DownloadPage, FoodMessageBean, FunctionUtils, HrefOfPage, Test, UrlDataHanding, UrlQueue and VisitedUrlQueue.


Source code:
package com.sun.crawl;

import org.apache.http.HttpEntity;
import org.apache.http.HttpResponse;
import org.apache.http.client.ClientProtocolException;
import org.apache.http.client.HttpClient;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.DefaultHttpClient;
import org.apache.http.util.EntityUtils;

import java.io.IOException;

/**
 * Downloads the page source behind a hyperlink.
 */
public class DownloadPage {

    /**
     * Fetch the page content for a URL.
     *
     * @param url
     * @return
     */
    public static String getContentFormUrlAndDownloadGoalContent(String url) {
        /* Create an HttpClient instance */
        HttpClient client = new DefaultHttpClient();
        HttpGet getHttp = new HttpGet(url);

        String content = null;
        HttpResponse response;
        try {
            /* Execute the request and get the response entity */
            response = client.execute(getHttp);
            HttpEntity entity = response.getEntity();
            VisitedUrlQueue.addElem(url);
            if (entity != null) {
                /* Convert the entity to text */
                content = EntityUtils.toString(entity);

                /* Check whether the page qualifies for saving its source locally */
                if (FunctionUtils.isCreateFile(url)
                        && FunctionUtils.isHasGoalContent(content) != -1) {
                    /*
                    // Save the captured data to disk (under drive D:)
                    FunctionUtils.createFile(
                            FunctionUtils.getGoalContent(content), url);
                    */
                    FoodMessageBean foodMessageBean = FunctionUtils.CutHtml(FunctionUtils.getGoalContent(content));
                }
            }
        } catch (ClientProtocolException e) {
            e.printStackTrace();
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            client.getConnectionManager().shutdown();
        }
        return content;
    }
}
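A side note: DefaultHttpClient is deprecated in newer versions of Apache HttpClient (4.3 and later). A minimal sketch of the same download step using CloseableHttpClient, assuming the 4.3+ dependency is on the classpath; the class name DownloadPageModern is invented here for illustration:

package com.sun.crawl;

import org.apache.http.client.methods.CloseableHttpResponse;
import org.apache.http.client.methods.HttpGet;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.util.EntityUtils;

import java.io.IOException;

// Hypothetical alternative to DownloadPage built on the non-deprecated 4.3+ API.
public class DownloadPageModern {
    public static String fetch(String url) throws IOException {
        // try-with-resources closes both the client and the response automatically
        try (CloseableHttpClient client = HttpClients.createDefault();
             CloseableHttpResponse response = client.execute(new HttpGet(url))) {
            return response.getEntity() != null
                    ? EntityUtils.toString(response.getEntity())
                    : null;
        }
    }
}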

package com.sun.crawl;

/**
 * Created by lenovo on 2017/8/3.
 */
public class FoodMessageBean {
    private String name = "";
    private String property = "";
    private String content = "";

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    public String getProperty() {
        return property;
    }

    public void setProperty(String property) {
        this.property = property;
    }

    public String getContent() {
        return content;
    }

    public void setContent(String content) {
        this.content = content;
    }

    @Override
    public String toString() {
        return "FoodMessageBean{" +
                "name='" + name + '\'' +
                ", property='" + property + '\'' +
                ", content='" + content + '\'' +
                '}';
    }
}

package com.sun.crawl;

import java.io.*;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class FunctionUtils {

    public static FoodMessageBean CutHtml(String content) {
        FoodMessageBean f = new FoodMessageBean();
        // Strip the HTML tags from the article content
        String[] split = content.split(":</h3>");
        String s = split[0].replaceAll("</?[^<]+>", "");
        String[] NameAndpPoperty = s.split("的");
        f.setName(NameAndpPoperty[0]);
        f.setProperty(NameAndpPoperty[1]);
        String s1 = split[1].replaceAll("</?[^<]+>", "");
        f.setContent(s1);
        return f;
    }

    /**
     * Regular expression for matching hyperlinks
     */
    private static String pat = "http://sc\\.zuofan\\.cn/[a-zA-Z]/[a-zA-Z]/[a-zA-Z]/*.html";
    private static Pattern pattern = Pattern.compile(pat);

    public static void main(String[] args) {
        String url = "http://sc.zuofan.cn/guopin/shuiguo/hongsheguo/171523.html";
        boolean b = url.startsWith("http://sc.zuofan.cn/");
        boolean b1 = url.endsWith(".html");
        System.out.println(b & b1);
    }

    private static BufferedWriter writer = null;

    /**
     * Crawl depth
     */
    public static int depth = 0;

    /**
     * Split the URL on "/" to get its path elements
     *
     * @param url
     * @return
     */
    public static String[] divUrl(String url) {
        return url.split("/");
    }

    /**
     * Decide whether a file should be created for this URL
     *
     * @param url
     * @return
     */
    public static boolean isCreateFile(String url) {
        /* Matcher matcher = pattern.matcher(url);
        return matcher.matches(); */
        boolean b = url.startsWith("http://sc.zuofan.cn/");
        boolean b1 = url.endsWith(".html");
        return b & b1;
    }

    /**
     * Create the corresponding file
     *
     * @param content
     * @param urlPath
     */
    public static void createFile(String content, String urlPath) {
        /* Split the URL */
        String[] elems = divUrl(urlPath);
        StringBuffer path = new StringBuffer();

        File file = null;
        for (int i = 1; i < elems.length; i++) {
            if (i != elems.length - 1) {
                path.append(elems[i]);
                path.append(File.separator);
                file = new File("D:" + File.separator + path.toString());
            }
            if (i == elems.length - 1) {
                Pattern pattern = Pattern.compile("\\w+\\.[a-zA-Z]+");
                Matcher matcher = pattern.matcher(elems[i]);
                if ((matcher.matches())) {
                    if (!file.exists()) {
                        file.mkdirs();
                    }
                    String[] fileName = elems[i].split("\\.");
                    file = new File("D:" + File.separator + path.toString()
                            + File.separator + fileName[0] + ".txt");
                    try {
                        file.createNewFile();
                        writer = new BufferedWriter(new OutputStreamWriter(
                                new FileOutputStream(file)));
                        writer.write(content);
                        writer.flush();
                        System.out.println("File created successfully");
                    } catch (IOException e) {
                        e.printStackTrace();
                    } finally {
                        try {
                            writer.close();
                        } catch (IOException e) {
                            e.printStackTrace();
                        }
                    }
                }
            }
        }
    }

    /**
     * Take a hyperlink from the page and turn it into a proper absolute link
     *
     * @param href
     * @return
     */
    public static String getHrefOfInOut(String href) {
        /* Both internal and external links end up as complete URLs */
        String resultHref = null;
        /* Check whether it is an external link */
        if (href.startsWith("http://")) {
            resultHref = href;
        } else {
            /* If it is an internal link, prepend the full site address; other forms, e.g. a href="#", are ignored */
            if (href.startsWith("/")) {
                resultHref = "http://sc.zuofan.cn/" + href;
            }
        }
        return resultHref;
    }

    /**
     * Extract the target content from the page source
     *
     * @param content
     * @return
     */
    public static String getGoalContent(String content) {
        int sign = content.indexOf("<div class=\"detailsDiv3 of\">");
        String signContent = content.substring(sign);
        int start = signContent.indexOf(">");
        int end = signContent.indexOf("</div>");
        return signContent.substring(start + 1, end);
    }

    /**
     * Check whether the page source contains the target content
     *
     * @param content
     * @return
     */
    public static int isHasGoalContent(String content) {
        return content.indexOf("<a class=\"detailsA1 fl f12 tc h30 lh30\" href=\"");
    }
}
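To see what CutHtml expects, here is a small usage sketch; the HTML fragment is invented to match the markers the method splits on (":</h3>" and the "的" separator) and is not taken from the real site:

package com.sun.crawl;

public class CutHtmlDemo {
    public static void main(String[] args) {
        // Hypothetical fragment shaped like the markup CutHtml parses; the real page may differ.
        String html = "<h3>红蛇果的功效:</h3><p>富含维生素C,有助于消化</p>";
        FoodMessageBean bean = FunctionUtils.CutHtml(html);
        System.out.println(bean);
        // Should print something like:
        // FoodMessageBean{name='红蛇果', property='功效', content='富含维生素C,有助于消化'}
    }
}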

package com.sun.crawl;

public class HrefOfPage {

    /**
     * Extract the hyperlinks from the page source
     */
    public static void getHrefOfContent(String content) {
        System.out.println("Start");
        // The links we are after
        String[] contents = content.split("<a class=\"detailsA1 fl f12 tc h30 lh30\" href=\"");
        if (contents.length == 1) {
            // Pick the needed links out of these anchors (<a href=")
            contents = content.split("<dl class=\"cateDl1 fl ml10\">\n" +
                    "                        <a href=\"");
            String[] split = content.split("<a class=\"page-link\" href=\"");
            contents = concat(contents, split);
        }
        for (int i = 1; i < contents.length; i++) {
            int endHref = contents[i].indexOf("\"");
            String aHref = FunctionUtils.getHrefOfInOut(contents[i].substring(
                    0, endHref));
            if (aHref != null) {
                String href = FunctionUtils.getHrefOfInOut(aHref);
                if (!UrlQueue.isContains(href)
                        && !VisitedUrlQueue.isContains(href)
                        && (href.startsWith("http://sc.zuofan.cn/"))) {
                    UrlQueue.addElem(href);
                }
            }
        }
        System.out.println(UrlQueue.size() + " -- links collected so far");
        System.out.println(VisitedUrlQueue.size() + " -- pages already processed");
    }

    static String[] concat(String[] a, String[] b) {
        String[] c = new String[a.length + b.length];
        System.arraycopy(a, 0, c, 0, a.length);
        System.arraycopy(b, 0, c, a.length, b.length);
        return c;
    }
}
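Splitting on exact class attributes is brittle: the moment the site changes its markup, the crawler finds nothing. As an alternative sketch (not part of the original demo; the class HrefRegexExtractor is invented for illustration), a regex over href attributes collects links more generally and reuses getHrefOfInOut for normalization:

package com.sun.crawl;

import java.util.ArrayList;
import java.util.List;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Hypothetical helper: collect every href="..." value in the page source with a regex
// instead of splitting on one specific class attribute.
class HrefRegexExtractor {
    private static final Pattern HREF = Pattern.compile("href=\"([^\"]+)\"");

    static List<String> extract(String content) {
        List<String> links = new ArrayList<>();
        Matcher m = HREF.matcher(content);
        while (m.find()) {
            String href = FunctionUtils.getHrefOfInOut(m.group(1));
            if (href != null && href.startsWith("http://sc.zuofan.cn/")) {
                links.add(href);
            }
        }
        return links;
    }
}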

package com.sun.crawl;

public class Test {
    public static void main(String[] args) {
        String url = "http://sc.zuofan.cn/wugu/";
        UrlQueue.addElem(url);
        String url2 = "http://sc.zuofan.cn/guopin/";
        UrlQueue.addElem(url2);
        String url3 = "http://sc.zuofan.cn/yulei/";
        UrlQueue.addElem(url3);
        String url4 = "http://sc.zuofan.cn/dannai/";
        UrlQueue.addElem(url4);
        String url5 = "http://sc.zuofan.cn/tiaowei/";
        UrlQueue.addElem(url5);
        String url6 = "http://sc.zuofan.cn/yao/";
        UrlQueue.addElem(url6);
        String url7 = "http://sc.zuofan.cn/dzp/";
        UrlQueue.addElem(url7);

        UrlDataHanding[] url_Handings = new UrlDataHanding[100];
        for (int i = 0; i < 100; i++) {
            url_Handings[i] = new UrlDataHanding();
            new Thread(url_Handings[i]).start();
        }
    }
}
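Starting 100 raw threads is fine for a demo, but a thread pool is easier to size and shut down. A minimal alternative entry point (not in the original code; the class name TestWithPool and the seed URLs shown are just for illustration) using java.util.concurrent:

package com.sun.crawl;

import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;

// Hypothetical alternative entry point that runs the same workers on a fixed-size pool.
public class TestWithPool {
    public static void main(String[] args) throws InterruptedException {
        UrlQueue.addElem("http://sc.zuofan.cn/wugu/");
        UrlQueue.addElem("http://sc.zuofan.cn/guopin/");

        ExecutorService pool = Executors.newFixedThreadPool(10);
        for (int i = 0; i < 10; i++) {
            pool.execute(new UrlDataHanding());
        }
        pool.shutdown();                        // accept no new tasks; workers exit when the queue drains
        pool.awaitTermination(1, TimeUnit.HOURS);
    }
}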

package com.sun.crawl;

public class UrlDataHanding implements Runnable {

    /**
     * Download the page for the given URL and put the URLs found on it into the unvisited queue.
     *
     * @param url
     */
    public void dataHanding(String url) {
        HrefOfPage.getHrefOfContent(DownloadPage.getContentFormUrlAndDownloadGoalContent(url));
    }

    public void run() {
        while (!UrlQueue.isEmpty()) {
            dataHanding(UrlQueue.outElem());
        }
    }
}
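One caveat with many workers: isEmpty() and outElem() are two separate synchronized calls, so between one worker's check and its removal another worker can drain the queue, and removeFirst() will then throw NoSuchElementException. A small sketch of one way around this, assuming UrlQueue is given a pollElem() helper (this method is not in the original code):

// Hypothetical addition to UrlQueue: check and remove in one synchronized step,
// returning null instead of throwing when the queue is empty.
public synchronized static String pollElem() {
    return urlQueue.isEmpty() ? null : urlQueue.removeFirst();
}

// UrlDataHanding.run() rewritten to use it.
public void run() {
    String url;
    while ((url = UrlQueue.pollElem()) != null) {
        dataHanding(url);
    }
}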

package com.sun.crawl;

import java.util.LinkedList;

public class UrlQueue {
    /**
     * Queue of hyperlinks waiting to be visited
     */
    public static LinkedList<String> urlQueue = new LinkedList<String>();

    /**
     * Maximum number of hyperlinks the queue may hold
     */
    public static final int MAX_SIZE = 10000;

    public synchronized static void addElem(String url) {
        urlQueue.add(url);
    }

    public synchronized static String outElem() {
        return urlQueue.removeFirst();
    }

    public synchronized static boolean isEmpty() {
        return urlQueue.isEmpty();
    }

    public static int size() {
        return urlQueue.size();
    }

    public static boolean isContains(String url) {
        return urlQueue.contains(url);
    }
}
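MAX_SIZE is declared above but never checked, so the frontier can grow without bound. If the cap is meant to be enforced, a guard in addElem would be enough; this is a possible change, not the original behavior:

// Hypothetical change to addElem so the declared cap actually limits the queue.
public synchronized static void addElem(String url) {
    if (urlQueue.size() < MAX_SIZE) {   // drop new links once the frontier is full
        urlQueue.add(url);
    }
}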

package com.sun.crawl;

import java.util.HashSet;

/**
 * Set of URLs that have already been visited
 *
 * @author liuyazhuang
 */
public class VisitedUrlQueue {
    public static HashSet<String> visitedUrlQueue = new HashSet<String>();

    public synchronized static void addElem(String url) {
        visitedUrlQueue.add(url);
    }

    public synchronized static boolean isContains(String url) {
        return visitedUrlQueue.contains(url);
    }

    public synchronized static int size() {
        return visitedUrlQueue.size();
    }
}