Developing a Python Crawler to Scrape Baidu Baike Entries (Source Code Download)


Below we use Python to build a web crawler that scrapes entry information from Baidu Baike. The program is made up of a URL manager, an HTML downloader, an HTML parser, an HTML outputer, and a scheduler that drives them:

Program structure:


spider_main.py: the crawler's scheduler

url_manager.py: the crawler's URL manager; it maintains two sets, one of URLs still to be crawled and one of URLs already crawled

html_downloader.py: the HTML downloader

html_parser.py: the HTML content parser

html_outputer.py: collects and displays the results


[Crawler flowchart]

spider_main.py [scheduler]:

# coding=utf-8
"""The scheduler. Crawling proceeds in four steps:
(1) ask the URL manager for a URL that still needs to be crawled;
(2) download the page with the HTML downloader;
(3) parse the page with the HTML parser to extract the data we want plus a
    set of new URLs to crawl later;
(4) collect the extracted data and, at the end, render it to an HTML file.
"""
import url_manager
import html_downloader
import html_parser
import html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()  # the URL manager
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:  # some URLs are dead and fail to download, so catch exceptions
                # get a new, not-yet-crawled URL from the URL manager
                new_url = self.urls.get_new_url()
                print 'crawling {0} : {1}'.format(count, new_url)
                # download the HTML content behind new_url
                html_doc = self.downloader.download(new_url)
                # parse the HTML to obtain the new URLs and the data we want
                new_urls, new_data = self.parser.parse(new_url, html_doc)
                # hand the new URLs to the URL manager
                self.urls.add_new_urls(new_urls)
                # collect the data parsed from this page
                self.outputer.collect_data(new_data)
                if count == 50:  # stop after crawling 50 pages
                    break
                count += 1
            except:
                print '--- craw failed! ---'
        print 'craw finished!'
        # crawling is done; have the outputer render the results
        self.outputer.output_html()


# module entry point
if __name__ == '__main__':
    # root URL to start from: the Baidu Baike page about Python
    root_url = 'http://baike.baidu.com/view/21087.htm'
    obj_spider = SpiderMain()  # instantiate a spider
    obj_spider.craw(root_url)  # start crawling

url_manager.py [URL manager]:

# coding=utf-8


class UrlManager(object):
    def __init__(self):
        self.new_urls = set()  # URLs waiting to be crawled
        self.old_urls = set()  # URLs already crawled

    def add_new_url(self, new_url):
        if new_url is None:
            return
        # accept only URLs that are neither queued nor already crawled
        if new_url not in self.new_urls and new_url not in self.old_urls:
            self.new_urls.add(new_url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        # move one URL from the pending set to the crawled set
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url

    def add_new_urls(self, urls):
        if urls is None:
            return
        for url in urls:
            self.add_new_url(url)
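
As a quick sanity check of the two-set design: the manager silently drops any URL it has already seen, whether pending or crawled. A minimal sketch (assuming url_manager.py is on the import path):

# demo of UrlManager deduplication
import url_manager

manager = url_manager.UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_url('http://baike.baidu.com/view/21087.htm')  # duplicate, ignored

url = manager.get_new_url()  # pops the URL and records it in old_urls
manager.add_new_url(url)     # already crawled, ignored again
print manager.has_new_url()  # prints False: nothing left to crawl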

html_downloader.py [HTML downloader]:

# coding=utf-8
import urllib2


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:  # anything other than HTTP 200 counts as failure
            return None
        return response.read().decode('utf-8')
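
Note that urllib2 exists only in Python 2. If you want to run the downloader under Python 3, a minimal equivalent sketch (not part of the original source) would use urllib.request instead:

# coding=utf-8
# Python 3 sketch of the same downloader
import urllib.request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return None
        response = urllib.request.urlopen(url)
        if response.getcode() != 200:  # anything other than HTTP 200 counts as failure
            return None
        return response.read().decode('utf-8')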

html_parser.py [HTML parser]:

# coding=utf-8
import re
from bs4 import BeautifulSoup
import urlparse


class HtmlParser(object):
    def parse(self, page_url, html_doc):
        if page_url is None or html_doc is None:
            return None, None  # keep the two-value shape expected by the caller
        # html_doc is already a decoded unicode string, so no from_encoding is needed
        soup = BeautifulSoup(html_doc, 'html.parser')
        # the new URLs found in this page
        new_urls = self._get_new_urls(page_url, soup)
        # the information we want from this page
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # match nodes like <a target="_blank" href="/view/2561555.htm">...</a>
        pat = re.compile(r'/view/\d+\.htm')
        links = soup.find_all('a', href=pat)
        for link in links:
            new_url = link['href']  # the href value, e.g. /view/2561555.htm
            new_full_url = urlparse.urljoin(page_url, new_url)  # build the full URL
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        new_data = {}
        new_data['url'] = page_url
        # match <dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1> to get the title
        title_node = soup.find('dd', class_='lemmaWgt-lemmaTitle-title').find('h1')
        new_data['title'] = title_node.get_text()
        # match <div class="lemma-summary" label-module="lemmaSummary"> to get the summary
        summary_node = soup.find('div', class_='lemma-summary')
        new_data['summary'] = summary_node.get_text()
        return new_data
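
The key step above is urlparse.urljoin, which resolves the relative href values against the page's own URL:

>>> import urlparse
>>> urlparse.urljoin('http://baike.baidu.com/view/21087.htm', '/view/2561555.htm')
'http://baike.baidu.com/view/2561555.htm'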

html_outputer.py [result collection and display]:

# coding=utf-8


class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, new_data):
        if new_data is None:
            return
        self.datas.append(new_data)

    def output_html(self):
        # render the collected entries as a simple HTML table
        fout = open('output.html', 'w')
        fout.write('<html><head><meta charset="UTF-8"></head>')
        fout.write('<body>')
        fout.write('<table border="1" cellspacing="0" cellpadding="0">')
        for data in self.datas:
            fout.write('<tr>')
            fout.write('<th>{0}</th>'.format(data['title'].encode('utf-8')))
            fout.write('<td>{0}\n{1}</td>'.format(data['url'].encode('utf-8'),
                                                  data['summary'].encode('utf-8')))
            fout.write('</tr>')
        fout.write('</table>')
        fout.write('</body>')
        fout.write('</html>')
        fout.close()
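
Each item passed to collect_data is the dict built by the parser; for the root page it looks roughly like this (the summary text is elided here):

{
    'url': 'http://baike.baidu.com/view/21087.htm',
    'title': u'Python',
    'summary': u'...',  # full text of the lemma-summary div
}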

Run the script file and the crawl progress is printed to the console:
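
Given the print statements in spider_main.py, the output should look roughly like this (the exact URL sequence will vary, because the pending set is unordered):

crawling 1 : http://baike.baidu.com/view/21087.htm
crawling 2 : http://baike.baidu.com/view/2561555.htm
...
craw finished!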



Wait for "craw finished!" to be printed; the crawl is then complete and an output.html file has been generated in the current directory:

[screenshot of the generated output.html]

[CSDN source code download]

http://download.csdn.net/detail/tianmaxingkong_/9667206
