Python Example: Scraping Baidu Baike's Python Entry (Part 2)


Straight to the code.
1. UrlManager (URL manager)

# coding:utf8

class UrlManager(object):
    # Initialize the sets of URLs waiting to be crawled and already crawled
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    # Add a single new URL to the manager
    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    # Add a batch of URLs
    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    # set.pop() removes and returns an arbitrary URL from the pending set
    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
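To make the behavior concrete, here is a minimal usage sketch (not from the original post; the URLs are examples):

# Minimal usage sketch; the URLs below are illustrative only
manager = UrlManager()
manager.add_new_url('http://baike.baidu.com/view/21087.htm')
manager.add_new_urls(['http://baike.baidu.com/view/123.htm'])
while manager.has_new_url():
    url = manager.get_new_url()
    print(url)  # each URL is returned once, then tracked in old_urls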

2. HtmlDownloader (downloader)

# coding:utf8
import urllib2

class HtmlDownloader(object):
    # Fetch the page at the given URL; return its HTML, or None on failure
    def download(self, url):
        if url is None:
            return None
        response = urllib2.urlopen(url)
        if response.getcode() != 200:
            return None
        return response.read()
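Note that urllib2 exists only in Python 2. On Python 3 the same downloader would use urllib.request instead; a minimal sketch, assuming only the standard library:

# Python 3 equivalent sketch (urllib2 was split into urllib.request)
import urllib.request

def download(url):
    if url is None:
        return None
    response = urllib.request.urlopen(url)
    if response.getcode() != 200:
        return None
    return response.read()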

3. HtmlParser (parser)

# -*- coding: UTF-8 -*-
import re
import urlparse
from bs4 import BeautifulSoup

class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8')
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        # Entry links look like /view/123.htm
        links = soup.find_all('a', href=re.compile(r"/view/\d+\.htm"))
        for link in links:
            new_url = link['href']
            # urljoin resolves the relative new_url against page_url into a full URL
            new_full_url = urlparse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    # The data we extract is the title and the summary
    def _get_new_data(self, page_url, soup):
        res_data = {}
        # url
        res_data['url'] = page_url
        # Title:
        # <dd class="lemmaWgt-lemmaTitle-title"> <h1>Python</h1>
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find('h1')
        res_data['title'] = title_node.get_text()
        # Summary:
        # <div class="lemma-summary" label-module="lemmaSummary">
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
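The urljoin call is what turns relative links like /view/123.htm into absolute URLs. A quick illustration (the URLs are examples):

# Illustration of urlparse.urljoin (Python 2); URLs are examples only
import urlparse

page_url = 'http://baike.baidu.com/view/21087.htm'
new_url = '/view/123.htm'
print(urlparse.urljoin(page_url, new_url))
# -> http://baike.baidu.com/view/123.htm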

4. HtmlOutputer (outputer)

# coding:utf8

class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        fout = open('outputer.html', 'w')
        fout.write("<html>")
        fout.write('<body>')
        fout.write('<table>')
        # Encode to UTF-8 explicitly; Python 2 would otherwise default to ASCII and fail
        for data in self.datas:
            fout.write("<tr>")
            fout.write('<td>%s</td>' % data['url'])
            fout.write('<td>%s</td>' % data['title'].encode("utf-8"))
            fout.write('<td>%s</td>' % data['summary'].encode("utf-8"))
            fout.write("</tr>")
        fout.write('</table>')
        fout.write('</body>')
        fout.write("</html>")
        fout.close()
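The post only lists the four components; a minimal scheduler that wires them together might look like the sketch below (the class name, page limit, and root URL here are assumptions, not taken from the original post):

# coding:utf8
# Hypothetical scheduler sketch; names and the root URL are assumptions
class SpiderMain(object):
    def __init__(self):
        self.urls = UrlManager()
        self.downloader = HtmlDownloader()
        self.parser = HtmlParser()
        self.outputer = HtmlOutputer()

    def craw(self, root_url, max_count=10):
        self.urls.add_new_url(root_url)
        count = 1
        while self.urls.has_new_url():
            new_url = self.urls.get_new_url()
            try:
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count >= max_count:  # stop after a fixed number of pages
                    break
                count += 1
            except Exception:
                print('craw failed: %s' % new_url)
        self.outputer.output_html()

if __name__ == '__main__':
    SpiderMain().craw('http://baike.baidu.com/view/21087.htm')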

Summary: the BeautifulSoup workflow is: create a BeautifulSoup object -> search for nodes with find_all/find -> access the node (name, attributes, text).
For example, soup = BeautifulSoup(html_cont, 'html.parser', from_encoding='utf-8'): the first argument is the page content, the second is the parser, and the third is the encoding. With these four components, the Baidu Baike Python entry can be crawled end to end.
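The three-step pattern in miniature (the HTML snippet below is invented for illustration):

# coding:utf8
# Tiny end-to-end illustration of create -> find -> access;
# the HTML string here is made up for the example
from bs4 import BeautifulSoup

html_cont = '<html><body><a href="/view/123.htm">Python</a></body></html>'
soup = BeautifulSoup(html_cont, 'html.parser')   # 1. create the object
link = soup.find('a', href="/view/123.htm")      # 2. search for a node
print('%s %s %s' % (link.name, link['href'], link.get_text()))  # 3. access name, attribute, text
# -> a /view/123.htm Python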
Corrections and suggestions are welcome.

Git repo: https://github.com/gexrior/PytBaidu.git
