Using Python to crawl the titles and summaries of 1000 pages related to the Baidu Baike "Python" entry
spider_main.py
# -*- coding: utf-8 -*-
from baidu_spider import url_manager, html_downloader, html_parser, html_outputer


class SpiderMain(object):
    def __init__(self):
        self.urls = url_manager.UrlManager()
        self.downloader = html_downloader.HtmlDownloader()
        self.parser = html_parser.HtmlParser()
        self.outputer = html_outputer.HtmlOutputer()

    def craw(self, root_url):
        count = 1
        self.urls.add_new_url(root_url)
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print("craw %d:%s" % (count, new_url))
                html_cont = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parse(new_url, html_cont)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                if count == 15:  # capped at 15 for testing; raise to 1000 for the full crawl
                    break
                count = count + 1
            except Exception as e:
                print(e)
                print("craw failed")
        self.outputer.output_html()


if __name__ == "__main__":
    root_url = "http://baike.baidu.com/item/Python"
    obj_spider = SpiderMain()
    obj_spider.craw(root_url)
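For the "from baidu_spider import ..." line to resolve, the four helper modules need to sit in a package named baidu_spider. A minimal layout, which is my assumption from the import statement (the __init__.py can be empty, and spider_main.py is run from the directory containing the package):

baidu_spider/
    __init__.py
    spider_main.py
    url_manager.py
    html_downloader.py
    html_parser.py
    html_outputer.py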
url_manager.py
# -*- coding: utf-8 -*-
class UrlManager(object):
    def __init__(self):
        self.new_urls = set()
        self.old_urls = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.new_urls and url not in self.old_urls:
            self.new_urls.add(url)

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def has_new_url(self):
        return len(self.new_urls) != 0

    def get_new_url(self):
        new_url = self.new_urls.pop()
        self.old_urls.add(new_url)
        return new_url
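A quick sanity check of the deduplication logic (the URL is just an example): once get_new_url hands a URL out, it lands in old_urls and re-adding it is a no-op:

from baidu_spider.url_manager import UrlManager

manager = UrlManager()
manager.add_new_url("http://baike.baidu.com/item/Python")
manager.add_new_url("http://baike.baidu.com/item/Python")  # duplicate, silently ignored

url = manager.get_new_url()    # moves the URL from new_urls to old_urls
manager.add_new_url(url)      # already crawled, ignored
print(manager.has_new_url())  # False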
html_parser.py
# -*- coding: utf-8 -*-
import re
import urllib.parse

from bs4 import BeautifulSoup


class HtmlParser(object):
    def parse(self, page_url, html_cont):
        if page_url is None or html_cont is None:
            return
        soup = BeautifulSoup(html_cont, "html.parser")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        try:
            new_urls = set()
            # entry links on Baidu Baike look like /item/xxx
            links = soup.find_all('a', href=re.compile(r"^/item/"))
            for link in links:
                new_url = link['href']
                new_full_url = urllib.parse.urljoin(page_url, new_url)
                new_urls.add(new_full_url)
            return new_urls
        except Exception as e:
            print(e)

    def _get_new_data(self, page_url, soup):
        res_data = {}
        res_data['url'] = page_url
        title_node = soup.find('dd', class_="lemmaWgt-lemmaTitle-title").find("h1")
        res_data['title'] = title_node.get_text()
        summary_node = soup.find('div', class_="lemma-summary")
        res_data['summary'] = summary_node.get_text()
        return res_data
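A minimal check of the two selectors, run against an inline HTML fragment instead of a live page (the fragment is made up, but it mirrors the dd/h1 title node, the lemma-summary div, and the /item/ links the parser looks for):

from baidu_spider.html_parser import HtmlParser

html = '''
<dd class="lemmaWgt-lemmaTitle-title"><h1>Python</h1></dd>
<div class="lemma-summary">Python is a programming language.</div>
<a href="/item/Guido">Guido</a>
<a href="/other/ignored">ignored</a>
'''

urls, data = HtmlParser().parse("http://baike.baidu.com/item/Python", html)
print(urls)              # {'http://baike.baidu.com/item/Guido'}
print(data['title'])     # Python
print(data['summary'])   # Python is a programming language.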
html_outputer.py
# -*- coding: utf-8 -*-
class HtmlOutputer(object):
    def __init__(self):
        self.datas = []

    def collect_data(self, data):
        if data is None:
            return
        self.datas.append(data)

    def output_html(self):
        # write the file as UTF-8 and declare the charset so the Chinese text
        # displays correctly; in Python 3, pushing data['title'].encode("utf-8")
        # through %s would emit b'...' byte literals instead of the text
        fout = open('output.html', 'w', encoding='utf-8')
        fout.write("<html>")
        fout.write("<head><meta http-equiv='content-type' content='text/html;charset=utf-8'></head>")
        fout.write("<body>")
        fout.write("<table>")
        for data in self.datas:
            fout.write("<tr>")
            fout.write("<td>%s</td>" % data['url'])
            fout.write("<td>%s</td>" % data['title'])
            fout.write("<td>%s</td>" % data['summary'])
            fout.write("</tr>")
        fout.write("</table>")
        fout.write("</body>")
        fout.write("</html>")
        fout.close()
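A short usage sketch with an illustrative record shows what ends up in output.html: one table row per collected page, with url, title and summary in three cells:

from baidu_spider.html_outputer import HtmlOutputer

outputer = HtmlOutputer()
outputer.collect_data({           # illustrative values, not real crawl data
    'url': 'http://baike.baidu.com/item/Python',
    'title': 'Python',
    'summary': 'Python is a programming language.',
})
outputer.output_html()            # writes output.html in the current directory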
html_downloader.py
# -*- coding: utf-8 -*-
from urllib import request


class HtmlDownloader(object):
    def download(self, url):
        if url is None:
            return
        response = request.urlopen(url)
        if response.getcode() != 200:
            return
        return response.read().decode('utf-8')
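If the plain urlopen call starts getting rejected (Baidu has at times been unfriendly to the default urllib User-Agent), a variant of download that sends a browser-like header is a small change; the header value below is illustrative, not something the original project uses:

from urllib import request

def download(url):
    if url is None:
        return
    # send a browser-like User-Agent instead of urllib's default
    req = request.Request(url, headers={"User-Agent": "Mozilla/5.0"})
    response = request.urlopen(req)
    if response.getcode() != 200:
        return
    return response.read().decode('utf-8')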
If you just want to see it working, you can download my baidu_spider project from github.com.
Address: https://github.com/yeshell/baidu_spider/
My own mistake was in html_downloader.py: where it should say "if url is None:" I had written "if url in None:". The crawl then produced only one result before failing with "argument of type 'NoneType' is not iterable" and printing "craw failed".
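The error message follows from how "in" works: "x in y" either calls y.__contains__(x) or tries to iterate over y, and None supports neither, so the membership test itself raises. A two-line reproduction:

url = "http://baike.baidu.com/item/Python"
print(url is None)   # False -- identity check, always safe
print(url in None)   # raises TypeError: argument of type 'NoneType' is not iterable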