A simple Python crawler
This article explains how to implement a simple web crawler in Python.
Crawler workflow
Open the seed URL -> collect every URL on the seed page -> check whether each URL has been crawled before, adding unvisited ones to the URL list -> parse each page for the information we need -> write the results to the database
Five objects can be abstracted from this workflow (a simplified sketch of the loop that ties them together follows the list):
- bootstrap: the launcher that drives the crawl
- downloader: fetches a page's HTML
- parser: extracts links and data from the HTML
- url_manager: tracks visited and unvisited URLs
- outputer: writes the collected data to the database
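Before looking at each module, here is the whole crawl in miniature. This is only an illustrative sketch, not code from the project, but the method names mirror the classes implemented below:

# Simplified sketch of the crawl loop; the real version lives in main.py
def crawl(root_url, urls, downloader, parser, outputer):
    urls.add_new_url(root_url)                     # seed the URL manager
    while urls.has_new_url():                      # any unvisited URLs left?
        url = urls.get_new_url()
        html = downloader.download(url)            # fetch the page
        new_urls, data = parser.parser(url, html)  # extract links and records
        urls.add_new_urls(new_urls)                # queue links we haven't seen
        outputer.collect_data(data)                # persist the parsed records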
Launcher (main.py)
Let's first look at how the launcher is implemented:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""
import spider_outputer
import spider_parser
import spider_downloader
import spider_url_manager


class Main:
    def __init__(self):
        self.urls = spider_url_manager.UrlManager()
        self.downloader = spider_downloader.Downloader()
        self.outputer = spider_outputer.Outputer()
        self.parser = spider_parser.Parser()

    def craw(self, root_url):
        self.urls.add_new_url(root_url)
        count = 1
        while self.urls.has_new_url():
            try:
                new_url = self.urls.get_new_url()
                print('craw %d : %s' % (count, new_url))
                html_content = self.downloader.download(new_url)
                new_urls, new_data = self.parser.parser(new_url, html_content)
                self.urls.add_new_urls(new_urls)
                self.outputer.collect_data(new_data)
                count += 1
            except Exception as e:
                print(e)


if __name__ == "__main__":
    url = 'https://www.zhihu.com/people/li-xiao-miao-70/activities'
    main = Main()
    main.craw(url)
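To run the demo, the only third-party packages required are BeautifulSoup (pip install beautifulsoup4) and pymysql (pip install pymysql); everything else is in the standard library. With all the modules in one directory, python main.py starts the crawl, or equivalently (assuming the same file layout):

# Assumes main.py and the spider_* modules are on the import path
from main import Main

Main().craw('https://www.zhihu.com/people/li-xiao-miao-70/activities')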
Downloader (spider_downloader.py)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""
import string
from urllib import request
from urllib.parse import quote

user_agent = 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36'


class Downloader:
    def download(self, url):
        if url is None:
            return None
        _url = quote(url, safe=string.printable)
        req = request.Request(_url, headers={'User-Agent': user_agent})
        response = request.urlopen(req)
        if response.getcode() != 200:
            print("request bad")
            return None
        html = response.read()
        return html.decode('utf8')
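The quote(url, safe=string.printable) call percent-encodes any non-ASCII characters in the URL while leaving ordinary ASCII punctuation alone, so URLs containing Chinese survive urlopen. A quick standalone check:

import string
from urllib.parse import quote

# Non-ASCII characters are percent-encoded; ASCII syntax (:, /, ?) is preserved
print(quote('https://www.zhihu.com/search?q=爬虫', safe=string.printable))
# -> https://www.zhihu.com/search?q=%E7%88%AC%E8%99%AB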
Parser (spider_parser.py)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""
from urllib import parse
from bs4 import BeautifulSoup
import user_info


class Parser:
    def parser(self, page_url, html_content):
        if page_url is None or html_content is None:
            return
        soup = BeautifulSoup(html_content, "html.parser")
        new_urls = self._get_new_urls(page_url, soup)
        new_data = self._get_new_data(page_url, soup)
        return new_urls, new_data

    def _get_new_urls(self, page_url, soup):
        new_urls = set()
        links = soup.find_all('a')
        for link in links:
            new_url = link.get('href')
            new_full_url = parse.urljoin(page_url, new_url)
            new_urls.add(new_full_url)
        return new_urls

    def _get_new_data(self, page_url, soup):
        ret_data = []
        author_infos = soup.find_all(class_='AuthorInfo')
        print(len(author_infos))
        for author_info in author_infos:
            user_id = author_info.find(class_='UserLink-link').get('href')
            user_id = user_id[user_id.find('e/') + 2:]
            photo = author_info.find(class_='AuthorInfo-avatar').get('srcset')
            pos = photo.find(' ')
            photo = photo[:pos]
            name = author_info.find('meta', attrs={"itemprop": "name"}).get('content')
            profession = author_info.find(class_='AuthorInfo-badgeText').get_text()
            user = user_info.UserInfo(user_id, name, photo, profession)
            ret_data.append(user)
        return ret_data
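_get_new_urls relies on urljoin to turn relative hrefs into absolute URLs, and _get_new_data on BeautifulSoup's class_ keyword to match CSS classes. A tiny illustration on invented markup shaped like Zhihu's (the class names come from the parser above; the HTML itself is made up):

from urllib import parse
from bs4 import BeautifulSoup

html = '<div class="AuthorInfo"><a class="UserLink-link" href="/people/li-xiao-miao-70">tt</a></div>'
soup = BeautifulSoup(html, 'html.parser')
href = soup.find(class_='UserLink-link').get('href')
print(parse.urljoin('https://www.zhihu.com/question/1', href))
# -> https://www.zhihu.com/people/li-xiao-miao-70
# the user_id slicing in _get_new_data keeps everything after the 'e/' of '/people/'
print(href[href.find('e/') + 2:])  # -> li-xiao-miao-70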
URL manager (spider_url_manager.py)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""


class UrlManager:
    def __init__(self):
        self.visited_url = set()
        self.visit_url = set()

    def add_new_url(self, url):
        if url is None:
            return
        if url not in self.visit_url and url not in self.visited_url:
            self.visit_url.add(url)

    def has_new_url(self):
        return len(self.visit_url) != 0

    def add_new_urls(self, urls):
        if urls is None or len(urls) == 0:
            return
        for url in urls:
            self.add_new_url(url)

    def get_new_url(self):
        new_url = self.visit_url.pop()
        self.visited_url.add(new_url)
        return new_url
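The two sets implement the de-duplication step of the workflow: a URL enters visit_url only if it is in neither set, and it moves to visited_url the moment it is handed out. A quick check of that behavior:

import spider_url_manager

manager = spider_url_manager.UrlManager()
manager.add_new_url('https://example.com/a')
manager.add_new_urls(['https://example.com/a', 'https://example.com/b'])  # duplicate 'a' is ignored
while manager.has_new_url():
    print(manager.get_new_url())   # prints each URL exactly once
manager.add_new_url('https://example.com/a')
print(manager.has_new_url())       # False: 'a' is already in visited_url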
Outputer (spider_outputer.py)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""
import user_dao


class Outputer:
    def __init__(self):
        self.user_dao = user_dao.UserDao()

    def collect_data(self, data):
        if data is None:
            print('None')
            return
        for d in data:
            self.user_dao.add_user(d)
Other classes
Database helper class (db_helper.py)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""
import pymysql


class DbHelper:
    def _get_connection(self):
        return pymysql.Connect(host='127.0.0.1', user='root', passwd='root',
                               db='crawler', port=3306, charset='utf8')

    def execute_update(self, sql, params):
        connection = self._get_connection()
        cursor = connection.cursor()
        try:
            if params is None:
                s = sql
            else:
                s = (sql % params)
            result = cursor.execute(s)
            connection.commit()
            print('rows:', result)
        except Exception as e:
            print(e)
            connection.rollback()
        finally:
            cursor.close()
            connection.close()

    def query(self, sql, params=None):
        connection = self._get_connection()
        cursor = connection.cursor()
        try:
            if params is None:
                s = sql
            else:
                s = (sql % params)
            cursor.execute(s)
            rows = cursor.fetchall()
            for row in rows:
                print(row)
        except Exception as e:
            print(e)
            connection.rollback()
        finally:
            cursor.close()
            connection.close()
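One caveat: building the statement with sql % params breaks as soon as a value contains a quote character, and it is open to SQL injection. pymysql will escape values itself if they are passed to cursor.execute as a second argument; a safer variant of execute_update (a sketch, not the original code) could look like this, with the SQL using bare %s placeholders such as VALUES(%s, %s, %s, %s):

# Drop-in replacement for DbHelper.execute_update using parameterized queries
def execute_update(self, sql, params=None):
    connection = self._get_connection()
    cursor = connection.cursor()
    try:
        result = cursor.execute(sql, params)  # pymysql escapes params itself
        connection.commit()
        print('rows:', result)
    except Exception as e:
        print(e)
        connection.rollback()
    finally:
        cursor.close()
        connection.close()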
User data access class (user_dao.py)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""
import db_helper


class UserDao:
    def __init__(self):
        self.db = db_helper.DbHelper()

    def add_user(self, user):
        sql = '''INSERT IGNORE INTO zhihu_user(user_id, photo, name, profession)
                 values('%s', '%s', '%s', '%s')'''
        params = (user.user_id, user.photo, user.name, user.profession)
        self.db.execute_update(sql=sql, params=params)
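The article never shows the zhihu_user table itself, so the following DDL is a guess that matches the columns the DAO inserts; the column types are assumptions. Note that INSERT IGNORE only skips duplicates when a unique key exists, hence the primary key on user_id:

import db_helper

# Hypothetical schema; the original article does not include the table definition
ddl = '''CREATE TABLE IF NOT EXISTS zhihu_user (
    user_id    VARCHAR(64) PRIMARY KEY,
    photo      VARCHAR(255),
    name       VARCHAR(64),
    profession VARCHAR(128)
)'''
db_helper.DbHelper().execute_update(ddl, params=None)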
User info class (user_info.py)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Created on 2017/9/23
@author: tt
"""


class UserInfo:
    def __init__(self, user_id, name, photo, profession):
        self.user_id = user_id
        self.name = name
        self.photo = photo
        self.profession = profession

    def __str__(self):
        return ('UserInfo(name=' + self.name + ', photo=' + self.photo +
                ', profession=' + self.profession + ')\n')

    def __repr__(self):
        return self.__str__()
In Python, print calls an object's __str__ method when it outputs the object.
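A quick demonstration with made-up values (the UserInfo class is the one above):

import user_info

u = user_info.UserInfo('li-xiao-miao-70', 'tt', 'https://example.com/avatar.jpg', 'engineer')
print(u)    # print uses __str__: UserInfo(name=tt, photo=..., profession=engineer)
print([u])  # containers use __repr__, which delegates to __str__ here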
This little demo crawls Zhihu users' IDs, avatars, names, and professions.