python01
import re
import urlparse
import urllib2
import time
from datetime import datetime
import robotparser
import Queue

# Link crawler
'''
A link crawler needs to handle the following issues:
1. The page may not exist. Wrap the download in try/except to catch the error.
2. Temporary failures, such as a 503 Service Unavailable from an overloaded
   server. Retry the download a few times.
3. A user agent, e.g. user_agent='wswp'.
4. A regular expression for extracting links: '<a[^>]+href=["\'](.*?)["\']'.
5. Relative links. A relative link must be turned into an absolute one with
   urlparse.urljoin().
6. Duplicate crawls. Pages being crawled often link back to pages already
   crawled, which would loop forever, so keep a URL manager that records
   crawled and not-yet-crawled URLs.
7. The robots.txt protocol. Use the robotparser module to avoid downloading
   URLs that crawling is forbidden for.
8. Accessing a site through a proxy.
9. Download throttling. Crawling a site too fast risks a ban or an overloaded
   server, so add a delay between two downloads.
10. Crawler traps. Set a maximum crawl depth (max_depth) -- the number of
    links followed to reach the current page.
'''

def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1,
                 headers=None, user_agent='wswp', proxy=None, num_retries=1):
    """Crawl from the given seed URL following links matched by link_regex"""
    # the queue of URL's that still need to be crawled (a double-ended queue)
    crawl_queue = Queue.deque([seed_url])
    # the URL's that have been seen and at what depth
    seen = {seed_url: 0}
    # track how many URL's have been downloaded
    num_urls = 0
    rp = get_robots(seed_url)   # fetch robots.txt
    throttle = Throttle(delay)  # download throttling
    headers = headers or {}
    if user_agent:
        headers['User-agent'] = user_agent  # set the user agent

    while crawl_queue:
        url = crawl_queue.pop()  # remove the last URL from the queue and return it
        # check url passes robots.txt restrictions:
        # is this user agent allowed to fetch the page?
        if rp.can_fetch(user_agent, url):
            throttle.wait(url)  # delay between requests to the same domain
            html = download(url, headers, proxy=proxy, num_retries=num_retries)
            links = []

            depth = seen[url]
            if depth != max_depth:
                # can still crawl further
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in get_links(html)
                                 if re.match(link_regex, link))

                for link in links:
                    link = normalize(seed_url, link)  # build the absolute link
                    # check whether already crawled this link
                    if link not in seen:
                        seen[link] = depth + 1
                        # check link is within same domain
                        if same_domain(seed_url, link):
                            # success! add this new link to queue
                            crawl_queue.append(link)

            # check whether have reached downloaded maximum
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print 'Blocked by robots.txt:', url
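A small detail worth noting: Python 2's Queue module re-exports collections.deque, so Queue.deque is the same class, and because new links are added with append() while pop() takes from the same end, the crawl proceeds depth-first. A minimal sketch of that order (the queued names here are illustrative only):

from collections import deque   # the same class as Queue.deque in Python 2

q = deque(['index'])
q.append('view/1')
q.append('view/2')
q.pop()   # 'view/2' -- last in, first out, hence a depth-first crawl
q.pop()   # 'view/1'

Switching pop() to popleft() would turn the same loop into a breadth-first crawl.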
class Throttle:
    """Throttle downloading by sleeping between requests to same domain"""
    def __init__(self, delay):
        # amount of delay between downloads for each domain
        self.delay = delay
        # timestamp of when a domain was last accessed
        self.domains = {}

    def wait(self, url):
        domain = urlparse.urlparse(url).netloc  # the network location (host) of the URL
        last_accessed = self.domains.get(domain)

        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()


def download(url, headers, proxy, num_retries, data=None):
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    if proxy:
        proxy_params = {urlparse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib2.ProxyHandler(proxy_params))
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print 'Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                # retry 5XX HTTP errors
                return download(url, headers, proxy, num_retries - 1, data)
        else:
            code = None
    return html


def normalize(seed_url, link):
    """Normalize this URL by removing hash and adding domain"""
    # urldefrag(url) splits url into a 2-tuple: the url without its fragment,
    # and the removed fragment; dropping the hash avoids duplicates
    link, _ = urlparse.urldefrag(link)
    return urlparse.urljoin(seed_url, link)  # build the absolute link


def same_domain(url1, url2):
    """Return True if both URL's belong to same domain"""
    # urlparse splits a URL into a 6-tuple of components; netloc is the host
    return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc


def get_robots(url):
    """Initialize robots parser for this domain"""
    rp = robotparser.RobotFileParser()
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))  # absolute link to robots.txt
    rp.read()
    return rp


def get_links(html):
    """Return a list of links from html"""
    # re.compile() turns the regular-expression string into a Pattern object,
    # whose findall() returns every captured href value in the page
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # list of all links from the webpage
    return webpage_regex.findall(html)


if __name__ == '__main__':
    link_crawler('http://example.webscraping.com', '/(index|view)', delay=0,
                 num_retries=1, user_agent='BadCrawler')
    link_crawler('http://example.webscraping.com', '/(index|view)', delay=0,
                 num_retries=1, max_depth=1, user_agent='GoodCrawler')
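Throttle keys its timestamps by domain, so requests to different hosts never delay each other. A quick illustrative use (the URLs are just examples):

throttle = Throttle(delay=2)
throttle.wait('http://example.webscraping.com/index/1')  # first hit: no sleep
throttle.wait('http://example.webscraping.com/view/2')   # same netloc: sleeps ~2s
throttle.wait('http://example.com/')                     # different netloc: no sleep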
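To make the behaviour of normalize() concrete, here is what its two standard-library building blocks return in a Python 2 interactive session:

>>> import urlparse
>>> urlparse.urldefrag('/view/1#comments')
('/view/1', 'comments')
>>> urlparse.urljoin('http://example.webscraping.com', '/view/1')
'http://example.webscraping.com/view/1'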
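And get_links() simply returns every href value the regex captures, regardless of quoting style:

>>> html = '<a href="/index/1">next</a> <a class="nav" href=\'/view/2\'>item</a>'
>>> get_links(html)
['/index/1', '/view/2']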
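The two link_crawler() calls in __main__ differ only in user agent. The demo site used by the wswp (Web Scraping with Python) examples serves a robots.txt that bans the 'BadCrawler' agent; the exact file may have changed, but it is along these lines (reproduced from memory, so treat it as an assumption):

User-agent: BadCrawler
Disallow: /

User-agent: *
Crawl-delay: 5

With such a file in place, the first call prints 'Blocked by robots.txt: ...' for the seed URL and stops, while the second, polite crawl follows /index and /view links one level deep.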
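Everything above is Python 2: print statements plus the urlparse, urllib2, robotparser and Queue modules. Under Python 3 the same pieces live in urllib.parse, urllib.request/urllib.error, urllib.robotparser and collections. As a minimal sketch of the port, here is download() with the logic unchanged (the decode step is my addition, since Python 3 reads bytes):

import urllib.request
import urllib.error
import urllib.parse

def download(url, headers, proxy=None, num_retries=1, data=None):
    print('Downloading:', url)
    request = urllib.request.Request(url, data, headers)
    opener = urllib.request.build_opener()
    if proxy:
        proxy_params = {urllib.parse.urlparse(url).scheme: proxy}
        opener.add_handler(urllib.request.ProxyHandler(proxy_params))
    html = ''
    try:
        response = opener.open(request)
        # response.read() returns bytes in Python 3, so decode to text
        html = response.read().decode('utf-8', errors='replace')
    except urllib.error.URLError as e:
        print('Download error:', getattr(e, 'reason', e))
        code = getattr(e, 'code', None)
        if num_retries > 0 and code is not None and 500 <= code < 600:
            # retry 5XX HTTP errors
            return download(url, headers, proxy, num_retries - 1, data)
    return html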