Learning Python crawlers: regular expression practice


This crawls a photo-gallery site; the script was never polished further. The final haul was 4,333 images, some of them duplicates. On a fast connection the whole run takes roughly an hour.

Download link: http://pan.baidu.com/s/1mix0AkK (password: sf9u)

To run it, copy the code into Notepad++, save it as a .py file, right-click and choose "Edit with IDE", then press F5. Or just download the source file. Python version: 2.7.12, 32-bit.
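Equivalently, if you save the listing below as, say, crawler.py (the file name is just an example), you can run it from a command prompt with the 2.7 interpreter:

    C:\> python crawler.py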

#!/usr/bin/env python
# -*- coding: utf-8 -*-
import re
import urlparse
import urllib2
from datetime import datetime
import robotparser
import urllib
import os
import time


class ScrapeCallback:
    """Pulls every image URL matching `regex` out of `html` and saves it to D:/datepic."""
    def __init__(self, html, regex, url):
        self.html = html
        self.regex = regex
        self.url = url

    def downpic(self):
        datepic = re.findall(self.regex, self.html)
        if datepic:
            for i in datepic:
                # Flatten the URL path into a file name, e.g. /a/b.jpg -> _a_b.jpg
                j = urlparse.urlparse(i).path.replace('/', '_')
                j = unicode(j, 'utf8')
                print j
                urllib.urlretrieve(i, 'D:/datepic/%s' % j)


def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, user_agent='wswp', num_retries=1, scrape_callback=None):
    """Crawl from seed_url, following links that match link_regex up to max_depth,
    honouring robots.txt and throttling requests per domain."""
    crawl_queue = [seed_url]
    seen = {seed_url: 0}    # maps each URL to the depth it was found at
    rp = get_robots(seed_url)
    throttle = Throttle(delay)
    headers = {}
    if user_agent:
        headers['User-agent'] = user_agent
    while crawl_queue:
        url = crawl_queue.pop()
        print crawl_queue
        print len(crawl_queue)
        depth = seen[url]
        if rp.can_fetch(user_agent, url):
            throttle.wait(url)
            html = download(url, headers, num_retries=num_retries)
            links = []
            if scrape_callback:
                # Download every .jpg referenced on this page
                dlh = ScrapeCallback(html, r'http://.+?\.jpg', url)
                dlh.downpic()
            if depth != max_depth:
                if link_regex:
                    links.extend(link for link in get_links(html) if re.search(link_regex, link))
                for link in links:
                    link = normalize(seed_url, link, url)
                    if link not in seen and 'htmllist' not in link:
                        seen[link] = depth + 1
                        if same_domain(seed_url, link):
                            crawl_queue.append(link)
        else:
            print 'Blocked by robots.txt:', url


class Throttle:
    """Enforces a minimum delay between consecutive requests to the same domain."""
    def __init__(self, delay):
        self.delay = delay
        self.domains = {}   # domain -> datetime of last access

    def wait(self, url):
        domain = urlparse.urlsplit(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()


def download(url, headers, num_retries, data=None):
    """Fetch url, retrying up to num_retries times on 5xx server errors."""
    print 'Downloading:', url
    request = urllib2.Request(url, data, headers)
    opener = urllib2.build_opener()
    try:
        response = opener.open(request)
        html = response.read()
        code = response.code
    except urllib2.URLError as e:
        print '-----------------Download error:', e.reason
        html = ''
        if hasattr(e, 'code'):
            code = e.code
            if num_retries > 0 and 500 <= code < 600:
                html = download(url, headers, num_retries - 1, data)
        else:
            code = None
    return html


def normalize(seed_url, link, url):
    """Strip fragments and resolve a relative link against the page it was found on."""
    link, _ = urlparse.urldefrag(link)
    if '_' in link:
        # Paginated links like 123_2.html are relative to the current section
        urlpath = urlparse.urlparse(url).path.split('/')[-2] + '/'
        link = urlpath + link
    link = urlparse.urljoin(seed_url, link)
    return link


def same_domain(url1, url2):
    return urlparse.urlparse(url1).netloc == urlparse.urlparse(url2).netloc


def get_robots(url):
    """Fetch and parse the site's robots.txt."""
    rp = robotparser.RobotFileParser()
    rp.set_url(urlparse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp


def get_links(html):
    """Return the href value of every <a> tag on the page."""
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # print webpage_regex.findall(html)
    return webpage_regex.findall(html)


if __name__ == '__main__':
    # Make sure the output folder exists, then open it in Explorer
    date_path = 'D:/datepic'
    if not os.path.exists(date_path):
        os.makedirs(date_path)
    os.system("explorer.exe %s" % date_path.replace('/', '\\'))
    # uumeitu.com/[a-zA-Z]+/
    link_crawler('http://www.uumeitu.com', r"(/tgod/|com/[a-zA-Z]*?/|list|\d+_\d+?\.html)", delay=0, num_retries=3, max_depth=3, user_agent='baidu', scrape_callback=True)
    # link_crawler('http://www.uumeitu.com', '(/Ugirls/|/mingxing/|/mote/|/siwa/|/tgod/|/ligui/|/tuigirl/)', delay=0, num_retries=4, max_depth=2, user_agent='baidu', scrape_callback=True)
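Since the point of the exercise is regex practice, here is a minimal standalone sketch of the two patterns the crawler relies on. The sample HTML fragment and the img.example.com URL are made up for illustration:

# -*- coding: utf-8 -*-
import re

# Made-up HTML fragment, just for testing the patterns
sample = '<a href="/tgod/123_1.html">next</a> <img src="http://img.example.com/a/b.jpg">'

# Image pattern used by ScrapeCallback: non-greedy match up to the first ".jpg"
print re.findall(r'http://.+?\.jpg', sample)
# ['http://img.example.com/a/b.jpg']

# Link pattern passed to link_crawler: section paths and paginated pages
link_regex = r"(/tgod/|com/[a-zA-Z]*?/|list|\d+_\d+?\.html)"
print [href for href in re.findall('<a[^>]+href=["\'](.*?)["\']', sample, re.IGNORECASE)
       if re.search(link_regex, href)]
# ['/tgod/123_1.html']

The non-greedy .+? in the image pattern matters: a greedy .+ would run to the last ".jpg" on the page and merge several image URLs into one match.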

