Crawler 1.1: scraping images from Doutula (on the open() and urlretrieve() functions)


This post is just one tiny little step in my learning as a NewBird ٩꒰▽ ꒱۶⁼³₌₃

so please don't laugh at me ⁄(⁄ ⁄•⁄ω⁄•⁄ ⁄)⁄

I'll mostly just paste the code; I'm not going to use very technical language.


1. The command to create the project:

scrapy startproject <project_name>

Example:

scrapy startproject myproject

If the project is created successfully, the generated folder structure looks like this:

myproject/
    scrapy.cfg          ------- the project's configuration file
    myproject/          ------- the project's Python module; you will add your code here
        __init__.py
        items.py        ------- the project's item definitions
        pipelines.py    ------- the project's pipelines
        settings.py     ------- the project's settings
        spiders/        ------- the directory that holds the spider code
            __init__.py
            ...

The directory description above is copied from someone else's post: https://www.cnblogs.com/pachongshou/p/6125858.html
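After creating the project, the spider file itself can be generated with scrapy genspider (a step this post does not show explicitly; the name and domain below match the spider code in step 3):

scrapy genspider doutu doutula.com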


2. Define what to extract: items.py

import scrapy


class Doutu2Item(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()
    img_url = scrapy.Field()
    name = scrapy.Field()
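Just as a side note (my own sketch, not from the original project): a scrapy.Item behaves like a dictionary, but only declared fields can be assigned, which is why img_url and name are defined above.

from doutu2.items import Doutu2Item   # assumes the project is named doutu2, as in settings.py

item = Doutu2Item()
item['name'] = 'test'                          # assigning a declared field works
item['img_url'] = 'http://example.com/a.gif'   # placeholder URL for illustration
print(dict(item))                              # an Item converts cleanly to a plain dict
# item['other'] = 1                            # would raise KeyError: field not declared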


3. Build the spider, crawl first and then extract: spider.py

# -*- coding: utf-8 -*-
import scrapy
from ..items import Doutu2Item
from pyquery import PyQuery as pq
import os
import requests
from urllib import request
import re


class DoutuSpider(scrapy.Spider):
    name = 'doutu'
    allowed_domains = ['doutula.com']
    start_urls = ['http://doutula.com/photo/list/?page={}'.format(i) for i in range(1, 3)]

    def parse(self, response):
        # I used PyQuery here to parse the page
        jpy = pq(response.text)
        Zurl = jpy('#pic-detail > div > div.col-sm-9 > div.random_picture > ul > li > div > div>a').items()
        i = 0
        # iterate over Zurl
        for it in Zurl:
            print(it.text())
            # instantiate an item object to store the data
            item = Doutu2Item()
            # attr() in PyQuery reads an attribute;
            # the lines below grab the URL for both GIF and JPG images
            item['img_url'] = it('img').attr('data-original')
            item['name'] = it('p').text()
            if not item['img_url']:
                item['img_url'] = it('img').eq(1).attr('data-original')
            print(item['img_url'])
            i += 1
            # if os.path.exists('斗图'):
            #     print('folder already exists')
            # else:
            #     os.makedirs('斗图')
            #     print('folder created')
            if not os.path.exists('doutu'):
                print('creating folder: {}'.format('doutu'))
                os.mkdir('doutu')
            if not os.path.exists('pic'):
                print('creating folder: {}'.format('pic'))
                os.mkdir('pic')
            # use a regular expression to replace special characters in the name
            rstr = r"[\/\\\:\*\?\"\<\>\|]"  # '/ \ : * ? " < > |'
            new_title = re.sub(rstr, "_", item['name'])  # replace them with underscores
            # first way to save: I fumbled the file path a few times when opening it,
            # so a relative path is easier
            with open('pic/%s.jpg' % new_title, 'wb') as f:
                f.write(requests.get(item['img_url']).content)
            # second way to save
            try:
                request.urlretrieve(item['img_url'], 'doutu/%s.gif' % new_title)
            except:
                pass
            print(i)
            print('__________________________________________________')
            # yield inside the loop so every item reaches the pipeline
            yield item
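Since the title mentions open() and urlretrieve(), here is the same pair of saving techniques pulled out of the spider into a minimal standalone sketch (the URL and file names are placeholders I made up):

import os
import requests
from urllib import request

img_url = 'http://example.com/demo.gif'   # placeholder, not a real image URL
os.makedirs('pic', exist_ok=True)         # make sure the folder exists first

# Method 1: download the bytes with requests, then write them yourself with open()
with open('pic/demo.jpg', 'wb') as f:
    f.write(requests.get(img_url).content)

# Method 2: urlretrieve downloads and writes the file in a single call
request.urlretrieve(img_url, 'pic/demo.gif')

The practical difference is that with open() you handle the bytes yourself (so you can add headers or error handling through requests), while urlretrieve is a one-liner that does the download and the file write for you.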

4. Process the items extracted by the spider: pipelines.py

from scrapy.exceptions import DropItem
from scrapy import log
import json
from pymongo import MongoClient
from scrapy.conf import settings


class Doutu2Pipeline(object):
    # this is the initialization method,
    # where the database connection is set up
    def __init__(self):
        connection = MongoClient(
            settings['MONGODB_SERVER'],
            settings['MONGODB_PORT']
        )
        db = connection[settings['MONGODB_DB']]
        self.collection = db[settings['MONGODB_COLLECTION']]

    def process_item(self, item, spider):
        self.collection.insert(dict(item))
        print('inserted into the database')
        valid = True
        for data in item:
            if not data:
                valid = False
                raise DropItem('Missing {}'.format(data))
        if valid:
            log.msg('item stored in the database', level=log.DEBUG, spider=spider)
        return item
The __init__ method runs as soon as an object of the class is created, so you can use it to do whatever initialization you want for the object. Note that the name begins and ends with double underscores.
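To double-check that items actually reach MongoDB, a quick look with pymongo works (my own sketch; the host, port, database and collection names come from the MONGODB_* settings in step 5). Also note that newer pymongo versions deprecate insert() in favour of insert_one().

from pymongo import MongoClient

client = MongoClient('localhost', 27017)   # MONGODB_SERVER / MONGODB_PORT
collection = client['test']['doutu2']      # MONGODB_DB / MONGODB_COLLECTION

print(collection.count_documents({}))      # how many items were stored (pymongo 3.7+; older versions use count())
for doc in collection.find().limit(3):     # peek at a few of them
    print(doc['name'], doc['img_url'])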


5. The configuration file: settings.py

# Scrapy settings for doutu2 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     http://doc.scrapy.org/en/latest/topics/settings.html
#     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'doutu2'

SPIDER_MODULES = ['doutu2.spiders']
NEWSPIDER_MODULE = 'doutu2.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 0.2
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
   'doutu2.middlewares.Doutu2SpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'doutu2.middlewares.MyCustomDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'doutu2.pipelines.Doutu2Pipeline': 300,
}

MONGODB_SERVER = "localhost"
MONGODB_PORT = 27017
MONGODB_DB = "test"
MONGODB_COLLECTION = "doutu2"
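With the items, spider, pipeline and settings in place, the crawl is started from the project root (the directory containing scrapy.cfg), using the spider's name attribute:

scrapy crawl doutu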


6. Middleware, for tweaking requests and responses: middlewares.py

I don't really know how to use this part yet.
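For completeness, a downloader middleware is just a class with process_request / process_response hooks that Scrapy calls for every request. The sketch below is my own made-up example (the class name HeaderLoggingMiddleware is not part of this project):

# middlewares.py -- hypothetical example, not used by the original project
class HeaderLoggingMiddleware(object):
    def process_request(self, request, spider):
        # tweak the outgoing request, e.g. add a Referer header
        request.headers.setdefault('Referer', 'http://doutula.com/')
        return None   # returning None lets Scrapy continue handling the request

    def process_response(self, request, response, spider):
        # inspect the response before it reaches the spider
        spider.logger.debug('%s returned %s', request.url, response.status)
        return response

It would be switched on with something like DOWNLOADER_MIDDLEWARES = {'doutu2.middlewares.HeaderLoggingMiddleware': 543} in settings.py.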

 