A Python crawler for scraping LesMao gallery photos

# _*_ coding:utf-8 _*_
import urllib2
from lxml import etree
import sys

# Work around UnicodeDecodeError when handling Chinese text (Python 2 only)
reload(sys)
sys.setdefaultencoding('utf-8')

# Fetch every listing page and collect the photo-set links to crawl
def allurl(url, headers):
    # Walk the listing pages (range(1, 137) covers pages 1 to 136)
    for page in range(1, 137):
        Newurl = url + str(page)
        request = urllib2.Request(Newurl, headers=headers)
        response = urllib2.urlopen(request).read()
        pattern = etree.HTML(response)

        # Extract the link of each photo set on the listing page
        link_list = pattern.xpath('//div[@class="photo"]/a/@href')
        for link in link_list:
            # Drop the last 8 characters ("1-1.html") and rebuild the URL
            # for sub-pages 1 to 4 of the photo set
            link2 = link[:-8]
            for sub in range(1, 5):
                alllink = link2 + str(sub) + "-1.html"
                print alllink
                allgirl(alllink, headers=headers)
# Parse one photo-set page and download every image on it
def allgirl(url, headers):
    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request).read()
    pattern = etree.HTML(response)
    link_num = pattern.xpath('//div[@id="thread-pic"]/ul/li/img/@src')
    img_names = pattern.xpath('//div[@id="thread-pic"]/ul/li/img/@alt')
    for num, img_name in zip(link_num, img_names):
        # Build a short file name from slices of the alt text and the image URL
        name = img_name[-10:-4] + num[-21:-18]
        saveimg(num, name, headers)
# Download one image and write it to disk
def saveimg(url, img_name, headers):
    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request).read()
    # Hard-coded save directory; change this path to suit your machine
    with open('/home/cgs/lesmao/' + img_name + '.jpg', 'wb') as f:
        f.write(response)


if __name__ == "__main__":
    url = "http://www.lesmao.com/portal.php?page="
    headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:54.0) Gecko/20100101 Firefox/54.0",
               "Referer": "http://www.lesmao.com/"}
    allurl(url, headers)




This crawler uses nothing more than urllib2 and lxml, a deliberately simple approach: I had only just started with web scraping and was not chasing efficiency. If you want to use the code to download the images yourself, adjusting the image save path is basically all it takes; the sketch below shows one way to make that path configurable.
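As a minimal sketch of that change (my own addition, not part of the original script), saveimg could take the save directory as a parameter and create it on first use; the save_dir name and its default are assumptions, and the urllib2 setup is the same as above.

import os

# Sketch only: configurable save directory instead of a hard-coded path
def saveimg(url, img_name, headers, save_dir='/home/cgs/lesmao'):
    # Create the target directory the first time it is needed
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)
    request = urllib2.Request(url, headers=headers)
    response = urllib2.urlopen(request).read()
    with open(os.path.join(save_dir, img_name + '.jpg'), 'wb') as f:
        f.write(response)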
