Scraping Baidu Images with Python


I. Regular expressions for extracting links

import re

# Example of .
a = 'xy123'
b = re.findall('x...', a)
print b  # ['xy12']
*****************************
# Example of *
a = 'xyxy123'
b = re.findall('x*', a)
print b  # ['x', '', 'x', '', '', '', '', '']
*****************************
# Example of ?
a = 'xy123'
b = re.findall('x?', a)
print b  # ['x', '', '', '', '', '']
==========================================
'''Everything above is background you only need to be aware of; the one combination you must master is (.*?)'''
==========================================
secret_code = 'hadkfalifexxIxxfasdjifja134xxlovexx23345sdfxxyouxx8dfse'
*****************************
# Example of .* (greedy)
b = re.findall('xx.*xx', secret_code)
print b  # ['xxIxxfasdjifja134xxlovexx23345sdfxxyouxx']
*****************************
# Example of .*? (lazy)
c = re.findall('xx.*?xx', secret_code)
print c  # ['xxIxx', 'xxlovexx', 'xxyouxx']
*****************************
# (.*?): the difference the parentheses (capture group) make
d = re.findall('xx(.*?)xx', secret_code)
print d  # ['I', 'love', 'you']
for each in d:
    print each  # prints I, love, you on separate lines
*****************************
# Matching across line breaks
s = '''sdfxxhello
xxfsdfxxworldxxasdf'''
d = re.findall('xx(.*?)xx', s)
print d  # ['fsdf']
d = re.findall('xx(.*?)xx', s, re.S)
print d  # ['hello\n', 'world']
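
As a preview of how this pattern earns its keep below: the crawler pulls image urls out of Baidu's JSON responses with exactly this kind of lazy capture group. The response fragment here is made up for illustration:

# Illustration only: a made-up fragment shaped like Baidu's JSON response.
import re

response = '"ObjURL":"http://img0.example.com/a.jpg","ObjURL":"http://img1.example.com/b.jpg"'
print re.findall('"ObjURL":"(.*?)"', response)
# ['http://img0.example.com/a.jpg', 'http://img1.example.com/b.jpg']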

II. Crawler code

1. Scraping a static site with no anti-crawling measures

# coding=utf-8
import urllib
import re

# Return the site's source code
def getHtml(url):
    # Open the site
    page = urllib.urlopen(url)
    # Get the site's source code
    html = page.read()
    return html

def getImg(html):
    # Regular expression used to find image resource paths in the page source
    reg = 'data-objurl="(.*?)"'
    # Compile the regex string into a pattern object
    imgre = re.compile(reg)
    # Find everything in the page source matching imgre; returns a list
    imglist = re.findall(imgre, html)
    x = 0
    # Download the images
    for imgurl in imglist:
        # Fetch each image url and save it under a numbered file name
        urllib.urlretrieve(imgurl, '%s.jpg' % x)
        x += 1
    return imglist

html = getHtml("http://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=披萨")
print(html)
print(getImg(html))

The url here is simply the address copied from the browser's address bar, but this code cannot actually scrape it, presumably because Baidu has anti-crawling measures in place. A quick way to check is to see whether the pattern the regex expects even appears in what the server returns, as sketched below.
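
A minimal diagnostic sketch, reusing the url and pattern from the code above: if it prints 0, the HTML served to a bare urlopen contains none of the data-objurl attributes the regex looks for, which is consistent with the gallery being filled in client-side from a separate JSON request.

# coding=utf-8
# Diagnostic: does the served HTML contain any data-objurl attributes?
import urllib
import re

url = "http://image.baidu.com/search/index?tn=baiduimage&ps=1&ct=201326592&lm=-1&cl=2&nc=1&ie=utf-8&word=披萨"
html = urllib.urlopen(url).read()
# Zero matches suggests the images are loaded via a separate JSON endpoint.
print len(re.findall('data-objurl="(.*?)"', html))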

2. Scraping Baidu Images past its anti-crawling measures

# coding=utf-8
import urllib
import urllib2
import re
import os

# Scrape Baidu Images, which has anti-crawling measures
def getHtml(url):
    page = urllib.urlopen(url)
    html = page.read()
    return html

def getImg(html):
    reg = 'ObjURL":"(.*?)"'
    imgre = re.compile(reg)
    imglist = re.findall(imgre, html)
    newimglist = []
    for img in imglist:
        # The JSON escapes slashes as \/, so strip the backslashes
        newimglist.append(img.replace("\\", ""))
    return newimglist

def downLoad(urls, path):
    # Make sure the target directory exists before saving into it
    if not os.path.exists(path):
        os.makedirs(path)
    index = 1
    for url in urls:
        print("Downloading:", url)
        try:
            res = urllib2.urlopen(url)
            # Skip urls that answer with a 4xx status (e.g. 404)
            if str(res.getcode())[0] == "4":
                print("download failed!", url)
                continue
        except Exception as e:
            print("download failed!", url)
            continue
        filename = os.path.join(path, str(index) + ".jpg")
        urllib.urlretrieve(url, filename)
        index += 1

html = getHtml("https://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=%E6%8A%AB%E8%90%A8&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word=%E6%8A%AB%E8%90%A8&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn=450&rn=30&gsm=1c2&1513946786738=")
# print html
print(getImg(html))
savepath = "/spider_data"
downLoad(getImg(html), savepath)

The main difference from the static-site code lies in the format of the url being fetched: 1) for a static site you can use the address-bar url directly; 2) for a site with anti-crawling measures, obtain the url as follows.

Open the Baidu Images page -> search for "披萨" (pizza) -> Inspect Element -> open the Network panel -> filter the resource list by searching for https://image.baidu.com/search -> right-click the acjson request and copy its link address -> done! (A sketch of that url's anatomy follows.)
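For reference, here is how that copied acjson url decomposes into query parameters and can be rebuilt in code. The meanings given for queryWord/word, pn, and rn are inferred from the url above and from how the class in section 3 pages through results; they are observations, not a documented API, and the empty parameters are omitted for brevity.

# coding=utf-8
# Rebuild the copied acjson url from its query parameters (Python 2).
# Parameter meanings are inferred from observation, not documented.
import urllib

params = {
    "tn": "resultjson_com",
    "ipn": "rj",
    "ct": "201326592",
    "fp": "result",
    "queryWord": "披萨",  # the search keyword
    "word": "披萨",       # the keyword again
    "cl": "2", "lm": "-1", "ie": "utf-8", "oe": "utf-8",
    "st": "-1", "ic": "0", "istype": "2", "nc": "1", "face": "0",
    "pn": "450",          # offset of the first result to return
    "rn": "30",           # number of results per page
}
print "https://image.baidu.com/search/acjson?" + urllib.urlencode(params)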

In addition, some helper code was added. For example, getImg(html) now post-processes the url list: the ObjURL values in the JSON response escape slashes as \/, so the backslashes are stripped before downloading, as the short example below shows.
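
A tiny illustration with a made-up ObjURL value (the host is hypothetical):

# The JSON escapes "/" as "\/"; stripping backslashes restores a usable url.
raw = 'http:\\/\\/img0.example.com\\/it\\/u=123.jpg'  # made-up ObjURL value
print raw.replace("\\", "")  # http://img0.example.com/it/u=123.jpg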

This code does scrape, but many of the downloaded images will not open, and after running for a while it dies with IOError: [Errno socket error] [Errno 10060]. Commenters suggested that adding a request header and setting a timeout can prevent this; see the next method, and the minimal sketch below.
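
A minimal sketch of the two suggested fixes using urllib2; the User-Agent string is just an example browser identity, and fetch() is a name chosen here for illustration:

# coding=utf-8
# Sketch: send a browser-like User-Agent and bound each request with a
# timeout so a single dead host cannot stall the whole crawl.
import urllib2

def fetch(url, timeout=20):
    headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64)"}
    req = urllib2.Request(url, headers=headers)
    try:
        return urllib2.urlopen(req, timeout=timeout).read()
    except Exception as e:
        print "fetch failed:", url, e
        return None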

3. The polished crawler code

# coding=utf-8
from urllib import quote
import urllib2 as urllib
import re
import os

class BaiduImage():
    def __init__(self, keyword, count=2000, save_path="img", rn=60):
        self.keyword = keyword
        self.count = count
        self.save_path = save_path
        self.rn = rn
        self.__imageList = []
        self.__totleCount = 0
        self.__encodeKeyword = quote(self.keyword)
        self.__acJsonCount = self.__get_ac_json_count()
        self.user_agent = "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.95 Safari/537.36"
        self.headers = {'User-Agent': self.user_agent, "Upgrade-Insecure-Requests": 1,
                        "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
                        "Accept-Encoding": "gzip, deflate, sdch",
                        "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6",
                        "Cache-Control": "no-cache"}
        # "Host": Host,

    def search(self):
        for i in range(0, self.__acJsonCount):
            url = self.__get_search_url(i * self.rn)
            response = self.__get_response(url).replace("\\", "")
            image_url_list = self.__pick_image_urls(response)
            self.__save(image_url_list)

    def __save(self, image_url_list, save_path=None):
        if save_path:
            self.save_path = save_path
        print "Already stored " + str(self.__totleCount) + " images"
        print "Now saving " + str(len(image_url_list)) + " images to: " + self.save_path
        if not os.path.exists(self.save_path):
            os.makedirs(self.save_path)
        for image in image_url_list:
            host = self.get_url_host(image)
            self.headers["Host"] = host
            with open(self.save_path + "/%s.jpg" % self.__totleCount, "wb") as p:
                try:
                    req = urllib.Request(image, headers=self.headers)
                    # Set a timeout on urlopen: if the url cannot be reached in
                    # time, skip to the next one so the program does not hang
                    # on a single address.
                    img = urllib.urlopen(req, timeout=20)
                    p.write(img.read())
                    p.close()
                    self.__totleCount += 1
                except Exception as e:
                    print "Exception: " + str(e)
                    p.close()
                    # Remove the partial file (same path we opened above)
                    if os.path.exists(self.save_path + "/%s.jpg" % self.__totleCount):
                        os.remove(self.save_path + "/%s.jpg" % self.__totleCount)
        print "Stored " + str(self.__totleCount) + " images in total"

    def __pick_image_urls(self, response):
        reg = r'"ObjURL":"(http://img[0-9]\.imgtn.*?)"'
        imgre = re.compile(reg)
        imglist = re.findall(imgre, response)
        return imglist

    def __get_response(self, url):
        page = urllib.urlopen(url)
        return page.read()

    def __get_search_url(self, pn):
        return "http://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&ct=201326592&is=&fp=result&queryWord=" + self.__encodeKeyword + "&cl=2&lm=-1&ie=utf-8&oe=utf-8&adpicid=&st=-1&z=&ic=0&word=" + self.__encodeKeyword + "&s=&se=&tab=&width=&height=&face=0&istype=2&qc=&nc=1&fr=&pn=" + str(pn) + "&rn=" + str(self.rn) + "&gsm=1000000001e&1486375820481="

    def get_url_host(self, url):
        reg = r'http://(.*?)/'
        hostre = re.compile(reg)
        host = re.findall(hostre, url)
        if len(host) > 0:
            return host[0]
        return ""

    def __get_ac_json_count(self):
        # Number of acjson pages needed: ceil(count / rn)
        a = self.count % self.rn
        c = self.count / self.rn
        if a:
            c += 1
        return c
# coding=utf-8
from BaiduImage import BaiduImage

keyword = "披萨"
save_path = "/spider_data_"

search = BaiduImage(keyword, save_path=save_path)
search.search()

This code prevents the errors described above, and you only need to type in a keyword to scrape matching images, which is very convenient! As for why exactly it works, honestly, I don't know~
But the reason a bare keyword suffices is the regular structure of the JSON url: you simply substitute the (URL-encoded) keyword into the url template, as the sketch below confirms.
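
A quick check of that substitution in Python 2: the percent-encoding produced by quote() matches the queryWord/word values visible in the acjson url copied in section 2.

# coding=utf-8
from urllib import quote

# URL-encode the keyword exactly as __get_search_url() does
print quote("披萨")  # %E6%8A%AB%E8%90%A8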

References:
http://blog.csdn.net/u014015972/article/details/50541839
http://bbs.csdn.net/topics/390933778
http://www.jb51.net/article/105891.htm
http://blog.csdn.net/dodouaj/article/details/54908665
