抓取搜狗壁纸公园的图片(一)

来源:互联网 发布:sql server企业版安装 编辑:程序博客网 时间:2024/04/28 16:04

利用 urllib2 和 BeautifulSoup 抓取搜狗壁纸公园的图片并下载。以下为核心代码,后续会继续优化。

http://bizhi.sogou.com/park



#-*-coding:utf-8-*-'''Created on 2016-3-15@author: 201507220131'''from bs4 import BeautifulSoupfrom bs4 import elementimport urllib2import reclass SougouPic():    '''                抓取搜狗壁纸    '''    def __init__(self):        self.Baseurl = 'http://bizhi.sogou.com'        self.itemsList = []        self.nameIndex = 0    def getBaseUrl(self):        return self.Baseurl    #获取网页代码,默认抓取首页代码    def getCode(self,url=None):        if url:            request = urllib2.Request(self.Baseurl+url)        else:            request = urllib2.Request(self.Baseurl+'/park')        response = urllib2.urlopen(request)        return response.read().decode('gbk')    #抓取分类信息    def getClassfiy(self,code):        pattern1 = re.compile('<div.*?class="class_alta.*?<span class="white_font">(.*?)</span></a>.*?',re.S)        pattern2 = re.compile('<div class="tag_mid font_b3d465">.*?f=nav">(.*?)</a>',re.S)        items1 = re.findall(pattern1,code)        return items1    #抓取图片分类信息    def getClassfiy2(self,code):        soup = BeautifulSoup(code)        items1_Code = soup.find_all(class_ = 'white_font')        for index,item in enumerate(items1_Code):            childList = []            childDic = {}            if index == 0:                divcode = soup.find_all(class_='class_more_side class_more_side_all')                for i in divcode:                    for c in i.children:                        if type(c) is element.Tag:                            childDic = {'name':c.string,'url':c.get('href'),'child':''}                            childList.append(childDic)            else:                divCode = item.previous_element.next_sibling.next_element.next_element.next_element                for i in divCode:                    if type(i) is element.Tag:                        childDic = {'name':i.string,'url':i.get('href'),'child':''}                        childList.append(childDic)            dic = {'name':item.string,'url':item.previous_element.get('href'),'child':childList}          
  self.itemsList.append(dic)        return self.itemsList    #获取图片链接    def getPicUrl(self,code):        soup = BeautifulSoup(code)        items = soup.find_all(class_='wallpaper_dis')        picUrlList = []        for i in items:            img = i.next_element.next_element.next_element.next_element            picUrlList.append(img.get('src'))        return picUrlList    #下载图片    def downLoad(self,urlList):        for url in urlList:            data = urllib2.urlopen(url)            file = open('pic/pic'+str(self.nameIndex)+'.jpg','wb')            file.write(data.read())            file.close()            self.nameIndex += 1sougou = SougouPic()homeCode = sougou.getCode()itemsList = sougou.getClassfiy2(homeCode)#print itemsListbaseUrl = sougou.getBaseUrl()for item in itemsList:    print item.get('name').decode('utf-8'),'#########################'    childs = item.get('child')    print '子分类:'    for child in childs:        print child.get('name').decode('utf-8'),':',baseUrl+child.get('url')picCode = sougou.getCode('/label/index/588?f=popup')items = sougou.getPicUrl(picCode)for i in items:    print '############'    print isougou.downLoad(items)    


0 0
原创粉丝点击