二.BeautifulSoup多线程下载百思不得姐图片

来源:互联网 发布:天猫好还是淘宝好 编辑:程序博客网 时间:2024/05/17 02:43
# -*- coding: utf-8 -*-
"""Multi-threaded image downloader for budejie.com picture pages.

Scrapes image URLs from the first few listing pages with a regex and
downloads them concurrently with a pool of worker threads.

NOTE(review): despite the article title, BeautifulSoup was imported but
never used — the URLs are extracted with a regular expression, so the
unused bs4 import has been dropped. Ported from Python 2 to Python 3
(``queue`` module, ``print()`` function); runtime behavior is unchanged.
"""
import os
import re
import time
import queue
import threading

import requests
from requests.exceptions import HTTPError, ConnectionError


class get_Url(object):
    """Collect image URLs from the first ``num - 1`` listing pages."""

    def __init__(self, num):
        # Pages 1 .. num-1 are fetched (range() excludes the upper bound).
        self.num = num
        self.headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'}
        self.url = 'http://www.budejie.com/pic/{}'
        self.url_all = []

    def get_pic(self):
        """Fetch each listing page and return the accumulated image URLs.

        Network errors on a page are printed and that page is skipped, so a
        single bad page does not abort the whole scrape.
        """
        # Compile once, outside the loop; images sit in data-original attrs.
        pattern = re.compile(r'data-original="(\w.+?\.\w+)" title=')
        for page in range(1, self.num):
            try:
                rq = requests.get(self.url.format(page),
                                  headers=self.headers, timeout=5)
                self.url_all.extend(pattern.findall(rq.text))
            except (HTTPError, ConnectionError) as e:
                print(e)
        return self.url_all


class ThreadUrl(threading.Thread):
    """Worker thread: pull image URLs off the queue and save them to disk."""

    def __init__(self, task_queue):
        threading.Thread.__init__(self)
        self.queue = task_queue
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:48.0) Gecko/20100101 Firefox/48.0",
        }
        self.workpath = 'E:\\os\\baisi_mp4'
        # Original crashed with IOError if the target directory was missing.
        if not os.path.isdir(self.workpath):
            os.makedirs(self.workpath)

    def run(self):
        while True:
            try:
                # get_nowait() fixes the empty()/get() race in the original:
                # two threads could both see a non-empty queue, then one
                # would block forever on get() after the other took the
                # last item, deadlocking queue.join().
                url_name = self.queue.get_nowait()
            except queue.Empty:
                break
            try:
                print(u'正在下载%s' % url_name)
                name = os.path.basename(url_name)
                data = requests.get(url_name, headers=self.headers,
                                    timeout=5).content
                with open(os.path.join(self.workpath, name), 'wb') as ps:
                    ps.write(data)
            except (HTTPError, ConnectionError) as e:
                # Skip a failed download instead of killing the thread.
                print(e)
            finally:
                # Always acknowledge the item, even on failure — otherwise
                # queue.join() in main() would hang forever.
                self.queue.task_done()


def main():
    """Scrape the first 9 listing pages and download all images with 10 threads."""
    task_queue = queue.Queue()
    for url in get_Url(10).get_pic():
        task_queue.put(url)
    for _ in range(10):
        # Original shadowed the loop counter `t` with the thread object;
        # use a throwaway counter name instead.
        worker = ThreadUrl(task_queue)
        worker.daemon = True  # don't block interpreter exit
        worker.start()
    task_queue.join()


if __name__ == '__main__':
    start = time.time()
    main()
    print("spend Time: %s" % (time.time() - start))
原创粉丝点击