Python parallel programming: multithreaded crawling with the producer-consumer model


Use the producer-consumer pattern to crawl Han Han's Sina blog and save each article under its title.

The idea: deliberately split the code into a download module and a producer-consumer module, and keep them independent so that neither depends on the other.

The download module (download.py):

# coding: gbk
import urllib.request
import re
import os
import threading


def getUrlData(url):
    # Fetch a URL and return the response body decoded as utf-8.
    try:
        response = urllib.request.urlopen(url)
    except Exception as e:
        print(e)
        raise
    return response.read().decode('utf-8')


def getArticleUrlList(url, isOpenmutipThread=False, queue=None):
    # Get the article list of the given url and return it as a list of
    # (attributes, article_url, article_title) tuples. If this is called
    # from multiple threads, set isOpenmutipThread to True and pass in a
    # queue; the result is then put on the queue instead of being returned.
    data = getUrlData(url)
    reStr = r'<a title(.*?)href="(.*?)">(.*?)</a>'
    ls_data = re.findall(reStr, data)
    if isOpenmutipThread and queue is not None:
        queue.put(ls_data)
        return None
    else:
        return ls_data


def DownOnePage(url, filename, mutex):
    # Download one article page and save it as data_html\<filename>.html.
    data = getUrlData(url)
    try:
        with open('data_html\\' + filename + '.html', 'w') as f:
            with mutex:
                f.write(data)
    except OSError:
        print(filename + ' is not a valid file name')


def main():
    # Single-threaded version: download every article on the first list page.
    # os.mkdir('data_html')  # the output directory must exist
    url = 'http://blog.sina.com.cn/s/articlelist_1191258123_0_1.html'
    ls_data = getArticleUrlList(url)
    mutex = threading.Lock()
    for each in ls_data:
        DownOnePage(each[1], each[2], mutex)


if __name__ == '__main__':
    # main()
    pass
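For reference, here is a quick sketch of what the regular expression in getArticleUrlList pulls out of the page. The anchor tag below is a made-up placeholder (the real Sina article-list markup may differ slightly), but it shows why the rest of the code uses each[1] for the article URL and each[2] for the title:

import re

# Made-up anchor tag; the real article-list HTML may differ.
sample = '<a title="" target="_blank" href="http://blog.sina.com.cn/s/blog_xxx.html">Some Article</a>'
reStr = r'<a title(.*?)href="(.*?)">(.*?)</a>'
print(re.findall(reStr, sample))
# [('="" target="_blank" ', 'http://blog.sina.com.cn/s/blog_xxx.html', 'Some Article')]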
The producer-consumer module:
# coding: gbk
import threading
import queue

import download

# Lock shared by all consumers when writing files.
mutex = threading.Lock()
# Acquired by the main thread once every producer has finished;
# consumers use it as the signal to exit when the queue is empty.
exit_mutex = threading.Lock()


class Producer(threading.Thread):
    def __init__(self, q, fnProducer, *args):
        super().__init__()
        self.q = q
        self.fnProducer = fnProducer
        self.args = args

    def run(self):
        # Produce one article list and put it on the shared queue.
        data = self.fnProducer(self.args[0])
        self.q.put(data)


class Consumer(threading.Thread):
    def __init__(self, q, fnConsumer, *args):
        super().__init__()
        self.q = q
        self.fnConsumer = fnConsumer
        self.args = args

    def run(self):
        while True:
            try:
                data = self.q.get(block=False)
            except queue.Empty:
                # The queue is empty; exit only once the main thread has
                # acquired exit_mutex (i.e. all producers are done).
                if exit_mutex.locked():
                    break
            else:
                for each in data:
                    # each is (attributes, article_url, article_title)
                    self.fnConsumer(each[1], each[2], mutex)


def main():
    q = queue.Queue()
    ls_producer = []
    ls_consumer = []
    for i in range(7):
        url = 'http://blog.sina.com.cn/s/articlelist_1191258123_0_' + str(i + 1) + '.html'
        p = Producer(q, download.getArticleUrlList, url)
        p.start()
        ls_producer.append(p)
    for i in range(7):
        c = Consumer(q, download.DownOnePage)
        c.start()
        ls_consumer.append(c)
    for each in ls_producer:
        each.join()            # wait for all producer threads to finish
    exit_mutex.acquire()       # take the exit lock: no more data is coming
    for each in ls_consumer:
        each.join()            # wait for all consumer threads to finish
    exit_mutex.release()       # release the exit lock


if __name__ == '__main__':
    main()
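One thing to note: when the queue is empty, the consumers above busy-wait on queue.Empty until the main thread acquires exit_mutex. A common alternative is to have the main thread push one sentinel value (a "poison pill") per consumer once all producers have joined, so consumers can block on q.get() and exit cleanly. The snippet below is only a minimal sketch of that idea; the SENTINEL marker, the handle callback, and print as the consumer action are placeholders, not part of the original code:

import queue
import threading

SENTINEL = None  # placeholder meaning "no more work"

def consumer(q, handle):
    while True:
        data = q.get()      # block instead of busy-waiting on queue.Empty
        if data is SENTINEL:
            break           # all producers are done and the queue is drained
        handle(data)

def demo():
    q = queue.Queue()
    workers = [threading.Thread(target=consumer, args=(q, print)) for _ in range(3)]
    for w in workers:
        w.start()
    for item in (['a'], ['b'], ['c']):
        q.put(item)         # stand-in for the producer threads
    for _ in workers:
        q.put(SENTINEL)     # one sentinel per consumer replaces exit_mutex
    for w in workers:
        w.join()

if __name__ == '__main__':
    demo()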


