Python Crawler: CSDN Blog Downloader (Improved Version)


The main change in this version is a restructuring of the code.

A separate user class (USER) has been split out.

Multi-threading has been added, so that several users' blogs can be downloaded at the same time; a sketch of the pattern follows.
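Because downloading is I/O-bound, Python's GIL is not a bottleneck here: each thread spends most of its time waiting on the network. Below is a minimal sketch of the same thread-per-user start/join structure that CSDNSPIDER.start uses in the full program; the download function and user names are placeholders, not part of the real code.

# -*- coding: utf-8 -*-
# Minimal sketch (Python 2): one thread per user, started and then joined.
import threading

def download(name):
    # stand-in for USER.start in the full program below
    print 'downloading blog of %s ...' % name

threads = []
for name in ['userA', 'userB', 'userC']:  # hypothetical user names
    t = threading.Thread(target=download, name=name, args=(name,))
    threads.append(t)
for t in threads:
    t.start()
for t in threads:
    t.join()  # wait until every download thread has finished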

The complete improved program (Python 2):

# -*- coding: utf-8 -*-
import os
import sys
import urllib
import urllib2
import re
import thread
import time
import codecs
import threading


class USER:
    def __init__(self, userName, path):
        self.name = userName
        self.articleList = []
        self.articleNum = 0
        self.pageIndex = 1
        self.rootURL = 'http://blog.csdn.net'
        self.contentURL = 'http://blog.csdn.net/' + self.name + '/article/list/' + str(self.pageIndex)
        self.rootPath = path + '/BlogCSDN/' + self.name

    def openURL(self, url):
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64)'}
        try:
            request = urllib2.Request(url, headers=headers)
            response = urllib2.urlopen(request)
            pageCode = response.read().decode('utf-8')
            return pageCode
        except urllib2.URLError, e:
            if hasattr(e, "reason"):
                print u"连接CSDN,错误原因", e.reason
            return None

    def getList(self):
        try:
            url = self.rootURL + '/' + self.name + '/article/list/' + str(self.pageIndex)
            if self.articleNum == 0:
                pageCode = self.openURL(url)
                if pageCode != None:
                    # When a regex has several groups, findall returns a list of
                    # string tuples [(), (), ... ()]; each tuple holds one string
                    # per group. The pattern below may return e.g. [('20', '18', '3')].
                    # With one group or none, findall returns a list of strings.
                    pattern = re.compile(r'blog_statistics">.*?<span>(\d+).*?</span>.*?<span>(\d+).*?</span>.*?<span>(\d+).*?</span>', re.S)
                    items = re.findall(pattern, pageCode)
                    for m in items[0]:
                        self.articleNum += int(m)
            num = 0
            articleList = []
            while num < self.articleNum:
                url = self.rootURL + '/' + self.name + '/article/list/' + str(self.pageIndex)
                pageCode = self.openURL(url)
                if not pageCode:
                    return None
                pattern = re.compile('link_title"><a href="(.*?)">(.*?)</a>', re.S)
                items = re.findall(pattern, pageCode)
                for item in items:
                    articleList.append([self.rootURL + item[0].strip(), item[1].strip()])
                    # print self.rootURL+item[0]+'-----'+item[1].strip()
                    num += 1
                    if num >= self.articleNum:
                        break
                self.pageIndex += 1
            self.articleList = articleList
        except urllib2.URLError, e:
            if hasattr(e, "reason"):
                print u"连接CSDN,错误原因", e.reason
            return None

    def getArticle(self):
        for item in self.articleList:
            pageCode = self.openURL(item[0])
            if not pageCode:
                print u'链接无效'
                return None
            pattern = re.compile('class="article_content">(.*?)</div>', re.S)
            content = re.findall(pattern, pageCode)
            articleID = (item[0].split('/'))[6]
            title = item[1]
            fileName = self.rootPath + '/' + title + '---' + articleID + '.txt'
            # the '[0]' was missing here at first; findall returns a list, and the
            # resulting encoding mismatch took a long time to track down
            text = content[0]
            text = self.repairCode(text)
            self.saveArticle(fileName, text)
            print self.name + ':' + articleID + '.txt has saved!'

    def saveArticle(self, fileName, text):
        if not os.path.exists(self.rootPath):
            os.makedirs(self.rootPath)
        try:
            if os.path.exists(fileName):
                print u'%s 文件已经存在' % (fileName)
                return None
            fobj = codecs.open(fileName, 'w+', 'utf-8')
            fobj.write(text)
            fobj.close()
        except IOError:
            print 'IO error!'
            return None

    def repairCode(self, text):
        # strip HTML tags, turning closing tags into line breaks
        replace = re.compile(r'</\w+>')
        text = re.sub(replace, "\r\n", text)
        replace = re.compile('<.*?>')
        text = re.sub(replace, "", text)
        # map common HTML entities back to characters
        rTermsDict = {'&lt;': '<', '&gt;': '>', '&amp;': '&', '&quot;': '"',
                      '&nbsp;': ' ', '&#39;': '\'', '&#43;': '+'}
        for item in rTermsDict.keys():
            replace = re.compile(item)
            text = re.sub(replace, rTermsDict[item], text)
        # if codeType != 'cpp':  # hook for handling other code formats
        #     return text
        # (code-block alignment could be added here)
        return text

    def start(self):
        print 'reading %s blog...' % (self.name)
        self.getList()
        self.getArticle()
        print "%s blog %d has saved!" % (self.name, self.articleNum)


class CSDNSPIDER:
    def __init__(self):
        # flag recording whether the program is still running
        self.rootPATH = 'E:/Python_Spider'
        self.enable = False

    def start(self):
        # extended to multi-threading: one downloader thread per user
        userList = ['waytoaccept', 'xiaosebi1111', 'zhiyuan_2007']
        thrList = []
        for name in userList:
            user = USER(name, self.rootPATH)
            t = threading.Thread(target=user.start, name=name)
            thrList.append(t)
        for thr in thrList:
            thr.start()
        for thr in thrList:
            thr.join()
        self.enable = True
        print u'欢迎下次使用CSDN爬虫!'


spider = CSDNSPIDER()
spider.start()
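As an aside, the hand-written entity table in repairCode could be replaced by the standard library: Python 2's HTMLParser class has a long-standing (though undocumented) unescape method that handles both named and numeric entities. A minimal sketch of that alternative, not what the program above actually uses:

# -*- coding: utf-8 -*-
# Sketch: unescaping HTML entities with the Python 2 standard library.
from HTMLParser import HTMLParser

def unescape_entities(text):
    # handles named (&lt;) and numeric (&#43;) entities alike
    return HTMLParser().unescape(text)

print unescape_entities('&lt;p&gt;a &#43; b&lt;/p&gt;')  # prints: <p>a + b</p>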

Crawlers are getting more and more interesting.
