Python爬虫urllib2笔记(二)

来源:互联网 编辑:程序博客网 时间:2024/05/19 16:37

urllib2 升级用法——如果浏览器可以访问而爬虫访问返回 403,说明目标站点有防爬虫机制,需要伪装请求头。

#-*-coding:utf-8-*-#urllib2升级模块--浏览器可访问爬虫访问返回403说明有防爬虫#F12查看Network(刷新)点击names查看Headers模仿用户登录import urllib2url="http://blog.csdn.net/q383700092"#模仿请求Requestreq=urllib2.Request(url)#复制网页里的Headers里的Request#用户代理req.add_header("User-Agent","Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36")#get访问的url地址req.add_header("GET",url)#主机信息req.add_header("Host","blog.csdn.net")#从哪里链接过来--有些网站必须是内部网站链接过去才能访问req.add_header("Referer","http://blog.csdn.net/")html=urllib2.urlopen(req)print html.read() 

封装版本

#-*-coding:utf-8-*-#改进版本-封装性更好import urllib2url="http://blog.csdn.net/q383700092"#键对值的字典信息my_headers={"User-Agent":"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/52.0.2743.82 Safari/537.36","Host":"blog.csdn.net","Referer":"http://blog.csdn.net/","GET":url}#模仿请求Requestreq=urllib2.Request(url,headers=my_headers)html=urllib2.urlopen(req)print html.read() 


#各种浏览器的userAgent--多个用户头防止被识别

#-*-coding:utf-8-*-#改进版本-封装性更好import urllib2import randomurl="http://blog.csdn.net/q383700092"#各种浏览器的userAgent--多个用户头防止被识别my_headers=["Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50","User-Agent:Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50","Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1","Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1","Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11","Mozilla/5.0 (Windows NT 6.1) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.41 Safari/535.1 QQBrowser/6.9.11079.201"]#代理IP(暂无),假的用户头部信息def get_content(url,headers):"""@获取403禁止访问的网页"""#随机选择random_header=random.choice(headers)print random_headerreq=urllib2.Request(url)req.add_header("User-Agent",random_header)req.add_header("Host","blog.csdn.net")req.add_header("Referer","http://blog.csdn.net/")req.add_header("GET",url)content=urllib2.urlopen(req).read()return contentprint get_content(url,my_headers)




0 0
原创粉丝点击