A crawler that can be used to boost page views and scrape a site's full page content


#-*- coding:utf-8 -*-

import http.client
import random
import urllib.request as request
from urllib.error import URLError
import time


def get_ip_list_local(file_name):
    # Read the proxy list (one ip:port per line), dropping blank lines and
    # trailing newlines so each entry can be used in a proxy URL directly.
    with open(file_name, mode='r', encoding='utf-8') as f:
        return [line.strip() for line in f if line.strip()]


def load_web_content(product_url, isagent, agent_ip):
    print('http://' + agent_ip)
    req = request.Request(product_url)
    req.add_header('User-Agent',
                   'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 '
                   '(KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36')
    if isagent:
        try:
            proxy_handler = request.ProxyHandler(
                {'http': 'http://' + agent_ip, 'https': 'http://' + agent_ip})
            opener = request.build_opener(proxy_handler)
            data = opener.open(req)
            dt_re = data.read().decode('utf-8')
            data.close()
            return dt_re
        except URLError as e:
            print('URLError: ', e)
            print('Reason: ', e.reason)
            return None
        # RemoteDisconnected is a subclass of ConnectionResetError, so it
        # must be caught first or its handler would be unreachable.
        except http.client.RemoteDisconnected as e:
            print('RemoteDisconnected: ', e)
            return None
        except TimeoutError as e:
            print('TimeoutError: ', e)
            return None
        except ConnectionResetError as e:
            print('ConnectionResetError: ', e)
            return None
        except http.client.IncompleteRead as e:
            print('IncompleteRead: ', e)
            return None
    else:
        r = request.urlopen(req)
        return r.read().decode('utf-8')


def process_spider(agent_ips, product_url):
    while True:
        content = load_web_content(product_url, True, random.choice(agent_ips))
        print(content)
        # Sleep for interval_time seconds before the next fetch.
        interval_time = random.randint(15, 60)
        time.sleep(interval_time)
        print(str(interval_time) + 's elapsed, starting the next fetch......')


if __name__ == '__main__':
    # Backslashes must be doubled (or use forward slashes): '.\a...' would
    # otherwise put a bell character (\a) into the path.
    file_name = '.\\agent_ip\\enable_agent_ip.txt'
    agent_ips = get_ip_list_local(file_name)
    # Fetch the product page every 15-60 s through a randomly chosen proxy;
    # the fetched content can then be saved locally.
    product_url = 'url'  # placeholder: set this to the page to crawl
    process_spider(agent_ips, product_url)
    print('spider finished loading product content!')  # unreachable while the loop above runs forever
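The script's comments say the fetched content should be saved locally, but process_spider only prints it. Below is a minimal sketch of that missing step; the save_content helper, the pages output directory, and the timestamped naming scheme are all assumptions of mine, not part of the original script.

import os
import time

def save_content(content, out_dir='pages'):
    # load_web_content returns None on a failed fetch; skip those.
    if content is None:
        return None
    os.makedirs(out_dir, exist_ok=True)
    # Hypothetical naming scheme: timestamped file names so repeated
    # fetches of the same URL do not overwrite each other.
    path = os.path.join(out_dir, 'page_%d.html' % int(time.time()))
    with open(path, mode='w', encoding='utf-8') as f:
        f.write(content)
    return path

With this in place, process_spider could call save_content(content) instead of print(content).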
Proxy IPs, enable_agent_ip.txt:
61.143.38.53:8118
211.127.160.240:8080
125.165.2.211:8080
182.30.224.180:80
210.245.26.140:3128
185.13.228.124:1009
46.101.92.212:80
59.111.80.139:80
195.178.56.32:8080
103.227.60.210:8080
190.248.158.194:8080
50.245.168.108:8080
96.9.69.210:53281
186.193.186.3:8080
125.165.2.211:8080
197.210.230.5:80
85.204.234.251:8080
218.202.122.221:53281
37.26.86.57:8080
187.44.182.194:8080
167.205.6.6:80
152.169.134.125:9999
141.105.162.190:8080
190.248.136.229:53281
104.41.51.173:8080
91.221.103.183:8080
13.126.69.46:80
118.102.1.114:8081
122.53.59.194:80
187.5.218.25:53281
91.193.128.76:8080
211.41.163.99:3128
204.11.243.70:3128
104.223.72.199:3128
180.211.115.155:8081
43.208.9.42:8080
94.177.199.78:3128
101.53.136.123:8080
191.241.36.156:3128
40.85.184.189:8080
201.249.88.225:80
114.199.118.186:8080
101.53.136.123:8080
86.120.79.89:3128
131.255.153.171:3128
117.239.66.73:80
177.53.56.208:8080
40.85.184.189:8080
203.189.142.23:53281
59.50.68.34:53281
37.17.177.197:3128
139.196.13.42:80
202.62.9.187:8080
83.143.26.70:53281
186.193.30.101:3128
87.229.54.42:8080
202.83.162.214:8080
187.87.48.62:8080
47.74.44.92:3128
36.67.97.223:8080
103.242.239.161:65103
192.241.134.233:3128
68.171.65.230:8081
89.31.44.108:3128
180.211.91.130:8080
177.5.28.2:8080
200.114.97.14:53281
187.20.97.68:55555
52.63.138.194:8080
43.245.119.106:8080
103.195.24.81:51552
118.179.151.172:8080
118.97.29.203:8080
192.99.55.120:3128
185.76.147.151:3128
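The file name enable_agent_ip.txt suggests the proxies above were already filtered for liveness. Below is a minimal sketch of how such a filter could be built with the same urllib machinery; the raw candidate file raw_agent_ip.txt, the test URL, and the 5-second timeout are all my choices, not from the original.

import urllib.request

def filter_alive_proxies(candidates, test_url='http://example.com', timeout=5):
    alive = []
    for proxy in candidates:
        handler = urllib.request.ProxyHandler(
            {'http': 'http://' + proxy, 'https': 'http://' + proxy})
        opener = urllib.request.build_opener(handler)
        try:
            # A proxy counts as usable if it fetches the test page in time.
            opener.open(test_url, timeout=timeout)
            alive.append(proxy)
        except Exception:
            pass  # dead, slow, or connection-refusing proxies are dropped
    return alive

if __name__ == '__main__':
    # Hypothetical input file of unverified ip:port candidates.
    with open('agent_ip/raw_agent_ip.txt', encoding='utf-8') as f:
        candidates = [line.strip() for line in f if line.strip()]
    with open('agent_ip/enable_agent_ip.txt', mode='w', encoding='utf-8') as f:
        f.write('\n'.join(filter_alive_proxies(candidates)))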
