Lianjia (链家网) Second-Hand Listings Scraper

# Scrape the title, address, total price and unit price of second-hand
# listings on sh.lianjia.com and save the result to an Excel file.
import requests
from bs4 import BeautifulSoup
import pandas

headers = {
    # Must be 'User-Agent'; the original 'UserAgent' key is not a valid header name.
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'
}

def get_details(url):
    # Fetch one listings page and return a list of dicts, one per listing.
    res = requests.get(url, headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    titles = soup.select('#js-ershoufangList > div.content-wrapper > div.content > div.m-list > ul > li > div > div.prop-title > a')
    addresses = soup.select('#js-ershoufangList > div.content-wrapper > div.content > div.m-list > ul > li > div > div.info-table > div > span.info-col.row2-text > a.laisuzhou > span')
    prices = soup.select('#js-ershoufangList > div.content-wrapper > div.content > div.m-list > ul > li > div > div.info-table > div > div > span.total-price.strong-num')
    unit_prices = soup.select('#js-ershoufangList > div.content-wrapper > div.content > div.m-list > ul > li > div > div.info-table > div > span.info-col.price-item.minor')
    details = []
    for title, address, price, unit_price in zip(titles, addresses, prices, unit_prices):
        details.append({
            'title': title.get_text().strip(),
            'address': address.get_text().strip(),
            'price': price.get_text().strip(),
            'unit_price': unit_price.get_text().strip(),
        })
    return details

# Crawl listing pages d1 through d29; each element of lj is one page's listings.
lj = []
for i in range(1, 30):
    url = 'http://sh.lianjia.com/ershoufang/d' + str(i)
    lj.append(get_details(url))

# Turn each page's list of dicts into a DataFrame, stack them, and renumber
# the rows. (The result variable is renamed from the original `re`, which
# shadows the standard-library module of the same name.)
frames = list(map(pandas.DataFrame, lj))
result = pandas.concat(frames)
result.index = list(range(result.shape[0]))
# Writing .xls requires the xlwt package on older pandas; on pandas 2.x,
# use an .xlsx filename (handled by openpyxl) instead.
result.to_excel('re.xls')
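Firing all 29 requests back-to-back makes it easy for the site to throttle or block the crawler, and a single failed page aborts the whole run. Below is a minimal, more defensive sketch of the crawl loop, reusing the get_details function above; the one-second pause and the skip-on-failure behavior are assumptions, not part of the original post.

import time
import requests

lj = []
for i in range(1, 30):
    url = 'http://sh.lianjia.com/ershoufang/d' + str(i)
    try:
        lj.append(get_details(url))
    except requests.RequestException as exc:
        # Skip pages that fail to download instead of crashing the whole crawl.
        print('page', i, 'failed:', exc)
    time.sleep(1)  # pause between requests; length is an assumption, tune as needed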