Python Crawler in Practice (9): Scraping Dynamic Web Pages

Dynamic pages fill in their content through background requests, so rather than parsing the rendered HTML we can find the data interface the page calls and POST to it directly. The first example requests ChiNext ('cyb') quotes from Sina Finance's Market_Center.getHQNodeData interface, quotes the keys of the JavaScript-style response so that json.loads can parse it, and prints the result with PrettyTable:

# coding=utf-8
import re
import json
import requests
from prettytable import PrettyTable


def getHtml(url):
    # Form data expected by the quote interface: page 1, 40 records,
    # sorted by symbol in ascending order, node 'cyb' (the ChiNext board).
    data = {
        'page': 1,
        'num': 40,
        'sort': 'symbol',
        'asc': 1,
        'node': 'cyb',
        'symbol': '',
        '_s_r_a': 'page'
    }
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}
    try:
        page = requests.post(url, data=data, headers=headers)
        page.encoding = 'gbk'
        html = page.text
        return html
    except:
        return ""


def getdata(html):
    # The interface returns object literals with unquoted keys, so quote the
    # keys step by step before handing the text to json.loads.
    data = html.replace(':', '":')   # symbol:"sz300001"  ->  symbol":"sz300001"
    data = data.replace(',', ',"')   # ,name":            ->  ,"name":
    data = data.replace('{', '{"')   # {symbol":          ->  {"symbol":
    data = data.replace('"{', '{')   # ,"{"symbol":       ->  ,{"symbol":
    # Drop time values such as 15":03":05 that the replacements above mangled.
    data = re.sub(r'\d+":\d+":\d+', '', data)
    data = json.loads(data)
    row = PrettyTable()
    row.field_names = ["Symbol", "Name", "Latest", "Change", "Change %", "Buy", "Sell",
                       "Prev Close", "Open", "High", "Low", "Volume (lots)", "Turnover (10k)"]
    for item in data:
        row.add_row((item['symbol'], item['name'], item['trade'], item['pricechange'],
                     item['changepercent'], item['buy'], item['sell'], item['settlement'],
                     item['open'], item['high'], item['low'], item['volume'], item['amount']))
    print(row)


if __name__ == '__main__':
    url = 'http://vip.stock.finance.sina.com.cn/quotes_service/api/json_v2.php/Market_Center.getHQNodeData?'
    html = getHtml(url)
    getdata(html)

The second example posts paging parameters to the report-list interface behind the datacenter.mep.gov.cn data center and simply prints the returned text:

# coding=utf-8
import requests


def getHtml(url):
    # Paging parameters accepted by the interface: page 2, 40 records per page.
    data = {
        'page.pageNo': 2,
        'tempPageSize': 40,
    }
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}
    page = requests.post(url, headers=headers, data=data)
    html = page.text
    print(html)


if __name__ == '__main__':
    url = 'http://datacenter.mep.gov.cn:8099/ths-report/report!list.action?xmlname=1465594312346'
    getHtml(url)
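The example above only fetches page 2. As a minimal sketch, assuming the same two form fields ('page.pageNo' and 'tempPageSize') are all that control paging, walking the first few pages could look like this:

import requests

url = 'http://datacenter.mep.gov.cn:8099/ths-report/report!list.action?xmlname=1465594312346'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:56.0) Gecko/20100101 Firefox/56.0'}
for page_no in range(1, 4):
    # Request pages 1-3 with 40 records each and show how much text came back.
    data = {'page.pageNo': page_no, 'tempPageSize': 40}
    resp = requests.post(url, headers=headers, data=data)
    print(page_no, len(resp.text))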

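Back in the first example, the chained replace calls work for this particular response, but they also touch colons inside values, which is why the time fields have to be stripped afterwards. A more targeted sketch, assuming only that the interface returns object literals with unquoted keys, quotes just the keys; parse_sina_response is a hypothetical helper, not part of the original script:

import json
import re


def parse_sina_response(text):
    # Quote bare keys that follow '{' or ',' (e.g. {symbol:"sz300001", ...}),
    # leaving colons inside string values such as "15:03:05" untouched.
    fixed = re.sub(r'([{,])\s*([A-Za-z_]\w*)\s*:', r'\1"\2":', text)
    return json.loads(fixed)

With this, getdata could call parse_sina_response(html) in place of the four replace calls and the re.sub.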