Data Analysis and Visualization of Ganji.com Job Postings


1. Writing the crawler in Python: downloading pages with urllib and parsing with XPath

From each listing page we extract the fields we need (company name, salary, work location, work experience, minimum education, number of openings, and company size), then write them as structured records to a txt file for later analysis.

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import string
import urllib.parse
import urllib.request
from urllib.request import Request

import lxml.html


def getHtml(url):
    """Download a page and return its raw HTML bytes."""
    headers = {'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                             '(KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36'}
    # Percent-encode any non-ASCII characters in the URL.
    url = urllib.parse.quote(url, safe=string.printable)
    req = Request(url, headers=headers)
    page = urllib.request.urlopen(req)
    return page.read()


def parseAndSave(html):
    """Extract the job fields with XPath and append them, tab-separated, to ganji_info.txt."""
    tree = lxml.html.fromstring(html)
    # Company/title block of each listing, followed by its six detail fields.
    src0 = tree.xpath('//div[@class="s-btop s-bb1 "]//p/text()')
    src1 = tree.xpath('//div[@class="s-butt s-bb1"]//li[1]/text()')
    src2 = tree.xpath('//div[@class="s-butt s-bb1"]//li[2]/text()')
    src3 = tree.xpath('//div[@class="s-butt s-bb1"]//li[3]/text()')
    src4 = tree.xpath('//div[@class="s-butt s-bb1"]//li[4]/text()')
    src5 = tree.xpath('//div[@class="s-butt s-bb1"]//li[5]/text()')
    src6 = tree.xpath('//div[@class="s-butt s-bb1"]//li[6]/text()')
    with open('ganji_info.txt', 'ab') as jb:
        for flag in range(len(src0)):
            # One record per line: seven fields joined by tabs, CRLF-terminated.
            fields = [src0[flag], src1[flag].strip(), src2[flag], src3[flag],
                      src4[flag], src5[flag], src6[flag]]
            jb.write("\t".join(fields).encode('utf8'))
            jb.write("\r\n".encode('utf8'))


if __name__ == '__main__':
    url_template = "http://bj.ganji.com/zpruanjianhulianwang/o{}/"
    for pn in range(1, 301):  # crawl 300 list pages
        print('Crawling page', pn)
        url = url_template.format(pn)
        html = getHtml(url)
        parseAndSave(html)
    print('Crawl finished')
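
Since the records are saved for later analysis, here is a minimal sketch of how the resulting tab-separated file could be loaded into a DataFrame. The pandas-based approach and the column names are assumptions for illustration only; the original post does not name the columns or specify an analysis library.

import pandas as pd

# Assumed column labels matching the seven fields written by the crawler above
# (illustrative names, not from the original post).
columns = ['company', 'salary', 'location', 'experience',
           'education', 'openings', 'company_size']

# The crawler wrote UTF-8, tab-separated lines with no header row.
df = pd.read_csv('ganji_info.txt', sep='\t', names=columns,
                 encoding='utf-8', engine='python')

print(df.head())
print(df['education'].value_counts())  # e.g. distribution of minimum education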

