斗鱼tv爬虫

来源:互联网 发布:导航网源码大全 编辑:程序博客网 时间:2024/04/27 15:21

代码写得比较粗糙，将就着看看吧。（Note: the code below is rough, shared as-is for reference.）

import os

import requests
from bs4 import BeautifulSoup


def download(path, url):
    """Download *url* and write the raw response body to *path*.

    Returns True on success, False on any network/HTTP/filesystem error.
    """
    try:
        # timeout added so a stalled server cannot hang the scraper forever
        r = requests.get(url, timeout=10)
        # BUG FIX: the original wrote `r.raise_for_status` (bare attribute
        # access, never called), so HTTP error statuses were silently ignored.
        r.raise_for_status()
        with open(path, 'wb') as f:
            f.write(r.content)
        return True
    except (requests.RequestException, OSError):
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; best-effort semantics (return False) preserved.
        return False


def get(url):
    """Scrape a Douyu directory page and download each room's cover image.

    Creates (and chdirs into) a folder named after the page title, then
    downloads every listed room's `data-original` image.

    Returns a list of image URLs that failed to download, or False when the
    page itself could not be fetched or parsed.
    """
    try:
        r = requests.get(url, timeout=10)
        # BUG FIX: was `r.raise_for_status` without parentheses — no-op.
        r.raise_for_status()
        r.encoding = 'utf-8'
    except requests.RequestException:
        return False

    soup = BeautifulSoup(r.text, 'html.parser')
    title = soup.find('p', {'class': 'listcustomize-topcon-p'})
    if title is None:
        # Layout changed or an error page was served; the original would
        # have crashed with TypeError on `title['title']`.
        return False
    folder = title['title']  # renamed from misspelled `floder`
    print(folder)
    # makedirs(exist_ok=True) replaces the racy exists()+mkdir() pair.
    os.makedirs(folder, exist_ok=True)
    os.chdir('./' + folder)
    print("当前工作目录为 : %s" % os.getcwd())

    position = soup.find('div', attrs={'id': 'live-list-content'})
    if position is None:
        return []  # no room list found; nothing downloaded, nothing failed

    errors = []
    for tag in position.find_all('li'):
        name = tag.a.find('span', attrs={'class': 'dy-name ellipsis fl'})
        img_url = tag.a.img['data-original']
        if not name:
            errors.append(img_url)
            continue
        info = [name.string, tag.a['data-rid'], img_url]
        # NOTE(review): ':' in the filename is invalid on Windows; kept
        # to stay byte-compatible with the original naming scheme.
        path = './' + info[0] + ':' + info[1] + '.' + img_url.split('.')[-1]
        if download(path, img_url):
            print(info)
        else:
            errors.append(img_url)
    return errors


if __name__ == '__main__':
    url = 'https://www.douyu.com/directory/game/How'
    errors = get(url)
    print("error url:", errors)
运行结果:


原创粉丝点击