A Failed Attempt at Scraping CNKI (知网)

#!/usr/bin/python
# encoding=utf-8
__author__ = 'Administrator'

from bs4 import BeautifulSoup
import urllib.request   # Python 3: urllib.request must be imported explicitly
import requests
import time
import re

if __name__ == "__main__":
    import os
    from selenium import webdriver
    from selenium.webdriver.support.ui import WebDriverWait

    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}
    chromedriver = "/home/henson/Documents/pycharm/webdriver/chromedriver"
    os.environ["webdriver.chrome.driver"] = chromedriver
    driver = webdriver.Chrome(chromedriver)

    driver.get('http://kns.cnki.net/kns/brief/result.aspx?dbprefix=CJFQ')
    # Scroll only after the page has loaded; scrolling before driver.get() has no effect.
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")

    # The copied XPath //*[@id='b_Text0'] never matched; locating by name works.
    inputElement = driver.find_element_by_name("txt_1_value1")
    searchWord = "水"
    inputElement.send_keys(searchWord)
    driver.find_element_by_xpath("//*[@id='ddSubmit']/span").click()  # run the search
    driver.find_element_by_xpath("//*[@id='btnSearch']").click()
    time.sleep(2)

    currentURL = driver.current_url
    urlList = []
    localDir = '/home/henson/Downloads/paper'
    driver.find_element_by_xpath("//*[@id='XuekeNavi_Div']/div[1]/input[1]").click()  # clear subject filters
    currentURL = driver.current_url
    # (Several attempts to click through the subject tree, e.g.
    # //*[@id='B027child']/dd[1]/a, all raised element-matching errors.)

    # Re-fetch the result page over plain HTTP and pull every href out of it.
    req = urllib.request.Request(url=currentURL, headers=headers)
    html = urllib.request.urlopen(req).read()  # pass req, not the bare URL, so the headers are sent
    r = requests.get(currentURL, headers=headers)
    data = r.text
    link_list = re.findall(r"(?<=href=\").+?(?=\")|(?<=href=\').+?(?=\')", data)

    soup = BeautifulSoup(html, "html.parser")
    pagesum_text = soup.find('a').get_text()
    print(pagesum_text)
    titel = soup.find_all(re.compile("^a"))  # BS4 usage borrowed from another CNKI scraper

    # Pager slots: pages 1-10 sit at link indexes 1-10; after that the pager
    # re-renders and the useful index wraps back to 3. Fetch in two passes:
    # for pages 1-21 start at k=1 and wrap at k=11; for pages 19-n start at
    # k=12 and wrap at k=10.
    k = 1
    for j in range(1, 21):
        # driver.find_element_by_xpath("/html/body/div/div[2]/div/div[5]/div[2]"
        #     "/div/div/div/div[3]/div[2]/a[" + str(k) + "]").click()  # page link
        # time.sleep(2)
        if k == 11:
            k = 3
        else:
            k = k + 1
        for i in range(1, 100):
            try:
                number = driver.find_element_by_xpath(
                    "//*[@id='ctl00']/table/tbody/tr[2]/td/table/tbody/tr[2]/td[1]").text
                titel = driver.find_element_by_xpath(
                    "//*[@id='ctl00']/table/tbody/tr[2]/td/table/tbody/tr[2]/td[2]/a").text
                author = driver.find_element_by_xpath(
                    "//*[@id='ctl00']/table/tbody/tr[2]/td/table/tbody/tr[2]/td[3]").text
                cites = driver.find_element_by_xpath(  # the closing quote after ctl00 was missing here
                    "//*[@id='ctl00']/table/tbody/tr[2]/td/table/tbody/tr[2]/td[7]/span/a").text
                print(number)
                print(author)
                print(cites)
            except Exception:
                try:
                    number = driver.find_element_by_xpath(
                        "//*[@id='ctl00']/table/tbody/tr[2]/td/table/tbody/tr[2]/td[2]/a").text
                    author = driver.find_element_by_xpath(
                        "//*[@id='ctl00']/table/tbody/tr[2]/td/table/tbody/tr[2]/td[3]").text
                    cites = driver.find_element_by_xpath(
                        "//*[@id='ctl00']/table/tbody/tr[2]/td/table/tbody/tr[2]/td[7]/span/a").text
                except Exception:
                    break
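The j/k index arithmetic in the paging loop above is fragile because the pager re-renders after page 11. A simpler alternative, assuming the result page keeps CNKI's usual "下一页" (next page) link text, is to click that link by its text and stop once it disappears; a minimal sketch in the same Selenium 3 style as above:

from selenium.common.exceptions import NoSuchElementException

while True:
    # ... scrape the rows of the current page here ...
    try:
        # "下一页" is an assumption about the pager's link text; verify it on the page.
        driver.find_element_by_link_text("下一页").click()
        time.sleep(2)  # crude wait; a WebDriverWait condition would be more robust
    except NoSuchElementException:
        break  # no next-page link left, so this was the last page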

I assumed CNKI would be no different from VIP (维普); turns out I was too naive.
Q1: I couldn't work out how to tick an option in the matching dropdown and trigger the jump (one possible approach is sketched below).
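If the dropdown is a native <select> element, Selenium's Select helper can tick an option directly; the element name below is hypothetical, purely for illustration. CNKI also renders some menus as styled <div> lists, which need two ordinary clicks instead (open the menu, then click the option):

from selenium.webdriver.support.ui import Select

# Hypothetical field name; substitute the real one from the page source.
field = Select(driver.find_element_by_name("txt_1_sel"))
field.select_by_visible_text("主题")  # choose the option by its visible label

# For a styled <div> menu there is no <select> to wrap; click it open first:
# driver.find_element_by_xpath("//*[@id='ddSubmit']").click()
# driver.find_element_by_xpath("//a[text()='主题']").click()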
Q2: The page information I want to scrape from CNKI can't be fetched through the copied XPaths (maybe I lean on them too much, but they really are convenient for locating elements). I keep getting element-matching errors; I tried nodes several levels up and down, yet the mismatch persists. I've spent the last two days debugging this and am starting to question everything. (A likely cause and a workaround are sketched below.)
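Two things commonly break copied XPaths on CNKI even though they work in the browser's dev tools: the result list is rendered inside an iframe, which the driver must switch into first, and lookups can race the page load. A sketch, assuming the iframe id is "iframeResult" and the result table class is "GridTableContent" (both should be verified in the page source):

from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

# XPaths evaluated against the top document never match nodes inside an
# iframe; switch into it first (the id here is an assumption).
driver.switch_to.frame("iframeResult")

# Wait up to 10 s for the first result row instead of failing immediately.
row = WebDriverWait(driver, 10).until(
    EC.presence_of_element_located(
        (By.XPATH, "//table[@class='GridTableContent']//tr[2]/td[2]/a")))
print(row.text)

driver.switch_to.default_content()  # return to the top document when done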
Q3: Back when I scraped static news pages, BS4 never felt hard to use; now I can't get it to match a single node tag, and I'm falling out of love with it. (One concrete fix is sketched below.)
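One concrete bug in the script's BS4 call: soup.find_all(re.compile("^a")) matches every tag whose name starts with "a" (a, abbr, address, ...), not the result links. Matching on the "fz14" class that the commented-out XPath was already targeting is more direct; a small sketch, with the caveat that if the rows live inside an iframe, the outer page fetched with requests will not contain them and the iframe's own URL must be fetched instead:

# Title links on the result page carry class "fz14" (the class the
# original XPath was aiming at); match tag name plus class.
soup = BeautifulSoup(html, "html.parser")
for a in soup.find_all("a", class_="fz14"):
    print(a.get_text(strip=True), a.get("href"))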
Q4: ...
No progress, time draining away, worn out, and worst of all, no idea what to try next.
