A verified approach to simulated login, combining Scrapy and Selenium (PhantomJS)

The spider below drives a real browser through the site's login form with Selenium, harvests the resulting cookies, and hands them to Scrapy for the subsequent requests. The code keeps the legacy Scrapy/Python 2 APIs of the original post; the obvious defects (the truncated first import and the default argument that bound the get_cookies function object instead of its return value) are fixed, and comments are added.

from scrapy.spider import BaseSpider
from scrapy.http import FormRequest, Request
from scrapy.selector import HtmlXPathSelector
from selenium import webdriver


class MySpider(BaseSpider):
    name = 'MySpider'
    start_urls = ['http://my_domain.com/']

    def get_cookies(self):
        # Log in through a real browser and return the session cookies.
        driver = webdriver.Firefox()
        driver.implicitly_wait(30)
        base_url = "http://www.my_domain.com/"
        driver.get(base_url)
        driver.find_element_by_name("USER").clear()
        driver.find_element_by_name("USER").send_keys("my_username")
        driver.find_element_by_name("PASSWORD").clear()
        driver.find_element_by_name("PASSWORD").send_keys("my_password")
        driver.find_element_by_name("submit").click()
        cookies = driver.get_cookies()
        driver.close()
        return cookies

    def parse(self, response):
        # Fetch the cookies via Selenium, then re-request the site with them.
        # (The original declared "my_cookies=get_cookies" as a default argument,
        # which passes the function object itself rather than the cookie list.)
        my_cookies = self.get_cookies()
        return Request(url="http://my_domain.com/",
                       cookies=my_cookies,
                       # Same URL as start_urls, so bypass the duplicate filter.
                       dont_filter=True,
                       callback=self.login)

    def login(self, response):
        # Additionally submit the site's login form from within Scrapy itself.
        return [FormRequest.from_response(response,
                formname='login_form',
                formdata={'USER': 'my_username', 'PASSWORD': 'my_password'},
                callback=self.after_login)]

    def after_login(self, response):
        # Print the page title to confirm the logged-in page was reached.
        hxs = HtmlXPathSelector(response)
        print hxs.select('/html/head/title').extract()
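Two details worth sketching separately. Selenium's get_cookies() returns a list of dicts with keys such as 'name', 'value', 'domain' and 'expiry'; Scrapy's Request(cookies=...) also accepts a plain {name: value} dict, which sidesteps any extra keys. And the title mentions PhantomJS, while the spider above opens Firefox. The snippet below is a minimal sketch of both points, not part of the original post: the helper names are hypothetical, and the PhantomJS driver is assumed to be installed and on the PATH.

# Sketch (assumptions noted above): cookie conversion plus a headless
# PhantomJS variant of the login step, for a pre-4.x Selenium.
from selenium import webdriver


def selenium_cookies_to_dict(selenium_cookies):
    # Keep only name -> value, which is all scrapy.Request needs here.
    return {c['name']: c['value'] for c in selenium_cookies}


def get_cookies_headless(base_url, username, password):
    # PhantomJS runs without opening a browser window; hypothetical drop-in
    # replacement for the Firefox driver used in the spider above.
    driver = webdriver.PhantomJS()
    driver.implicitly_wait(30)
    driver.get(base_url)
    driver.find_element_by_name("USER").send_keys(username)
    driver.find_element_by_name("PASSWORD").send_keys(password)
    driver.find_element_by_name("submit").click()
    cookies = selenium_cookies_to_dict(driver.get_cookies())
    driver.quit()
    return cookies

The returned dict can be passed directly as cookies=... in the spider's parse method; Scrapy accepts either this dict form or the raw list of cookie dicts.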