Using Scrapy

# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.


# These imports use the older Scrapy API (BaseSpider / HtmlXPathSelector)
from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector

from mySpider.items import DmozItem


class DmozSpider(BaseSpider):
    name = "dmoz"
    allowed_domains = ["dmoz.org"]
    start_urls = [
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Books/",
        "http://www.dmoz.org/Computers/Programming/Languages/Python/Resources/",
        #"http://stackoverflow.com/"
    ]


    def parse(self, response):
        # Build an XPath selector over the downloaded page.
        hxs = HtmlXPathSelector(response)
        items = []
        # Each <li> under a <ul> on the DMOZ listing page is one entry.
        for sel in hxs.select('//ul/li'):
            item = DmozItem()
            item['title'] = sel.select('a/text()').extract()
            item['link'] = sel.select('a/@href').extract()
            item['desc'] = sel.select('text()').extract()
            items.append(item)
        # Return after the loop; returning inside the loop would stop
        # the spider after scraping only the first <li>.
        return items
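The spider fills three fields (title, link, desc) on DmozItem, which it imports from mySpider.items, but that file is not shown above. A minimal items.py consistent with those field names might look like the sketch below; this is an assumption, since the actual project file is not given in the post:

# mySpider/items.py -- minimal sketch; the real file is not shown in the post
from scrapy.item import Item, Field


class DmozItem(Item):
    title = Field()   # text of the <a> link in each <li>
    link = Field()    # href attribute of that link
    desc = Field()    # free text inside the <li> itself

With the item defined, the spider can be run from the project directory with a command such as scrapy crawl dmoz -o items.json -t json to export the scraped items as JSON.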