我试着用爬虫(crawler)来实现,代码如下,但是爬虫没有返回任何结果(启动后随即关闭):
from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from torent.items import TorentItem
class MultiPagesSpider(CrawlSpider):
    """Crawl tanitjobs.com listing pages and scrape one item per job offer.

    Follows the pagination links inside the page-navigation div and hands
    every followed page to :meth:`parse_item`.
    """

    name = 'job'
    # BUG FIX: no trailing slash here. 'tanitjobs.com/' never matches the
    # request hostname, so the offsite middleware filtered every request and
    # the spider closed immediately with zero results.
    allowed_domains = ['tanitjobs.com']
    start_urls = ['http://tanitjobs.com/browse-by-category/Nurse/?searchId=1393459812.065&action=search&page=1&view=list',]

    rules = (
        # Only follow links whose URL contains "page=" and that live inside
        # the pagination block; crawl them all and parse each one.
        Rule(
            SgmlLinkExtractor(
                allow=('page=*',),
                restrict_xpaths=('//div[@class="pageNavigation"]',),
            ),
            callback='parse_item',
            follow=True,
        ),
    )

    def parse_item(self, response):
        """Extract a TorentItem (title only) for each job offer on the page.

        :param response: the downloaded listing page.
        :returns: list of populated ``TorentItem`` instances.
        """
        hxs = HtmlXPathSelector(response)
        items = hxs.select('//div[@class="offre"]/div[@class="detail"]')
        scraped_items = []
        for item in items:
            scraped_item = TorentItem()
            scraped_item["title"] = item.select('a/strong/text()').extract()
            scraped_items.append(scraped_item)
        # BUG FIX: return the populated item list, not the raw selector
        # list -- Scrapy can only export Items/dicts, so returning `items`
        # produced a flood of errors and no scraped output.
        return scraped_items
正如 @paul t. 在上面的评论中所指出的;此外,您需要返回
scraped_items
而不是 items
,否则您会得到大量如下所示的错误:
编程相关推荐