<p>参考<a href="https://github.com/NilanshBansal/Craigslist_Scrapy/blob/master/craigslist/spiders/jobs.py" rel="nofollow noreferrer">https://github.com/NilanshBansal/Craigslist_Scrapy/blob/master/craigslist/spiders/jobs.py</a></p>
<pre><code>import scrapy
from scrapy import Request
class ConventionSpider(scrapy.Spider):
    """Scrape exhibitor name, booth number, and website from the ASCB18 listing."""
    name = 'convention'
    # allowed_domains is deliberately omitted: exhibitor detail links can point
    # to other domains, and declaring a domain here would filter those requests out.
    start_urls = ['https://events.jspargo.com/ASCB18/Public/Exhibitors.aspx?sortMenu=102003']

    def parse(self, response):
        """Walk the exhibitor listing and follow each company's detail link.

        For every row, pair up the company name, booth label, and detail URL,
        then request the detail page, carrying the scraped fields in meta.
        """
        names = response.xpath('//*[@class="companyName"]')
        numbers = response.xpath('//*[@class="boothLabel"]')
        links = response.xpath('//*[@class="companyName"]')
        for row, row1, row2 in zip(names, numbers, links):
            company = row.xpath('.//*[@class="exhibitorName"]/text()').extract_first()
            booth_num = row1.xpath('.//*[@class="boothLabel aa-mapIt"]/text()').extract_first()
            url = row2.xpath('.//a/@href').extract_first()
            # FIX: the original meta dict was missing its closing '}' (SyntaxError).
            yield Request(url, callback=self.parse_page,
                          meta={'Url': url, 'Company': company, 'Booth_Number': booth_num})

    def parse_page(self, response):
        """Extract the company website from the detail page and emit the item."""
        company = response.meta.get('Company')
        # FIX: parse() stores the value under 'Booth_Number' (underscore); the
        # original read 'Booth Number' and therefore always got None.
        booth_num = response.meta.get('Booth_Number')
        website = response.xpath('//a[@class="aa-BoothContactUrl"]/text()').extract_first()
        yield {'Company': company, 'Booth Number': booth_num, 'Website': website}
</code></pre>
<p>编辑:
<strong>注释掉 <code>allowed_domains</code> 这一行也能让爬虫在其他域名上正常工作。</strong></p>
<p>此回答针对您在<a href="https://stackoverflow.com/a/52792350">https://stackoverflow.com/a/52792350</a>中的代码</p>