Scrapy does not crawl all of the start URLs


I have about 2,211 start URLs, and Scrapy crawls some of them but not all. When I set start_urls to a single URL it crawls that URL fine, but when my URLs are in one big list, Scrapy skips many of them.

Is there a limit on how many start_urls a spider can have?

My code:

from pymongo import MongoClient
import re
from scrapy.selector import Selector
#from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor

from mongo.items import MongoItem
import scrapy
import json
from scrapy.http import Request
from bs4 import BeautifulSoup as BS


uri = "mongodb://asdf@asdf.ac.commerce.com:23423423/"
client = MongoClient(uri)
db = client['page_content']
collection3 = db['category_page_content']
copyblocks3 = collection3.distinct('cwc')
copyblockss = str(copyblocks3)

hrefs = re.findall(r'href=[\'"]?([^\'" >]+)', copyblockss)

class MongoSpider(scrapy.Spider):
    name = "collections3"
    allowed_domains = ["www.ecommerce.com"]
    handle_httpstatus_list = [502, 503, 504, 400, 408, 404]
    start_urls = hrefs

    def parse(self, response):
        # Each branch below detects a different kind of dead page (404 status,
        # "invalid" page title, invalid-category canonical link, or an empty
        # "Showing 0 of" result list) and rewrites the matching href stored in
        # MongoDB back to plain anchor text.
        hxs = Selector(response)
        sites = response.selector.xpath('//html')
        items = []

        if response.status == 404:
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        elif hxs.xpath('/html/head/title/text()[contains(.,"invalid")]'):
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        elif hxs.xpath('//head/link[@rel="canonical"]/@href[contains(.,"invalid-category-id")]'):
            for site in sites:
                item = MongoItem()
                item['url'] = response.url
                item['status'] = response.status
                item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                items.append(item)

                htmlvar = item['original_url']
                change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                alldata = dict()
                cwcblockdic = ""
                for a in change_list:
                    alldata.update(a)
                ids = alldata['_id']
                cwcblock = alldata['cwc']
                cwcblockdic = cwcblockdic + cwcblock

                soup = BS(cwcblockdic)
                wholehref = soup.find(href=htmlvar)
                try:
                    anchortext = soup.findAll(href=htmlvar)[0].text
                except:
                    anchortext = wholehref.get_text()
                soup.find(href=htmlvar).replaceWith(anchortext)
                soup = str(soup)
                newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                print "this is the anchor:", anchortext
                print "this is the href:", wholehref
                print "this is newlist:", newlist
                print "this is the id:", ids
                print "this is pagetype: CP"

                for item in change_list:
                    item['cwc'] = newlist
                    collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
            return items

        else:
            if hxs.xpath('//*[@class="result-summary-container"]/text()[contains(.,"Showing 0 of")]'):
                for site in sites:
                    item = MongoItem()
                    item['url'] = response.url
                    item['status'] = response.status
                    item['original_url'] = response.meta.get('redirect_urls', [response.url])[0]
                    items.append(item)

                    htmlvar = item['original_url']
                    change_list = list(collection3.find({"cwc":{"$regex":htmlvar}}))

                    alldata = dict()
                    cwcblockdic = ""
                    for a in change_list:
                        alldata.update(a)
                    ids = alldata['_id']
                    cwcblock = alldata['cwc']
                    cwcblockdic = cwcblockdic + cwcblock

                    soup = BS(cwcblockdic)
                    wholehref = soup.find(href=htmlvar)
                    try:
                        anchortext = soup.findAll(href=htmlvar)[0].text
                    except:
                        anchortext = wholehref.get_text()
                    soup.find(href=htmlvar).replaceWith(anchortext)
                    soup = str(soup)
                    newlist = soup.replace('<html><body>', '').replace('</body></html>','')

                    print "this is the anchor:", anchortext
                    print "this is the href:", wholehref
                    print "this is newlist:", newlist
                    print "this is the id:", ids
                    print "this is pagetype: CP"

                    for item in change_list:
                        item['cwc'] = newlist
                        collection3.update({'_id':ids}, {"$set":{"cwc":item['cwc']}}, upsert=False)
                return items

2 answers

This may be only one of the reasons, but it does apply here: your list of start URLs contains duplicates:

>>> urls = [...]  # list of urls you've posted
>>> len(urls)
2221
>>> len(set(urls))
1177

By default, Scrapy filters out duplicate requests, so only the 1,177 unique URLs actually get crawled.
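If that is the behaviour you want and you just need each unique page fetched once, a minimal sketch (assuming the hrefs list built from MongoDB in the question) is to deduplicate it, order preserved, before assigning start_urls:

from collections import OrderedDict

# Assumption: `hrefs` is the list built from the MongoDB regex in the question.
# OrderedDict.fromkeys() drops duplicates while keeping the original order
# (works on Python 2.7 as well as Python 3).
start_urls = list(OrderedDict.fromkeys(hrefs))

With that, len(start_urls) matches the number of pages that will really be requested, and nothing is silently removed by the duplicate filter.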

If instead you want every URL in the list to be requested even when it appears more than once, override start_requests() and pass dont_filter=True so the duplicate filter is bypassed:

def start_requests(self):

    # your start-url logic, e.g. iterating over the hrefs list
    for url in self.start_urls:
        yield scrapy.Request(url=url, callback=self.parse, dont_filter=True)
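Note that dont_filter=True is applied per request: with 2,221 entries but only 1,177 unique URLs, the same page may be downloaded and parsed more than once, so the MongoDB update in parse() can run repeatedly for the same href.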
