Unable to fetch myntra API data with the Scrapy framework (307 redirect)


Here is the spider code:

import scrapy

class MyntraSpider(scrapy.Spider):

    custom_settings = {
        'HTTPCACHE_ENABLED': False,
        'dont_redirect': True,
        #'handle_httpstatus_list' : [302,307],
        #'CRAWLERA_ENABLED': False,
        'USER_AGENT': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',
    }

    name = "heytest"
    allowed_domains = ["www.myntra.com"]
    start_urls = ["https://www.myntra.com/web/v2/search/data/duke"]

    def parse(self, response):
        self.logger.debug('Parsed jabong.com')

"Parsed jabong.com" is never logged. In fact, the parse callback is never called at all. Please help.
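Note: `dont_redirect` and `handle_httpstatus_list` are `Request.meta` keys, not settings, so placing them in `custom_settings` has no effect; the redirect middleware never sees them there. A minimal sketch of the per-request form, reusing the URL and spider name from the question:

import scrapy

class MyntraSpider(scrapy.Spider):

    name = "heytest"
    allowed_domains = ["www.myntra.com"]

    def start_requests(self):
        # dont_redirect / handle_httpstatus_list must go in Request.meta,
        # not in custom_settings, to reach the redirect middleware
        yield scrapy.Request(
            "https://www.myntra.com/web/v2/search/data/duke",
            meta={"dont_redirect": True, "handle_httpstatus_list": [302, 307]},
            callback=self.parse,
        )

    def parse(self, response):
        self.logger.debug("Parsed myntra.com, status %s", response.status)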

The error log from Scrapinghub (attached as a screenshot):

See also the Postman screenshot.


1 Answer

I ran this code (only a few times) and I had no problem getting the data.

It looks the same as your code, so I don't know why you have a problem.

Maybe they blocked you for some reason.
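One quick way to test that theory is to hit the same endpoint outside Scrapy; a small sketch using the `requests` library (the header value is illustrative, not a known requirement):

import requests

# Request the same endpoint without Scrapy to see whether the
# redirect/block is tied to the client; header value is illustrative.
headers = {'User-Agent': 'Mozilla/5.0'}
r = requests.get('https://www.myntra.com/web/v2/search/data/duke',
                 headers=headers, allow_redirects=False)
print(r.status_code)              # a 307 here confirms the redirect
print(r.headers.get('Location'))  # where the server wants to send you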

#!/usr/bin/env python3

import scrapy
import json

class MySpider(scrapy.Spider):

    name = 'myspider'
    allowed_domains = ['www.myntra.com']
    start_urls = ['https://www.myntra.com/web/v2/search/data/duke']

    #def start_requests(self):
    #    for tag in self.tags:
    #        for page in range(self.pages):
    #            url = self.url_template.format(tag, page)
    #            yield scrapy.Request(url)

    def parse(self, response):
        print('url:', response.url)
        #print(response.body)

        # the endpoint returns JSON
        data = json.loads(response.body)
        print('data.keys():', data.keys())
        print('meta:', data['meta'])
        print("data['data']:", data['data'].keys())

        # download files
        #for href in response.css('img::attr(href)').extract():
        #    url = response.urljoin(href)
        #    yield {'file_urls': [url]}

        # download images and convert to JPG
        #for src in response.css('img::attr(src)').extract():
        #    url = response.urljoin(src)
        #    yield {'image_urls': [url]}

# --- it runs without a project and saves the output in `output.csv` ---

from scrapy.crawler import CrawlerProcess

c = CrawlerProcess({
    'USER_AGENT': 'Mozilla/5.0',
    #'USER_AGENT': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36',

    # save in CSV or JSON
    'FEED_FORMAT': 'csv',     # or 'json'
    'FEED_URI': 'output.csv', # or 'output.json'

    # download files to `FILES_STORE/full`
    # it needs `yield {'file_urls': [url]}` in `parse()`
    #'ITEM_PIPELINES': {'scrapy.pipelines.files.FilesPipeline': 1},
    #'FILES_STORE': '/path/to/valid/dir',

    # download images and convert to JPG
    # it needs `yield {'image_urls': [url]}` in `parse()`
    #'ITEM_PIPELINES': {'scrapy.pipelines.images.ImagesPipeline': 1},
    #'IMAGES_STORE': '/path/to/valid/dir',

    #'HTTPCACHE_ENABLED': False,
    # note: the two keys below are Request.meta keys, not settings
    #'dont_redirect': True,
    #'handle_httpstatus_list' : [302,307],
    #'CRAWLERA_ENABLED': False,
})
c.crawl(MySpider)
c.start()
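Saved as a single file (e.g. `myspider.py`, a name chosen here for illustration) and run with `python3 myspider.py`, the script starts the crawl without a Scrapy project and writes the scraped items to `output.csv`.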
