我刚刚开始涉足 Python，目的是从一个网站上抓取数据。我找到了一个关于网站登录认证的教程，并照着写了一个爬虫，但不幸的是，它并没有达到预期效果：登录页面的用户名和密码似乎都没有被填入。希望有人能看一下代码，指出我的错误并给些帮助。代码如下：
from scrapy.spiders.init import InitSpider
from scrapy.http import Request, FormRequest
from scrapy.linkextractors.sgml import SgmlLinkExtractor
from scrapy.spiders import Rule
class controlantSpider(InitSpider):
    """Spider that authenticates against grp.controlant.com before crawling.

    Flow: ``init_request`` fetches the login page, ``login`` submits the
    form, ``check_login_response`` verifies success and then hands control
    to the normal crawl (``start_urls`` + ``rules``).
    """

    name = 'controlant'
    allowed_domains = ['controlant.com']
    login_page = 'https://grp.controlant.com/user/login?redirect=%2f'
    start_urls = ['https://grp.controlant.com/group',
                  'https://grp.controlant.com/webforms/Admin/Overview.aspx']
    rules = (
        # Fix: escape the dot so '.html' is matched literally — the bare
        # '.' in the original pattern matched ANY character before 'html'.
        Rule(SgmlLinkExtractor(allow=r'-\w+\.html$'),
             callback='parse_item', follow=True),
    )

    def init_request(self):
        """Called once before crawling starts: request the login page."""
        return Request(url=self.login_page, callback=self.login)

    def login(self, response):
        """Submit the login form found on the login page.

        NOTE(review): 'username' and 'password' below are used as BOTH the
        form field names AND the literal submitted values.  Replace the
        values with real credentials, and verify the keys match the form's
        actual ``<input name="...">`` attributes — this is an ASP.NET site
        (see Overview.aspx), which typically uses names like
        'ctl00$...$UserName'.  A key mismatch leaves the fields unfilled,
        which matches the reported symptom.  If the page has several
        forms, pass ``formnumber=`` or ``formxpath=`` to pick the right one.
        """
        return FormRequest.from_response(
            response,
            formdata={'username': 'username', 'password': 'password'},
            callback=self.check_login_response)

    def check_login_response(self, response):
        """Check the login response to see if we are logged in."""
        # Fix: on Python 3, response.body is bytes, so testing a str with
        # `in` raises TypeError.  response.text is the decoded unicode
        # body and behaves correctly on both Python 2 and 3.
        if "Hi wessex@alliance" in response.text:
            self.log("Successfully logged in. Let's start crawling!")
            # Now the crawling can begin..
            self.initialized()
        else:
            self.log("Bad times :(")
            # Something went wrong, we couldn't log in, so nothing happens.

    def parse_item(self, response):
        """Persist a crawled page's raw body to a local .html file."""
        # NOTE(review): split("/")[-2] takes the second-to-last URL path
        # segment as the filename; for URLs ending in '-foo.html' the last
        # segment ([-1]) may be what was intended — confirm against the
        # site's actual URLs.
        filename = response.url.split("/")[-2] + '.html'
        # 'wb' + response.body writes the raw bytes without re-encoding.
        with open(filename, 'wb') as f:
            f.write(response.body)
目前没有回答
相关问题 更多 >
编程相关推荐