# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class wiwjSpider(scrapy.Spider):
    """Log in to 5i5j.com, then crawl the Hangzhou second-hand housing listing pages.

    Flow: ``start_urls`` points at the login page; :meth:`parse` submits the
    login form; :meth:`parse_page` fans out requests for listing pages 2-89;
    :meth:`parse_newpage` dumps each page body.
    """
    name = "wiwjSpider"
    allowed_domains = ["5i5j.com"]
    start_urls = (
        "https://passport.5i5j.com/passport/login?service=https%3A%2F%2Fhz.5i5j.com%2Freglogin%2Findex%3FpreUrl%3Dhttps%253A%252F%252Fhz.5i5j.com%252F&status=1&city=hz",
    )
    # NOTE(review): `rules` is only honored by CrawlSpider subclasses; on a
    # plain scrapy.Spider these two attributes are inert. They are kept here
    # unchanged for backward compatibility, but either the base class should
    # become CrawlSpider (with a renamed callback -- CrawlSpider reserves
    # `parse`) or these attributes should be removed.
    content_url = LinkExtractor(allow=r'\d+.html')
    rules = (
        Rule(content_url, callback='parse', follow=True),
    )

    def parse(self, response):
        """Handle the login-page response and submit the login form.

        :param response: response for the login URL in ``start_urls``.
        :returns: yields a ``FormRequest`` posting the credentials; the
            post-login response is handled by :meth:`parse_page`.
        """
        # SECURITY: credentials are hard-coded in source. Move them to
        # spider settings or environment variables.
        yield scrapy.FormRequest.from_response(
            response,
            formdata={"username": "18757199154", "password": "110110"},
            callback=self.parse_page,
        )

    def parse_page(self, response):
        """After login succeeds, request the paginated listing pages.

        :param response: post-login response (unused beyond signaling that
            the session cookies are now set).
        :returns: yields one ``Request`` per listing page, pages 2-89.
        """
        url = "https://hz.5i5j.com/ershoufang/n"
        # `range(2, 90)` covers pages 2 through 89 inclusive.
        # (Renamed the original loop variable `all`, which shadowed the builtin.)
        for page_url in (url + str(p) + '/' for p in range(2, 90)):
            yield scrapy.Request(page_url, callback=self.parse_newpage)

    def parse_newpage(self, response):
        """Dump the raw body of a listing page.

        :param response: listing-page response.
        """
        # print() call form is valid on both Python 2 and Python 3; the
        # original `print response.body` statement is a SyntaxError on Py3.
        print(response.body)