1,建立项目
scrapy startproject book
cd book
scrapy genspider -t crawl amazon amazon.cn
2, 与scrapy_redis不同的是继承的类不同
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from scrapy_redis.spiders import RedisCrawlSpider
class AmazonSpider(RedisCrawlSpider):
    """Distributed crawl spider for Amazon (amazon.cn) book pages.

    Unlike a plain ``CrawlSpider``, a ``RedisCrawlSpider`` does not use
    ``start_urls``: seed URLs are pushed into the Redis list named by
    ``redis_key`` (e.g. ``lpush amazon <url>``), so multiple spider
    processes can share a single crawl queue.
    """

    name = 'amazon'
    allowed_domains = ['amazon.cn']
    # Intentionally disabled — seed URLs come from Redis via redis_key instead.
    # start_urls = ['https://www.amazon.cn/b/?&node=116169071&tag=baiduiclickcn-23&ref=DEP_190527_lhb_06']
    redis_key = 'amazon'

    rules = (
        # Follow both the major-category and sub-category links in the sidebar.
        Rule(LinkExtractor(
            restrict_xpaths=("//ul[@class='a-unordered-list a-nostyle a-vertical s-ref-indent-two']/ul/li"), ),
            follow=True),
        # Book detail pages: the parent <a> of each result's <h2> title.
        Rule(LinkExtractor(restrict_xpaths=("//div[@id='mainResults']/ul/li//h2/.."), ),
             callback='parse_book_detail'),
        # Pagination links.
        Rule(LinkExtractor(restrict_xpaths=("//div[@id='pagn']"), ), follow=True),
    )

    def parse_book_detail(self, response):
        """Extract title, author, cover image, and price from a book page.

        Yields the item dict — the original version built the dict but
        never emitted it, so nothing reached the item pipelines.
        """
        item = {}
        item["book_title"] = response.xpath("//span[@id='productTitle']/text()").extract_first()
        item["book_author"] = response.xpath("//div[@id='bylineInfo']/span[1]/a/text()").extract_first()
        item["book_img"] = response.xpath("//div[@id='ebooks-img-canvas']/img/@src").extract_first()
        item["book_price"] = response.xpath("//span[@class='a-color-base']/span/text()").extract_first()
        yield item
3, CrawlSpider rules 完善:
