Crawling All Jianshu Articles with Scrapy + Selenium + ChromeDriver
Create the project
scrapy startproject jianshu_spider
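This creates the standard Scrapy project layout. (The project is named jianshu_spider rather than jianshu because Scrapy does not allow a spider to share a name with the project module, and the spider created in the next step is called jianshu; the items import in jianshu.py below assumes this module name.)

jianshu_spider/
    scrapy.cfg
    jianshu_spider/
        __init__.py
        items.py
        middlewares.py
        pipelines.py
        settings.py
        spiders/
            __init__.py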
Create the spider (the -t crawl option uses the CrawlSpider template)
scrapy genspider -t crawl jianshu "jianshu.com"
Run the spider
scrapy crawl jianshu
Alternatively, create a start.py file:
from scrapy import cmdline
cmdline.execute('scrapy crawl jianshu'.split())
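cmdline.execute expects an argument list, which is why the command string is split. Running start.py (for example from PyCharm) is then equivalent to executing scrapy crawl jianshu in the project root, and makes it easy to set breakpoints while debugging the spider.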
Then open the project in PyCharm.
Now let's start writing the code.
1. jianshu.py
# -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
import re
import requests
import json
from jianshu_spider.items import JianshuSpiderItem


class JianshuSpider(CrawlSpider):
    name = 'jianshu'
    allowed_domains = ['jianshu.com']
    start_urls = ['https://www.jianshu.com/']

    rules = (
        # Matching rule: Jianshu article URLs differ only in the 12-character
        # slug in the middle of the path.
        Rule(LinkExtractor(allow=r'.*/p/[0-9a-z]{12}.*'), callback='parse_detail', follow=True),
    )
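    # A quick illustration (the URL below is a made-up example, not a real
    # article): the rule would extract and follow a link such as
    # https://www.jianshu.com/p/0123456789ab, because its /p/ segment is
    # followed by exactly twelve characters from [0-9a-z].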
    def parse_detail(self, response):
        title = response.xpath('//h1[@class="_1RuRku"]/text()').get()
        author = response.xpath('//span[@class="FxYr8x"]/a/text()').get()
        head_profile = response.xpath('//img[@class="_13D2Eh"]/@src').get()
        pub_time = response.xpath('//time/text()').get()
        # The last two <span> elements in the article's meta line hold the
        # read count and the word count; pull the digits out with a regex.
        read_count = re.search(r'\d+', response.xpath('//div[@class="s-dsoj"]/span[last()]').get()).group()
        work_count = re.search(r'\d+', response.xpath('//div[@class="s-dsoj"]/span[last()-1]').get()).group()
        content = response.xpath('//article').get()
        subjects_id = re.search(r'\d+', response