2019.1.23: Today I successfully crawled Toutiao with a Scrapy CrawlSpider. It is actually not hard.

This post walks through a Toutiao news crawler built with the Scrapy framework, covering the spider, settings, middleware, pipeline, and crawl rules. The spider searches for a given keyword (APEX in this example) and extracts article titles and content.


toutiao_spider.py

# -*- coding: utf-8 -*-
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule

class ToutiaoSpiderSpider(CrawlSpider):
    name = 'Toutiao_Spider'
    allowed_domains = ['toutiao.com']
    # Put the keyword you want to search for here; APEX is what I was after.
    start_urls = ['https://www.toutiao.com/search/?keyword=APEX']

    # Follow only article detail pages (URLs like /group/<19 chars>/).
    rules = (
        Rule(LinkExtractor(allow=r'.*/group/[0-9a-z]{19}/$'), callback='parse_detail', follow=False),
    )

    def parse_detail(self, response):
        article = response.xpath("//div[@class='article-box']")

        # Use relative queries (.//) so the search stays scoped to the article node.
        title = article.xpath(".//h1//text()").get()
        content = article.xpath(".//div[@class='article-content']//text()").getall()

        item = {
            'title': title,
            'content': content,
        }
        yield item
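
The spider yields a plain dict even though items.py below declares a JinritoutiaoSpiderItem with matching fields. A minimal sketch of wiring the two together; the import path assumes the default project layout, and this variant is not in the original code:

# Variant of parse_detail that yields the declared Item instead of a dict.
from Jinritoutiao_spider.items import JinritoutiaoSpiderItem

def parse_detail(self, response):
    article = response.xpath("//div[@class='article-box']")
    item = JinritoutiaoSpiderItem()
    item['title'] = article.xpath(".//h1//text()").get()
    item['content'] = article.xpath(".//div[@class='article-content']//text()").getall()
    yield item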




items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy


class JinritoutiaoSpiderItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()
    article1 = scrapy.Field()  # declared but never filled by the spider


middlewares.py



from selenium import webdriver
from scrapy.http.response.html import HtmlResponse
import time

class SeleniumDownloadMiddleware(object):
    def __init__(self):
        # Path to the local chromedriver binary.
        self.driver = webdriver.Chrome(executable_path=r"G:\share\chromedriver_win32\chromedriver.exe")

    def process_request(self, request, spider):
        url = request.url

        # url2 = 'https://www.toutiao.com/'

        # Open the page in a real browser and grab the rendered source.
        self.driver.get(url)
        source = self.driver.page_source

        # if url == url2:
        #     try:
        #         # Scroll to the bottom to load the next batch of results.
        #         # Widen the range to fetch more; tune the sleep to your network speed.
        #         for i in range(1, 2):
        #             self.driver.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        #             time.sleep(3)
        #     except:
        #         pass

        # Note to self: I can crawl both the search page and the article pages this
        # way, but the two are isolated and I haven't worked out how to chain them.

        # Returning an HtmlResponse here short-circuits Scrapy's own downloader,
        # so every request is served from the Selenium-rendered page.
        response = HtmlResponse(url=self.driver.current_url, body=source, request=request, encoding='utf-8')
        return response
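
One thing the middleware never does is quit the browser, so a Chrome window (and chromedriver process) is left behind after each run. A minimal sketch of a cleanup hook via Scrapy's spider_closed signal; these two methods are additions of mine, not part of the original middleware:

from scrapy import signals

class SeleniumDownloadMiddleware(object):
    # ... __init__ and process_request as above ...

    @classmethod
    def from_crawler(cls, crawler):
        # Build the middleware and subscribe its spider_closed handler.
        middleware = cls()
        crawler.signals.connect(middleware.spider_closed, signal=signals.spider_closed)
        return middleware

    def spider_closed(self, spider):
        self.driver.quit()  # shut down Chrome and the chromedriver process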

pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html

from scrapy.exporters import CsvItemExporter


class ToutiaoprojectPipeline(object):
    def open_spider(self, spider):
        # CsvItemExporter expects a binary file handle.
        self.csv_file = open('toutiao.csv', 'wb')
        self.csv_exporter = CsvItemExporter(self.csv_file)
        self.csv_exporter.start_exporting()

    def process_item(self, item, spider):
        self.csv_exporter.export_item(item)
        return item

    def close_spider(self, spider):
        self.csv_exporter.finish_exporting()
        self.csv_file.close()
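
For reference, the same CSV file can be produced without a custom pipeline at all: Scrapy ships with feed exports. Either run the crawl as scrapy crawl Toutiao_Spider -o toutiao.csv, or add the feed settings to settings.py. A minimal sketch; FEED_FORMAT and FEED_URI are the setting names used by Scrapy releases of this era (newer versions use a FEEDS dict instead):

# settings.py: built-in CSV feed export, an alternative to ToutiaoprojectPipeline.
FEED_FORMAT = 'csv'       # export serialization format
FEED_URI = 'toutiao.csv'  # output file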

settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for Jinritoutiao_spider project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'Jinritoutiao_spider'

SPIDER_MODULES = ['Jinritoutiao_spider.spiders']
NEWSPIDER_MODULE = 'Jinritoutiao_spider.spiders'


# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'Jinritoutiao_spider (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 1
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    'Accept-Language': 'en',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
}
# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {
#    'Jinritoutiao_spider.middlewares.SeleniumDownloadMiddleware': 100,
# }

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
   'Jinritoutiao_spider.middlewares.SeleniumDownloadMiddleware': 500,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'Jinritoutiao_spider.pipelines.ToutiaoprojectPipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

start.py

from scrapy import cmdline

# Equivalent to running "scrapy crawl Toutiao_Spider" from the project root.
cmdline.execute("scrapy crawl Toutiao_Spider".split())