不太了解scrapy框架的可以看看

本文带你了解scrapy框架,包括基本数据模型,以及如何在pipelines.py中配置MongoDB数据库,items.py中定义字段,和settings.py的配置。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

带你了解scrapy框架

这是scrapy的基本数据模型

在这里插入图片描述

这是我个人写的一个小实例 小白可以看一下基本流程

# -*- coding: utf-8 -*-
import scrapy
from ..items import BokeItem


class BkSpider(scrapy.Spider):
    """Crawl the OSChina blog index: follow each category link, scrape
    per-post metadata from the listing pages, download each post's cover
    image to ./img/ and yield one populated BokeItem per post."""

    name = 'bk'
    # allowed_domains = ['bai.com']
    start_urls = ['https://www.oschina.net/blog']

    def img_parse(self, response):
        """Save the downloaded cover image bytes, then yield the item.

        NOTE(review): assumes the ./img/ directory already exists --
        otherwise open() raises FileNotFoundError; create it up front.
        """
        item = response.meta['item']
        with open('./img/%s' % item['imgname'], 'wb') as f:
            f.write(response.body)
        yield item

    def blog_parse(self, response):
        """Extract blog metadata from one category listing page and
        schedule the cover-image download, carrying the item in meta."""
        typ = response.meta['typ']
        for div in response.xpath('//div[contains(@class,"blog-item")]'):
            imgurl = div.xpath('./a/img/@src').extract_first()
            # Posts without a usable cover image are skipped explicitly.
            # (The original wrapped the whole body in a bare `except`,
            # which also hid real extraction bugs; the only genuine crash
            # source was imgurl being None before .endswith.)
            if not imgurl or not imgurl.endswith(('.jpg', '.png', '.gif')):
                continue
            item = BokeItem()
            item['title'] = div.xpath('./div/a/@title').extract_first()
            item['brief'] = div.xpath('./div/div[1]/p/text()').extract_first()
            item['author'] = div.xpath('.//div[@class="extra"]//div[@class="item"][1]/a/text()').extract_first()
            item['date'] = div.xpath('.//div[@class="extra"]//div[@class="item"][2]/text()').extract_first()
            item['readt'] = div.xpath('.//div[@class="extra"]//div[@class="item"][3]/text()').extract_first()
            item['comment'] = div.xpath('.//div[@class="extra"]//div[@class="item"][4]/a/text()').extract_first()
            item['like'] = div.xpath('.//div[@class="extra"]//div[@class="item"][5]/text()').extract_first()
            item['typ'] = typ
            item['imgname'] = imgurl.split('/')[-1]
            yield scrapy.Request(url=imgurl, callback=self.img_parse, meta={'item': item})

    def parse(self, response):
        """Entry point: pair each category URL with its link text and
        dispatch one request per category to blog_parse."""
        typ_url = response.xpath('//*[@id="mainScreen"]/div/div[1]/div/div[1]/div[1]/a[position()>1]/@href').extract()
        typ_text = response.xpath('//*[@id="mainScreen"]/div/div[1]/div/div[1]/div[1]/a[position()>1]/text()').extract()
        # zip keeps URL/text aligned without the O(n) (and duplicate-unsafe)
        # typ_url.index(durl) lookup of the original.
        for durl, dtext in zip(typ_url, typ_text):
            yield scrapy.Request(url=durl, callback=self.blog_parse, meta={'typ': dtext})

在pipelines.py中配置MongoDB数据库

import pymongo


class BokePipeline(object):
    """Item pipeline that stores every scraped item in MongoDB.

    The original opened the MongoClient at class-definition (import)
    time; Scrapy's pipeline contract provides open_spider/close_spider
    hooks for exactly this, so the connection now lives only for the
    duration of the crawl and is closed cleanly afterwards.
    """

    def open_spider(self, spider):
        # One client per crawl; localhost:27017 by default.
        self.conn = pymongo.MongoClient()
        self.db = self.conn.库名    # TODO: replace 库名 with the real database name
        self.table = self.db.表名   # TODO: replace 表名 with the real collection name

    def close_spider(self, spider):
        self.conn.close()

    def process_item(self, item, spider):
        """Insert the item as a plain dict and pass it down the chain."""
        self.table.insert_one(dict(item))
        return item

在items.py中配置需要的字段

import scrapy


class BokeItem(scrapy.Item):
    """Container for one scraped blog post (filled in BkSpider.blog_parse)."""
    # define the fields for your item here like:
    title = scrapy.Field()    # post title
    brief = scrapy.Field()    # short summary text
    author = scrapy.Field()   # author display name
    date = scrapy.Field()     # publication date text
    readt = scrapy.Field()    # read-count text
    comment = scrapy.Field()  # comment-count text
    like = scrapy.Field()     # like-count text
    typ = scrapy.Field()      # category (link text from the index page)
    imgname = scrapy.Field()  # cover-image filename (last URL path segment)

在settings.py配置一些数据

USER_AGENT = 'boke (+http://www.yourdomain.com)'      # set the User-Agent header
ROBOTSTXT_OBEY = False     # set to False (ignore robots.txt)

# enable the item pipeline (lower number = runs earlier, range 0-1000)
ITEM_PIPELINES = {
   'boke.pipelines.BokePipeline': 300,
}
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值