城市电影广州今日电影网址:
http://guangzhou.movie.iecity.com/FilmList.html
①items.py:定义爬取项目,添加类成员
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class TodaymovieItem(scrapy.Item):
    """Container for one movie entry scraped from the iecity.com listing page."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    movieTitle = scrapy.Field()  # movie title
    movieType = scrapy.Field()   # movie genre/type
    moviePf = scrapy.Field()     # movie rating score
    movieUrl = scrapy.Field()    # movie detail-page URL
②guangzhou_bot.py:定义爬取规则
# -*- coding: utf-8 -*-
import scrapy
from todaymovie.items import TodaymovieItem
from lxml import etree
class GuangzhouBotSpider(scrapy.Spider):
    """Spider for today's movie listings for Guangzhou on iecity.com."""

    name = 'guangzhou_bot'
    # restrict crawling to the Guangzhou movie listing domain
    allowed_domains = ['guangzhou.movie.iecity.com']
    # start page: today's movies for Guangzhou
    start_urls = ['http://guangzhou.movie.iecity.com/FilmList.html']

    def parse(self, response):
        """Yield one TodaymovieItem per <li> movie entry on the listing page.

        Each <li> is extracted as an HTML string and re-parsed with lxml,
        so the per-movie XPath rules run against that fragment only.
        """
        movielist = response.xpath('//*[@id="Left"]/div[2]/div/ul/li').extract()
        # XPath rules evaluated inside each movie fragment
        role_url = '//a[1]/@href'
        role_title = '//div[@class="MovieTitle clearfix"]//*[@itemprop="name"]/text()'
        role_pf = '//div[@class="MovieTitle clearfix"]//*[@class="pf"]/text()'
        role_type = '//div[@class="MovieDetail"]/text()'
        for movie in movielist:
            tree = etree.HTML(movie)  # movie is an HTML string fragment
            item = TodaymovieItem()
            item['movieTitle'] = tree.xpath(role_title)[0]
            item['movieType'] = tree.xpath(role_type)[0].replace('\r\n', ' ')
            # rating may be absent; evaluate the XPath once and fall back to 'None'
            pf = tree.xpath(role_pf)
            item['moviePf'] = pf[0] if pf else 'None'
            # urljoin handles both relative and root-relative hrefs correctly
            # (plain string concatenation produced "…com//Film…" for hrefs
            # starting with '/')
            item['movieUrl'] = response.urljoin(tree.xpath(role_url)[0])
            # yield items one at a time instead of accumulating a list
            yield item
③pipeline.py:保存爬取结果
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from time import strftime,localtime
import codecs
class TodaymoviePipeline(object):
    """Append each scraped movie item to a per-day CSV file."""

    def process_item(self, item, spider):
        """Write one item as a CSV row and return the item unchanged.

        The file is named after today's date (yy-mm-dd) and opened in
        append mode, so items from one crawl accumulate into one file.
        """
        # local import keeps this change self-contained
        import csv
        movie_today = strftime('%y-%m-%d', localtime())
        filename = 'city_guangzhou_' + movie_today + '.csv'
        # csv.writer quotes fields that contain commas or quotes; the
        # previous hand-rolled '%s,%s,%s,%s' format silently corrupted
        # rows whenever a movie title contained a comma
        with open(filename, 'a', encoding='utf-8', newline='') as fp:
            csv.writer(fp).writerow([
                item['movieTitle'],
                item['movieType'],
                item['moviePf'],
                item['movieUrl'],
            ])
        return item
④settings.py:分派任务,使pipeline生效
取消ITEM_PIPELINES的注释:
# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'todaymovie.pipelines.TodaymoviePipeline': 300,
}
# Key is the pipeline class that processes scraped items; value is its
# execution order — pipelines with lower numbers run earlier.