-
selenium是浏览器测试自动化工具,很容易完成鼠标点击、翻页等动作,缺点是一次只能加载一个页面,无法异步渲染页面,也就限制了selenium爬虫的抓取效率。
-
splash可以实现异步渲染页面,可以同时渲染几个页面。缺点是在页面点击、模拟登录方面没有selenium灵活。
1. 安装 docker和splash
可以看我之前写的 如何安装并启动docker和splash
https://www.cnblogs.com/zichliang/p/15796638.html
注意:!!
#root用户开放8050端口
[root@wzy_woyun ~]# firewall-cmd --permanent --add-port=8050/tcp
success
[root@wzy_woyun ~]# firewall-cmd --reload
Success
普通的 Splash 动态 Lua 脚本示例
- 添加请求头 请求url
-- Render the target URL with a spoofed desktop-Chrome User-Agent and
-- return both the rendered HTML and a PNG screenshot.
function main(splash, args)
    local target = args.url
    -- Set the UA before any navigation so every request carries it.
    splash:set_user_agent("Mozilla/5.0Chrome/69.0.3497.100Safari/537.36")
    splash:go(target)
    splash:wait(2)
    -- Load the page a second time after the pause (as in the original).
    splash:go(target)
    return {
        html = splash:html(),
        png = splash:png(),
    }
end
- 通过滑动 来完成动态加载
-- Scroll 2800px down the page to trigger lazily-loaded content,
-- then return the fully rendered HTML.
function main(splash, args)
    splash:go(args.url)
    -- Expose the browser's window.scrollTo to Lua and invoke it.
    local jump_to = splash:jsfunc("window.scrollTo")
    jump_to(0, 2800)
    splash:set_viewport_full()
    splash:wait(5)
    return { html = splash:html() }
end
结合 scrapy 使用,首先需要在 settings 中添加
# Scrapy settings required by scrapy-splash.
# Address of the Splash rendering service (docker container, port 8050).
SPLASH_URL = 'http://192.168.2.55:8050/'
# Downloader middleware chain: project middlewares first, then the
# scrapy-splash cookie/request middlewares, then HTTP compression.
DOWNLOADER_MIDDLEWARES = {
'curreny.middlewares.ProcessAllException': 200,
'curreny.middlewares.CurrenyDownloaderMiddleware': 543,
'scrapy_splash.SplashCookiesMiddleware': 723,
'scrapy_splash.SplashMiddleware': 725,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,
}
# Deduplicate requests by their Splash arguments, not just the URL.
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
# HTTP cache storage that understands Splash requests.
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
然后 在spider中添加lua脚本
"""
平潭综合实验区人民政府
"""
import copy
import re
import time
import scrapy
import scrapy_splash
from curreny.items import CurrenyItem
class PingtancomprehensiveexperimentgovproSpider(scrapy.Spider):
    """Spider for the Pingtan Comprehensive Experimental Zone government site.

    Listing pages are rendered through Splash because pagination happens via
    a JavaScript click; article detail pages are fetched as plain requests
    and parsed by :meth:`parse_detail`.
    """

    name = 'PingTanComprehensiveExperimentGovPro'
    # allowed_domains = ['xxx.com']
    start_urls = ['http://www.pingtan.gov.cn/jhtml/cn/8423']

    def start_requests(self):
        """Yield one Splash ``execute`` request per listing page (1..104).

        The listing URL is identical for every page; the distinct ``page``
        value in ``args`` is what keeps the Splash-aware dupefilter from
        collapsing the requests into one.
        """
        # NOTE(review): the string.format() call in the Lua script has no
        # placeholder, so args.page is effectively ignored and every request
        # clicks the same pagination link (span:nth-child(4)) -- verify the
        # intended per-page selector/JS against the live site before relying
        # on pages beyond the first click.
        lua = """
function main(splash, args)
splash.images_enabled = false
assert(splash:go(args.url))
assert(splash:wait(1))
js = string.format("document.querySelector('body > div.container > div.main.clearfix > div > div.page > span:nth-child(4) > a').click();", args.page)
splash:runjs(js)
assert(splash:wait(5))
return splash:html()
end
"""
        url = "http://www.pingtan.gov.cn/jhtml/cn/8423"
        for page in range(1, 105):
            yield scrapy_splash.SplashRequest(
                url=url,
                endpoint="execute",
                args={
                    "url": url,
                    "lua_source": lua,
                    "page": page,
                    "wait": 1,
                },
                callback=self.parse,
            )

    def parse(self, response, **kwargs):
        """Parse a rendered listing page and follow each article link.

        A fresh item is created for every row; the original reused one item
        and deep-copied it into ``meta`` to work around the shared-mutation
        bug, which is unnecessary once each row owns its own item.
        """
        for li in response.css("body > div.container > div.main.clearfix > div > div.info_list.list > ul > li"):
            href = li.css("a::attr(href)").get()
            if not href:
                # Skip malformed rows instead of requesting '...gov.cnNone'
                # (the original str()-wrapped a possible None).
                continue
            item = CurrenyItem()
            item["title_url"] = 'http://www.pingtan.gov.cn' + href
            item["title_name"] = li.css("a::attr(title)").get()
            item["title_date"] = li.css("span::text").get()
            yield scrapy.Request(
                url=item['title_url'],
                callback=self.parse_detail,
                meta={'item': item},
            )

    # Detail-page parsing
    def parse_detail(self, response):
        """Attach the article body HTML to the item and emit it."""
        item = response.meta['item']
        item['content_html'] = response.css('.detail').get()
        print(item['title_name'], item['title_url'], item['title_date'])
        yield item