import re
import json

from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule


class WhcSpider(CrawlSpider):
    name = 'whc'
    allowed_domains = ['ygdy8.com']
    start_urls = ['http://ygdy8.com/']

    # Traverse the whole site: follow category index pages and paginated
    # list pages, and hand each movie detail page to parse_item
    rules = (
        Rule(LinkExtractor(allow=r'index\.html', deny=r'game'), follow=True),
        Rule(LinkExtractor(allow=r'/list_\d+_\d+\.html'), follow=True),
        Rule(LinkExtractor(allow=r'/\d+/\d+\.html'), callback='parse_item'),
    )
    def parse_item(self, response):
        # Extract every ftp:// download link from the movie detail page
        ftp_urls = re.findall(r'<a href="(.*?)">ftp', response.text)
        self.logger.info(ftp_urls)
        # Append the links to a local file, one JSON array per page
        with open('result.txt', 'a', encoding='utf-8') as f:
            f.write(json.dumps(ftp_urls, ensure_ascii=False) + '\n')
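

# Inside a Scrapy project this spider runs with `scrapy crawl whc`. As a
# minimal sketch, it can also run standalone via Scrapy's CrawlerProcess,
# assuming the spider lives in this file; the User-Agent below is an
# assumption, in case the site rejects Scrapy's default one.
if __name__ == '__main__':
    from scrapy.crawler import CrawlerProcess

    process = CrawlerProcess(settings={
        'USER_AGENT': 'Mozilla/5.0',  # assumed browser-like UA, not from the original
    })
    process.crawl(WhcSpider)
    process.start()  # blocks until the crawl finishes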