import requests
import random
import re
import time
import os
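# pool of desktop browser User-Agent strings, used to make requests look like a normal browser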
user_list = ["Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/534.7 (KHTML, like Gecko) Chrome/7.0.514.0 Safari/534.7",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/9.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.14 (KHTML, like Gecko) Chrome/10.0.601.0 Safari/534.14",
"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/534.20 (KHTML, like Gecko) Chrome/11.0.672.2 Safari/534.20",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.27 (KHTML, like Gecko) Chrome/12.0.712.0 Safari/534.27",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/13.0.782.24 Safari/535.1",
"Mozilla/5.0 (Windows NT 6.0) AppleWebKit/535.2 (KHTML, like Gecko) Chrome/15.0.874.120 Safari/535.2",
"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.7 (KHTML, like Gecko) Chrome/16.0.912.36 Safari/535.7",
"Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.9.0.10) Gecko/2009042316 Firefox/3.0.10",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-GB; rv:1.9.0.11) Gecko/2009060215 Firefox/3.0.11 (.NET CLR 3.5.30729)",
"Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.6) Gecko/20091201 Firefox/3.5.6 GTB5",
"Mozilla/5.0 (Windows; U; Windows NT 5.1; tr; rv:1.9.2.8) Gecko/20100722 Firefox/3.6.8 ( .NET CLR 3.5.30729; .NET4.0E)",
"Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1"]
pattern = 'file="(https://.*?jpg)"'  # regex for the image attachment URLs in the page source
name_pat = '<span id="thread_subject">(.*?)</span>'  # regex for the thread title
headers = {
    'User-Agent': random.choice(user_list)  # a single User-Agent is picked at random for the whole run
}
for i in range(200, 305):
    try:
        print('Fetching thread ' + str(i))
        url = "http://thz7.cc/thread-2085" + str(i) + "-1-1.html"
        r = requests.get(url=url, headers=headers)
        time.sleep(random.random() * 3)  # random pause of up to 3 seconds between requests
        web = r.text
        # the thread title becomes the download folder name
        name = re.compile(name_pat).findall(web)
        name = name[0]
        path = 'D:/HTU/' + name
        os.makedirs(path, exist_ok=True)  # don't crash if the folder already exists
        # collect the image URLs on the page and drop duplicates
        imageurl = re.compile(pattern).findall(web)
        imageurl = list(set(imageurl))
        for j in range(0, len(imageurl)):
            print('Image ' + str(j + 1))
            image = imageurl[j]
            response = requests.get(image, headers=headers)
            file = 'D:/HTU/' + name + '/' + str(j + 1) + '.jpg'
            with open(file, 'wb') as f:
                f.write(response.content)
    except Exception as e:
        print(e)
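One caveat with the script above: the thread title is used verbatim as a Windows folder name, so a title containing characters such as \ / : * ? " < > | will make os.makedirs fail. A minimal sketch of a sanitizing helper (safe_name is a made-up name here, not part of the original script) that could be applied to name before building the path; re is already imported at the top:

def safe_name(title, max_len=80):
    # replace characters Windows does not allow in file or folder names
    cleaned = re.sub(r'[\\/:*?"<>|]', '_', title).strip()
    # keep the folder name reasonably short; fall back if nothing is left
    return cleaned[:max_len] or 'untitled'

# usage inside the loop, right after the title is extracted:
# path = 'D:/HTU/' + safe_name(name)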
2019-12-08 Python3: scraping images from THZ
This post describes a Python 3 web scraper whose goal is to grab the image resources from THZ forum threads. It fetches each thread's page source with the requests library, extracts the thread title and the image links with regular expressions, creates a per-thread folder with the os module, and saves every image to disk, automating the whole download process.
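The extraction step could also be done with BeautifulSoup instead of regular expressions. A rough sketch, under the assumption that the title sits in <span id="thread_subject"> and the image links in file="..." attributes exactly as the two regexes above imply (this is an alternative, not the code the script actually uses):

from bs4 import BeautifulSoup

def extract_with_bs4(html):
    soup = BeautifulSoup(html, 'html.parser')
    # thread title, the same element that name_pat targets
    title_tag = soup.find('span', id='thread_subject')
    title = title_tag.get_text(strip=True) if title_tag else 'untitled'
    # image URLs, taken from the file="..." attributes that pattern matches
    urls = [tag['file'] for tag in soup.find_all(attrs={'file': True})
            if tag['file'].startswith('https://') and tag['file'].endswith('jpg')]
    return title, list(set(urls))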