"中关村在线图片,汽车类的,http://desk.zol.com.cn/qiche/1.html"
import os
import threading
import requests
from bs4 import BeautifulSoup as bs
from concurrent import futures
imgnum = 0  # global image counter; guarded by a lock since many threads download at once
imgnum_lock = threading.Lock()
base_url = "http://desk.zol.com.cn/bizhi"  # unused: get_img_next builds its own base URL
def get_page(url, headers, page_num):
    """Fetch one listing page of the car category."""
    try:
        res = requests.get(url, headers=headers)
        # print("Crawling listing page %d" % page_num)
        return res
    except requests.exceptions.RequestException:
        print("Failed to fetch listing page %d!!!" % page_num)
def get_img_urls(res):
    """Extract the detail-page links (one per wallpaper set) from a listing page."""
    try:
        urls = []
        soup = bs(res.text, "html.parser")
        links = soup.select(".photo-list-padding .pic")
        for link in links:
            urls.append(link["href"])
            # print(link["href"])
        return urls
    except Exception:
        print("Failed to extract any detail-page links!!!")
def get_img_next(img_url, headers):
    """Download the big image on a detail page, then recurse into the 'next' link."""
    base_url = "http://desk.zol.com.cn"
    global imgnum
    try:
        res = requests.get(base_url + img_url, headers=headers).text
        soup = bs(res, "html.parser")
        img_src = soup.select("#bigImg")[0]["src"]
        img = requests.get(img_src, headers=headers).content
        with imgnum_lock:  # imgnum is shared by all worker threads
            num = imgnum
            imgnum += 1
        with open("temp_img/car/%d.jpg" % num, "wb") as f:
            f.write(img)
        next_img = soup.select("#pageNext")[0]["href"]
        if next_img:
            print(next_img)
            return get_img_next(next_img, headers=headers)
    except Exception:
        print("Download failed")
def get_img_page(urls, headers):
    """Walk every detail page found on one listing page."""
    try:
        for url in urls:
            get_img_next(url, headers=headers)
    except Exception:
        print("No links to download")  # urls is None when get_img_urls failed
def download_one(page_num):
    """Scrape one listing page: fetch it, collect detail links, download the images."""
    headers = {
        # 'Host': "desk.zol.com.cn",
        'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.100 Safari/537.36"
    }
    url = "http://desk.zol.com.cn/qiche/" + str(page_num) + ".html"
    res = get_page(url, headers=headers, page_num=page_num)
    urls = get_img_urls(res)
    get_img_page(urls, headers=headers)
def download_start(end_page):
    """Download listing pages 1..end_page-1 concurrently."""
    os.makedirs("temp_img/car", exist_ok=True)  # open() fails if the target dir is missing
    workers = 100  # note: 100 threads against one site is aggressive
    with futures.ThreadPoolExecutor(workers) as e:
        e.map(download_one, range(1, end_page))
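# Caveat: Executor.map returns a lazy iterator that is never consumed above, so
# any exception raised inside download_one is silently discarded. A minimal
# sketch that surfaces worker errors via submit()/as_completed();
# download_start_checked is a hypothetical variant, not part of the original script:
def download_start_checked(end_page, workers=8):
    os.makedirs("temp_img/car", exist_ok=True)
    with futures.ThreadPoolExecutor(workers) as e:
        tasks = [e.submit(download_one, n) for n in range(1, end_page)]
        for t in futures.as_completed(tasks):
            t.result()  # re-raises any exception from the worker thread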
if __name__ == "__main__":
    download_start(48)