# 知乎专栏文章采集 (Zhihu column article scraper)
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import json
import re
import os
def hqlj(url):
    """Fetch one page of a Zhihu column article-list API and return article URLs.

    Parameters
    ----------
    url : str
        Column API endpoint, e.g.
        ``https://zhuanlan.zhihu.com/api/columns/<name>/articles?...``.

    Returns
    -------
    list[str]
        The ``url`` field of every article object in the response's ``data`` array.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    }
    # The original issued the same GET twice (once for .text, once for .json());
    # one request is enough.
    resp = requests.get(url, headers=headers)
    resp.encoding = "utf-8"
    data = resp.json()
    print(data['paging'])
    # NOTE: the original loop variable was named `re`, shadowing the imported
    # regex module inside this function — renamed to avoid that trap.
    urls = [article['url'] for article in data['data']]
    for article_url in urls:
        print(article_url)
    return urls
def hqnr(url):
    """Download one Zhihu article: save its HTML body and every referenced image.

    Creates ``./zhihu/<title>/`` and writes:
      * ``h1.txt``    — raw HTML of the article's rich-text container
      * image files   — one per unique ``data-original`` image URL,
                        named by the last 10 characters of that URL

    Parameters
    ----------
    url : str
        Article page URL, e.g. ``https://zhuanlan.zhihu.com/p/59011185``.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
    }
    html = requests.get(url, headers=headers).text
    soup = BeautifulSoup(html, 'lxml')
    # Title becomes the directory name; replace characters Windows forbids in paths.
    h1 = re.sub(r'[\|\/\<\>\:\*\?\\\"]', "_", soup.h1.text)
    print(h1)
    os.makedirs(f'./zhihu/{h1}/', exist_ok=True)
    # Parse the rich-text container once and reuse it — the original called
    # soup.find(...) three separate times for the same node.
    body = soup.find(class_="RichText ztext Post-RichText")
    if body is None:
        # Page layout changed or the article is gone; nothing to save.
        print(f"未找到正文:{url}")
        return
    with open(f'./zhihu/{h1}/h1.txt', 'w', encoding='utf-8') as f:
        f.write(str(body))
    print(f"保存{h1}源代码成功!")
    # Dump the plain-text paragraphs for inspection.
    for para in body.find_all('p'):
        print(para.get_text())
    # Collect image URLs; use .get() so <img> tags without a data-original
    # attribute are skipped instead of raising KeyError as the original did.
    imgurls = []
    for tag in body.find_all('img'):
        src = tag.get('data-original')
        if src:
            imgurls.append(src)
    # De-duplicate while preserving first-seen order.
    img_urls = list(dict.fromkeys(imgurls))
    for img_url in img_urls:
        img_name = img_url[-10:]  # crude but stable filename: URL tail
        r = requests.get(img_url, headers=headers)
        with open(f'./zhihu/{h1}/{img_name}', 'wb') as f:
            f.write(r.content)
        print(f"保存{img_name}图片成功!")
    print(f"保存{h1}所有图片成功!")
if __name__ == '__main__':
    # Guarded entry point: importing this module no longer triggers a crawl.
    # To crawl the whole column instead of one article, uncomment:
    # lj = hqlj("https://zhuanlan.zhihu.com/api/columns/cun-design/articles?include=data%5B%2A%5D.admin_closed_comment%2Ccomment_count%2Csuggest_edit%2Cis_title_image_full_screen%2Ccan_comment%2Cupvoted_followees%2Ccan_open_tipjar%2Ccan_tip%2Cvoteup_count%2Cvoting%2Ctopics%2Creview_info%2Cauthor.is_following%2Cis_labeled%2Clabel_info&limit=10&offset=20")
    # for url in lj:
    #     hqnr(url)
    hqnr("https://zhuanlan.zhihu.com/p/59964490")