Python Crawler: Example Source Code for Scraping Zhihu Column Articles

This post walks through a Python scraper for Zhihu column articles. It sends an HTTP request to the column's article-list API, parses the returned JSON, and then fetches the details of each article, including the title, body text, and images. Along the way it touches on HTML parsing, regular expressions, and file handling.
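Before the full script, here is a minimal, self-contained sketch of that first step: requesting one page of the column's article-list API and reading the JSON it returns. The trimmed query string (only limit and offset, without the long include parameter used in the script) is an assumption, and only the fields the script below actually relies on (paging, data, url, excerpt) are touched.

# -*- coding: utf-8 -*-
import requests

# One page of the column's article-list API (assumed to accept just limit/offset).
api = "https://zhuanlan.zhihu.com/api/columns/cun-design/articles?limit=10&offset=0"
headers = {'user-agent': 'Mozilla/5.0'}

page = requests.get(api, headers=headers).json()
print(page['paging'])          # paging metadata for the column
for item in page['data']:      # one entry per article
    print(item['url'])         # item['excerpt'] holds a short text preview as well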


# Zhihu column article scraper

# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
import json
import re
import os


def hqlj(url):
    """Collect article links: fetch one page of the column API and return the article URLs in its 'data' list."""
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
    }
    # url = "https://zhuanlan.zhihu.com/api/columns/cun-design/articles?include=data%5B%2A%5D.admin_closed_comment%2Ccomment_count%2Csuggest_edit%2Cis_title_image_full_screen%2Ccan_comment%2Cupvoted_followees%2Ccan_open_tipjar%2Ccan_tip%2Cvoteup_count%2Cvoting%2Ctopics%2Creview_info%2Cauthor.is_following%2Cis_labeled%2Clabel_info&limit=10&offset=20"
    # url = "https://zhuanlan.zhihu.com/cun-design"
    resp = requests.get(url, headers=headers)
    resp.encoding = "utf-8"
    data = json.loads(resp.text)    # the API returns JSON; resp.json() would do the same
    print(data['paging'])           # paging information for the column
    print(data['data'])             # one entry per article
    urls = []
    for item in data['data']:       # note: naming this variable `re` would shadow the regex module imported above
        # print(item['excerpt'])
        print(item['url'])
        urls.append(item['url'])
    return urls

def hqnr(url):
    """Scrape one article: save its HTML source and all images under ./zhihu/<title>/."""
    headers = {
        'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'
    }
    # url = "https://zhuanlan.zhihu.com/p/59011185"
    html = requests.get(url, headers=headers).text
    soup = BeautifulSoup(html, 'lxml')

    # Get the title and strip characters that are illegal in file names
    h1 = soup.h1.text
    h1 = re.sub(r'[\|\/\<\>\:\*\?\\\"]', "_", h1)
    print(h1)
    os.makedirs(f'./zhihu/{h1}/', exist_ok=True)

    # Save the HTML source of the article body
    div = soup.find(class_="RichText ztext Post-RichText")
    div = str(div)
    with open(f'./zhihu/{h1}/h1.txt', 'w', encoding='utf-8') as f:
        f.write(div)
    print(f"Saved the HTML source of {h1}!")

    # Print the text content, one <p> tag at a time
    con = soup.find(class_="RichText ztext Post-RichText").find_all('p')
    for i in con:
        print(i.get_text())

    # Collect the image addresses
    img = soup.find(class_="RichText ztext Post-RichText").find_all('img')
    imgurls = []
    for imgurl in img:
        src = imgurl.get('data-original')   # placeholder <img> tags carry no data-original attribute
        if src:
            imgurls.append(src)

    # Deduplicate the image addresses while keeping their order
    img_urls = []
    for urls in imgurls:
        if urls not in img_urls:
            img_urls.append(urls)

    # Download every image
    for img_url in img_urls:
        print(img_url)
        img_name = img_url[-10:]    # crude file name: the last 10 characters of the URL
        r = requests.get(img_url, headers=headers)
        with open(f'./zhihu/{h1}/{img_name}', 'wb') as f:
            f.write(r.content)
        print(f"Saved image {img_name}!")
    print(f"Saved all images of {h1}!")

#hqlj("https://zhuanlan.zhihu.com/api/columns/cun-design/articles?include=data%5B%2A%5D.admin_closed_comment%2Ccomment_count%2Csuggest_edit%2Cis_title_image_full_screen%2Ccan_comment%2Cupvoted_followees%2Ccan_open_tipjar%2Ccan_tip%2Cvoteup_count%2Cvoting%2Ctopics%2Creview_info%2Cauthor.is_following%2Cis_labeled%2Clabel_info&limit=10&offset=20")

hqnr("https://zhuanlan.zhihu.com/p/59964490")

'''if __name__ == '__main__':lj=hqlj("https://zhuanlan.zhihu.com/api/columns/cun-design/articles?include=data%5B%2A%5D.admin_closed_comment%2Ccomment_count%2Csuggest_edit%2Cis_title_image_full_screen%2Ccan_comment%2Cupvoted_followees%2Ccan_open_tipjar%2Ccan_tip%2Cvoteup_count%2Cvoting%2Ctopics%2Creview_info%2Cauthor.is_following%2Cis_labeled%2Clabel_info&limit=10&offset=20")for url in lj:print(url)hqnr(url)'''
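The script as written scrapes one hard-coded article, and the commented-out block only covers the single page at offset=20. As a rough sketch that reuses hqlj and hqnr from above, a driver for the whole column could look like the code below; the assumption that the endpoint accepts limit/offset paging without the long include parameter, the stop condition of an empty data list, and the one-second pause are all additions, not part of the original code.

import time   # used only by the sketch below

def crawl_column(base_api, page_size=10, max_pages=100):
    # Hypothetical driver: page through the column and scrape every article it lists.
    offset = 0
    while offset < page_size * max_pages:
        page_url = f"{base_api}?limit={page_size}&offset={offset}"
        urls = hqlj(page_url)       # article URLs on this page
        if not urls:                # assumed stop condition: an empty page means we are past the end
            break
        for url in urls:
            hqnr(url)               # saves the article's HTML source and images
            time.sleep(1)           # short pause between article requests
        offset += page_size

# crawl_column("https://zhuanlan.zhihu.com/api/columns/cun-design/articles")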
