py
import requests
from bs4 import BeautifulSoup

# Fetch the article list page
url = "https://www.jhc.cn/4548/list.htm"
html = requests.get(url)
html.encoding = "utf-8"
htmlurl = "https://www.jhc.cn"
bs = BeautifulSoup(html.text, 'lxml')

# Collect the absolute URL of every article linked from the list page
article_urls = []
write = ''
title = bs.select("span[class='Article_Title'] a")
for i in title:
    print(i.text)
    urlss = htmlurl + i.attrs['href']
    print(urlss)
    article_urls.append(urlss)

# Visit each article page and pull out the title, update time, and first paragraph
for j in article_urls:
    urls = requests.get(j)
    urls.encoding = "utf-8"
    bs = BeautifulSoup(urls.text, 'lxml')
    titles = bs.select("h1")
    print(titles[0].text)
    time = bs.select("span[class='arti-update']")
    print(time[0].text)
    text = bs.select("article div p")
    print(text[0].text)
    ss = titles[0].text + "\n" + time[0].text + "\n" + text[0].text + "\n\n"
    write = write + ss

# Save everything that was collected
with open('text.txt', 'w', encoding="utf-8") as f:
    f.write(write)
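# Note: a minimal alternative sketch, not part of the original script. The absolute
# article URLs above are built by plain string concatenation; the standard library's
# urllib.parse.urljoin handles leading/trailing slashes automatically and could be
# used instead:
#
#     from urllib.parse import urljoin
#     urlss = urljoin("https://www.jhc.cn/", i.attrs['href'])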
# Second version: selects by CSS class and writes the results to big.txt
import requests
from bs4 import BeautifulSoup

# Fetch the article list page
urls = "https://www.jhc.cn/4548/list.htm"
html = requests.get(urls)
html.encoding = "utf-8"
bs = BeautifulSoup(html.text, 'lxml')

# Print the plain text of the article list block
ls = bs.select(".wp_article_list")
print(ls[0].getText())

# Collect the article links (index 0 is skipped) and scrape each page
hs = bs.select(".Article_Title a")
count = ""
for i in range(1, len(hs)):
    htmls = "https://www.jhc.cn" + hs[i].get('href')
    print(htmls)
    url = htmls
    htm11 = requests.get(url)
    htm11.encoding = "utf-8"
    br = BeautifulSoup(htm11.text, 'lxml')
    bt = br.select(".arti-title")         # article title
    print(len(bt))
    sj = br.select(".arti-update")        # update time
    zw = br.select(".wp_articlecontent")  # article body
    print("=Title=")
    print(bt[0].string)
    print("=Time=")
    print(sj[0].getText())
    print("=Body=")
    print(zw[0].getText())
    ss = bt[0].string + "\n" + sj[0].getText() + "\n" + zw[0].getText() + "\n" + "===========================" + "\n"
    count = count + ss

# Save everything that was collected
with open("big.txt", "w", encoding="utf-8") as f:
    f.write(count)
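# Note: a hedged hardening sketch, not in the original code. Each requests.get call
# could pass a timeout and check the HTTP status before parsing, so a slow or failed
# response does not hang or silently produce empty output:
#
#     htm11 = requests.get(url, timeout=10)
#     htm11.raise_for_status()
#     htm11.encoding = "utf-8"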