Tags: requests, bs4, datetime, re, json

This post shows how to use Python to scrape the details of an article from a specific news site, including its title, source, publication time, body text, and responsible editor, as well as the article's comment count. The pages are fetched and parsed with the requests and BeautifulSoup libraries.
 
     
import re
import json
import requests
from bs4 import BeautifulSoup
from datetime import datetime

url = 'https://news.qq.com/a/20180520/008334.htm'
result = {}
res = requests.get(url)
res.encoding = 'utf-8'  # set the encoding before parsing, otherwise Chinese text may come out garbled
soup = BeautifulSoup(res.text, 'html.parser')
result['title'] = soup.select('.hd h1')[0].text
result['editor'] = soup.select('#QQeditor')[0].text
result['origin'] = soup.select('.a_source')[0].text
result['time'] = soup.select('.a_time')[0].text
# join every paragraph of the article body with '@' as the separator
result['article'] = '@'.join([p.text.strip() for p in soup.select('#Cnt-Main-Article-QQ p')])
result  # inspect the scraped fields (e.g. in a notebook cell)
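If you want the publication time as a datetime object rather than a plain string, here is a minimal sketch. It assumes the .a_time text looks like '2018-05-20 08:33'; the actual format printed by the page may differ, in which case the strptime pattern needs to be adjusted.

from datetime import datetime

time_str = result['time'].strip()              # hypothetical example value: '2018-05-20 08:33'
dt = datetime.strptime(time_str, '%Y-%m-%d %H:%M')
print(dt)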
 
     

 

# An earlier example from 2016: scraping a Sina news article, including its comment count
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
import json

# Sina comment API; {} is filled with the news id extracted from the article URL
commenturl = ('http://comment5.news.sina.com.cn/page/info?version=1&format=js'
              '&channel=gn&newsid=comos-{}&group=&compress=0&ie=utf-8&oe=utf-8'
              '&page=1&page_size=20')

def getCommentCounts(newsurl):
    m = re.search('doc-i(.*).shtml', newsurl)
    newsid = m.group(1)
    comments = requests.get(commenturl.format(newsid))
    print(commenturl.format(newsid))
    # the response is JavaScript of the form "var data=...", so strip the prefix before parsing
    jd = json.loads(comments.text.strip('var data='))
    return jd['result']['count']['total']

def getNewsDetail(newsurl):
    result = {}
    res = requests.get(newsurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    result['title'] = soup.select('#artibodyTitle')[0].text
    result['newssource'] = soup.select('.time-source span a')[0].text
    timesource = soup.select('.time-source')[0].contents[0].strip()
    result['dt'] = datetime.strptime(timesource, '%Y年%m月%d日%H:%M')
    result['article'] = '@'.join([p.text.strip() for p in soup.select('#artibody p')[:-1]])
    result['editor'] = soup.select('.article-editor')[0].text.strip('责任编辑:')
    result['comments'] = getCommentCounts(newsurl)
    return result

newsurl = 'http://news.sina.com.cn/c/nd/2016-12-18/doc-ifxytqax6457791.shtml'  # just point newsurl at the article to scrape
print(getNewsDetail(newsurl))
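With getNewsDetail() in place, scraping several articles only takes a loop over their URLs. A minimal sketch (the list below reuses the one article URL from above; replace it with real Sina article links):

urls = [
    'http://news.sina.com.cn/c/nd/2016-12-18/doc-ifxytqax6457791.shtml',
]

details = [getNewsDetail(u) for u in urls]
for d in details:
    print(d['title'], d['comments'])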

 

posted on 2018-05-20 17:47 by leolaosao

Reposted from: https://www.cnblogs.com/leolaosao/p/9064013.html

Another example: crawling the guest table of the Baidu Baike entry for 《乘风破浪的姐姐》 (Sisters Who Make Waves).

import requests
from bs4 import BeautifulSoup

def crawl_wiki_data():
    """
    Crawl the guest information of 《乘风破浪的姐姐》 from Baidu Baike and return the matching table element.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
    }
    url = 'https://baike.baidu.com/item/乘风破浪的姐姐'
    try:
        response = requests.get(url, headers=headers)
        # pass the document to the BeautifulSoup constructor to get a parsed document object
        soup = BeautifulSoup(response.text, 'lxml')

        # find all <table> tags on the page
        tables = soup.find_all('table')
        crawl_table_title = "按姓氏首字母排序"
        for table in tables:
            # look at the <div> preceding the current table node, which holds the table's title
            table_titles = table.find_previous('div')
            for title in table_titles:
                if crawl_table_title in title:
                    return table
    except Exception as e:
        print(e)
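A usage sketch for the function above: call crawl_wiki_data() and pull the guest names out of the returned table. Extracting names from <a> tags is an assumption about the Baidu Baike markup, which may have changed since the snippet was written.

table = crawl_wiki_data()
if table is not None:
    # assumption: guest names are wrapped in <a> tags inside the table
    names = [a.text.strip() for a in table.find_all('a') if a.text.strip()]
    print(names)
else:
    print('table not found')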