1. Wrap the scraping of all news items on one news-list page into a function.
2. Get the total number of news items and compute the total number of list pages.
3. Get the full details of every news item on every list page.
import requests
from bs4 import BeautifulSoup
from datetime import datetime
import re
# Get the click count for one news article
def getNewsId(url):
    # The article id is the last four digits before '.html' in the article URL
    newsId = re.findall(r'\_(.*).html', url)[0][-4:]
    clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
    clickRes = requests.get(clickUrl)
    # Extract the click count from the API response with a regular expression
    clickCount = int(re.search(r"hits'\).html\('(.*)'\);", clickRes.text).group(1))
    return clickCount
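# For example (the URL below is only illustrative, not taken from the site):
# getNewsId('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html')
# would extract the id '9183', query the count API with it, and return the click count as an int.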
# Get the details of one news article
def getNewsDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    content = soupd.select('#content')[0].text
    info = soupd.select('.show-info')[0].text
    # Call getNewsId() to get the click count
    count = getNewsId(newsUrl)
    # Pick out the publication time
    date = re.search(r'(\d{4}.\d{2}.\d{2}\s\d{2}.\d{2}.\d{2})', info).group(1)
    # Author, reviewer and source are not always present, so give them defaults
    # to avoid a NameError in the print below
    author = check = sources = 'none'
    if info.find('作者:') > 0:
        author = re.search(r'作者:((.{2,4}\s|.{2,4}、){1,3})', info).group(1)
    if info.find('审核:') > 0:
        check = re.search(r'审核:((.{2,4}\s){1,3})', info).group(1)
    if info.find('来源:') > 0:
        sources = re.search(r'来源:(.*?)\s*(?:摄|点)', info).group(1)
    # Convert the time string to a datetime object
    dateTime = datetime.strptime(date, '%Y-%m-%d %H:%M:%S')
    # Format the output
    print('发布时间:{0}\n作者:{1}\n审核:{2}\n来源:{3}\n点击次数:{4}'.format(dateTime, author, check, sources, count))
    print(content)
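# For example, datetime.strptime('2018-04-04 14:05:39', '%Y-%m-%d %H:%M:%S')
# (an illustrative timestamp) gives datetime(2018, 4, 4, 14, 5, 39).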
# Get the news items on one list page
def getListPage(listUrl):
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for new in soup.select('li'):
        if len(new.select('.news-list-title')) > 0:
            title = new.select('.news-list-title')[0].text
            description = new.select('.news-list-description')[0].text
            newsUrl = new.select('a')[0]['href']
            print('标题:{0}\n内容:{1}\n链接:{2}'.format(title, description, newsUrl))
            # Call getNewsDetail() to fetch the article details
            getNewsDetail(newsUrl)
            break  # stop after the first matching item on this page
# Get the total number of list pages (see the page-count note after this script)
def getListTotalNumber(firstUrl):
    res = requests.get(firstUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    # The '.a1' element holds the total item count (a number followed by '条'); 10 items per page
    listCount = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
    return listCount
firstUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
# The first list page
getListPage(firstUrl)
# Total number of list pages
n = getListTotalNumber(firstUrl)
# The remaining list pages, from page 2 through the last one
for i in range(2, n + 1):
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    getListPage(listUrl)
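For reference, the page-count arithmetic in getListTotalNumber can also be written with math.ceil. The standalone sketch below only assumes, as the code above does, 10 news items per list page; note that the // 10 + 1 form counts one page too many when the total is an exact multiple of 10, which math.ceil avoids.

import math

# Page count, assuming 10 news items per list page (same assumption as getListTotalNumber).
def page_count(total_items, per_page=10):
    return math.ceil(total_items / per_page)

print(page_count(1057))   # 106, same as 1057 // 10 + 1
print(page_count(1050))   # 105, whereas 1050 // 10 + 1 gives 106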
4. Pick a topic you are interested in, scrape its data, and run a word-segmentation analysis on it (a jieba sketch for that step follows the code below). The topic must not duplicate another student's.
import requests
from bs4 import BeautifulSoup
# Get the detail page of one movie
def getNewsDetail(newsUrl):
    resd = requests.get(newsUrl)
    resd.encoding = 'utf-8'
    soupd = BeautifulSoup(resd.text, 'html.parser')
    # Keep only the text before the hard-coded download-address line (specific to this one movie page)
    content = soupd.select('.cont')[0].text.rsplit("复仇者联盟2:奥创纪元下载地址:http://www.80smp4.net/mp4_3gp/26733/")[0]
    print('内容:{}'.format(content))
# Get the items on one list page
def getListPage(listUrl):
    res = requests.get(listUrl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for new in soup.select('div'):
        if len(new.select('.ph_u')) > 0:
            description = new.select('.title')[0].text.split()[0]
            newsUrl = new.select('a')[0]['href']
            print('片名:{0}\n链接:{1}'.format(description, newsUrl))
            # Call getNewsDetail() to fetch the movie's detail page
            getNewsDetail(newsUrl)
            break  # stop after the first matching item
firstUrl = 'http://www.80smp4.net/movie/'
getListPage(firstUrl)
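The assignment also asks for a word-segmentation analysis of the scraped text, which the code above does not yet perform. Below is a minimal sketch using the jieba library; it assumes the synopsis text printed by getNewsDetail is collected into a plain string first (the count_words name and the sample string are illustrative, not taken from the site).

import jieba
from collections import Counter

# Word-frequency count with jieba; `text` is assumed to be the scraped synopsis string.
def count_words(text, top_n=20):
    # Cut the text into words and drop single-character tokens (mostly punctuation and particles)
    words = [w for w in jieba.lcut(text) if len(w) > 1]
    return Counter(words).most_common(top_n)

sample = '复仇者联盟2:奥创纪元是一部超级英雄电影,电影讲述复仇者联盟再次集结的故事'
for word, freq in count_words(sample):
    print(word, freq)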