1. 将新闻的正文内容保存到文本文件。
# Extract the article body text and append it to a local text file.
content = soup.select('.show-content')[0].text
# Context manager guarantees the handle is closed even if write() raises
# (the original open/close pair leaked the handle on error).
with open('gzccNews.txt', 'a', encoding='utf8') as f:
    f.write(content)
2. 将新闻数据结构化为字典的列表:
def _info_field(info, label):
    """Return the value following `label` in the show-info string, or '无'.

    Fixes two defects in the original inline extraction:
    - `lstrip('作者:')` strips a *character set*, not a prefix, so a value
      whose first character happens to be in the set was silently truncated;
      slicing off exactly `len(label)` characters is prefix removal.
    - `info.find(label) > 0` treated a label at index 0 as absent;
      use `>= 0`.
    """
    pos = info.find(label)
    if pos < 0:
        return '无'
    # Token runs from the label up to the next whitespace; drop the label.
    return info[pos:].split()[0][len(label):]

# Structure one news article as a dict.
news = {}
news['title'] = soupd.select('.show-title')[0].text
news['newsUrl'] = newsUrl
info = soupd.select('.show-info')[0].text
# `dt_str` instead of `time`, which shadowed the stdlib `time` module.
dt_str = re.search('发布时间:(.*) \xa0\xa0 \xa0\xa0作者:', info).group(1)
news['dtime'] = datetime.strptime(dt_str, '%Y-%m-%d %H:%M:%S')
news['author'] = _info_field(info, '作者:')
news['check'] = _info_field(info, '审核:')
news['source'] = _info_field(info, '来源:')
news['photo'] = _info_field(info, '摄影:')
news['clickCount'] = getClickCount(newsUrl)
news['content'] = soupd.select('.show-content')[0].text
复制代码
一个列表页的所有单条新闻汇总到列表 newsList:newsList.append(news)
# Collect the detail dict of every news item on one listing page.
newsList = []
# Loop variable renamed from `news`: the original clobbered the module-level
# `news` dict built above.  Truthiness test replaces `len(...) > 0`.
for li in soup.select('li'):
    if li.select('.news-list-title'):
        newsList.append(getNewDetail(li.a.attrs['href']))
所有列表页的所有新闻汇总到总列表 newsTotal:newsTotal.extend(newsList)
复制代码
# Crawl every listing page and accumulate all news dicts into newsTotal.
newsTotal = []
firstUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
newsTotal.extend(getLiUrl(firstUrl))
res = requests.get(firstUrl)
res.encoding = 'utf-8'
soupn = BeautifulSoup(res.text, 'html.parser')
# '.a1' holds the total article count as "<N>条"; 10 articles per page.
n = int(soupn.select('.a1')[0].text.rstrip('条'))//10+1
# Pages 2..n live at .../<i>.html (page 1 is the index URL above).
# Original used range(2, n), which skipped the last page.
for i in range(2, n + 1):
    pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
    print('第{}页:'.format(i))
    newsTotal.extend(getLiUrl(pageUrl))
    # NOTE(review): presumably a deliberate cap to one extra page while
    # testing the crawler — remove this break to crawl all pages.
    break
复制代码
3. 安装pandas,用pandas.DataFrame(newsTotal)创建一个DataFrame对象df。
import pandas

# Turn the list of per-article dicts into a tabular DataFrame;
# dict keys become column names, one row per article.
df = pandas.DataFrame(data=newsTotal)
4. 通过df将提取的数据保存到csv或excel 文件。
# Persist the DataFrame to both Excel and CSV.
df.to_excel('gzccnews.xlsx')
# BUG FIX: the original passed 'gzccnews.xlsx' to to_csv as well, so the
# CSV bytes overwrote the Excel file just written; use a .csv filename.
df.to_csv('gzccnews.csv')
5. 用pandas提供的函数和方法进行数据分析:
# Basic pandas analyses over the crawled data.
# First six rows of the click-count / title / source columns.
print(df[['clickCount','title','source']].head(6))

# Rows with more than 3000 clicks published by the school general office;
# named boolean masks make the combined filter easier to read.
popular = df['clickCount'] > 3000
from_office = df['source'] == '学校综合办'
print(df[popular & from_office])

# Rows whose source is one of the listed departments.
wanted_sources = ['国际学院','学生工作处']
print(df[df['source'].isin(wanted_sources)])