# -*- coding: utf-8 -*-
"""
Created on Mon Jul 10 16:34:27 2017
@author: ahchpr
filename: re_zero_bili.py
"""
import requests, csv, re, time
from bs4 import BeautifulSoup as BS
from selenium import webdriver
import datetime
from multiprocessing import Pool
import sys
# Index page for the full series of Re:从零开始的异世界生活 (Re:Zero)
first_url = 'https://bangumi.bilibili.com/anime/3461'
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'}
one_url = 'https://bangumi.bilibili.com/anime/3461/play#86298'
#history_danmu_url = 'https://comment.bilibili.com/dmroll,time,cid'
#now_danmu_url = 'https://comment.bilibili.com/{}.xml'.format(danmu_id)
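# Filled-in examples of the two endpoints above (the cid 86298 and the
# timestamp are placeholders for illustration, not real values):
#   current danmaku:    https://comment.bilibili.com/86298.xml
#   historical danmaku: https://comment.bilibili.com/dmroll,1499644800,86298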
def get_danmu_id(url):
    """Load an episode's play page and pull the danmaku id (cid) out of the rendered source."""
    MyDriver = webdriver.PhantomJS()
    MyDriver.get(url)
    time.sleep(3)  # give the player time to render so the cid shows up in the page source
    danmu_id = re.findall(r'cid=(\d+)&', MyDriver.page_source)[0]
    MyDriver.quit()  # release the PhantomJS process
    return danmu_id
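# e.g. get_danmu_id(one_url) returns the first 'cid=<digits>&' match found in
# the rendered player markup, as a string.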
def sele_get_first(url):
    """Render the series index page with PhantomJS and return its HTML."""
    MyDriver = webdriver.PhantomJS()
    MyDriver.get(url)
    time.sleep(5)  # wait for the episode list to finish rendering
    page = MyDriver.page_source  # already a str in Python 3; no encode/decode round-trip needed
    MyDriver.quit()
    return page
def sele_get_re_list(page):
    """Extract (play-page URL, episode title) pairs from the rendered index page."""
    pattern = re.compile('<a.*?href="(.*?)" title="(.*?)" target.*? class="v1-complete-text"><div class=')
    abstract = re.findall(pattern, page)
    return abstract
def request_get_comment(url):
    """Fetch one episode's danmaku XML and return its rows.

    `url` is a single string of the form "episode xml_url", e.g.
    "1 https://comment.bilibili.com/12345.xml" (cid illustrative).
    """
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko)'}
    episode = url.split(" ")[0]
    url = url.split(" ")[1].strip()
    response = requests.get(url=url, headers=headers)
    soup = BS(response.text, 'lxml')
    result = soup.find_all('d')
    if len(result) == 0:
        return result
    all_list = []
    for item in result:
        # The 'p' attribute holds eight comma-separated fields; the danmaku
        # text itself is the element body.
        danmu_list = item.get('p').split(",")
        danmu_list.append(item.string)
        # Optionally normalize the two time fields, e.g.:
        # danmu_list[0] = sec_to_str(danmu_list[0])
        # danmu_list[4] = time.ctime(float(danmu_list[4]))
        danmu_list.append(episode)
        all_list.append(danmu_list)
    return all_list
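# Shape of one <d> element in the danmaku XML and the row built from it
# (field values below are illustrative, not taken from a real episode):
#   <d p="116.321,1,25,16777215,1499000000,0,a1b2c3d4,921000000">danmaku text</d>
#   -> ['116.321', '1', '25', '16777215', '1499000000', '0', 'a1b2c3d4',
#       '921000000', 'danmaku text', episode]
# i.e. [seconds into the video, display mode, font size, color as int,
#       send timestamp, pool, sender hash, row id, message, episode]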
"""将秒转换成固定格式 "hh:mm:ss"
"""
def sec_to_str(seconds):
seconds = eval(seconds)
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
dtEventTime = "%02d:%02d:%02d" % (h, m, s)
return (dtEventTime)
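# e.g. sec_to_str("123.4") -> "00:02:03"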
"""计算最近30天的每天的时间戳,并返回,用于获取历史弹幕
"""
def time_to_stamp():
today = datetime.date.today()
end_day = datetime.datetime(today.year, today.month, today.day)
start_day = end_day - datetime.timedelta(30)
gap_day_sum = 30
stamp_list = []
for i in range(1, gap_day_sum):
tmp = start_day + datetime.timedelta(i)
stamp_list.append(int(time.mktime(tmp.timetuple())))
return (stamp_list)
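# A minimal sketch (not wired into main below) of how these stamps pair with
# the dmroll endpoint noted at the top of the file to pull one historical
# danmaku snapshot per day. The function name and `cid` parameter are
# illustrative assumptions, not part of the original script.
def get_history_danmu(cid):
    all_rows = []
    for stamp in time_to_stamp():
        url = 'https://comment.bilibili.com/dmroll,{},{}'.format(stamp, cid)
        response = requests.get(url=url, headers=headers)
        soup = BS(response.text, 'lxml')
        for item in soup.find_all('d'):
            row = item.get('p').split(",")
            row.append(item.string)
            all_rows.append(row)
    return all_rows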
def csv_write(tablelist, num):
    """Write one episode's danmaku rows to now<num>.csv."""
    tableheader = ['dtTime', 'danmu_model', 'font', 'rgb', 'stamp', 'danmu_chi', 'userID', 'rowID', 'message', 'episode']
    file_name = "now{}.csv".format(num)
    print(file_name)
    # encoding='utf-8' keeps danmaku text intact on Windows, where the default
    # codec would otherwise drop characters via errors='ignore'
    with open(file_name, 'w', newline='', encoding='utf-8', errors='ignore') as fd:
        writer = csv.writer(fd)
        writer.writerow(tableheader)
        for row in tablelist:
            writer.writerow(row)
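# e.g. csv_write(request_get_comment('3 https://comment.bilibili.com/<cid>.xml'), 3)
# would write now3.csv (the <cid> placeholder stands in for a real danmaku id).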
if __name__ == "__main__":
    sys.setrecursionlimit(1000000)
    # Crawl the index page to collect the play links for all 25 episodes of
    # Re:从零开始的异世界生活
    page = sele_get_first(first_url)
    re_list = sele_get_re_list(page)
    # print(len(re_list))
    # Store them as a dict, e.g.:
    # {'1': ['初始的终结与结束的开始', 'https://bangumi.bilibili.com/anime/3461/play#85754'], ...}