# 作业:利用BeautifulSoup4整理电影数据到csv表格
import csv
import os
import time

import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
def requests_get(URL):
    """Fetch *URL* with a desktop-browser User-Agent, retrying on network errors.

    Returns the Response on HTTP 200.  On any other status code it prints
    the code and returns None (preserved from the original best-effort
    behaviour).  Raises ConnectionError when every retry fails, instead of
    the original unbounded loop.
    """
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.64 Safari/537.36'
    }
    # Bounded retries replace the original `while True` + bare `except: pass`,
    # which swallowed every exception (including KeyboardInterrupt) and could
    # spin forever on a persistent network failure.
    for _attempt in range(10):
        try:
            resp = requests.get(url=URL, headers=headers, timeout=3)
            break
        except requests.RequestException:
            time.sleep(0.5)  # brief back-off before retrying
    else:
        raise ConnectionError(f'failed to fetch {URL} after 10 attempts')
    if resp.status_code == 200:
        return resp
    print(resp.status_code)
def single_link_list(soup):
    """Collect the detail-page URL of every movie on one Top-250 list page.

    Each movie card wraps its title link in a `div.hd`; the link's href
    points at the movie's own page.
    """
    links = []
    for anchor in soup.select('ol div.hd > a'):
        links.append(anchor.attrs["href"])
    return links
def target_info(soup):
    """Scrape one Douban movie detail page into a flat list of eight strings.

    Returns [name, directors, actors, genres, country/region, language,
    release date, rating] -- the same column order written by create_file.
    """
    # Movie title: first <span> inside the page's <h1>.
    movie_name = soup.select_one('#content > h1 > span:nth-child(1)').text
    # Director names, joined with '/'.
    directors0 = soup.select('#info > span:nth-child(1) > span.attrs > a')
    directors1 = '/'.join([x.text for x in directors0])
    # Lead actors; the span's text is already '/'-separated, so plain concat.
    actors0 = soup.select('div#info > span.actor > span.attrs')
    actors1 = ''.join([x.text for x in actors0])
    # Full text of the #info block.  The remaining fields ("genre" and the
    # label-only "country/region" / "language" entries) have no dedicated
    # tags, so they are sliced out of this text by label position.
    info_text = soup.select_one('#info').text
    # Genre: between the "类型" label and the next label -- "官方网站"
    # (official site) when present, otherwise "制片" (production).
    # NOTE(review): the +4 / +6 / -1 offsets assume the exact
    # "label: value\n" layout of the rendered text -- confirm on a live page.
    a = info_text.find('类型')
    if '官方网站' in info_text:
        b = info_text.find('官方网站')
    else:
        b = info_text.find('制片')
    movie_type = info_text[a + 4: b - 1]
    # Country/region: between the "地区" and "语言" labels.
    a = info_text.find('地区')
    b = info_text.find('语言')
    movie_nation = info_text[a + 4: b - 1]
    # Language: between the "语言" and "上映日期" labels.
    a = info_text.find('语言')
    b = info_text.find('上映日期')
    language = info_text[a + 4: b - 1]
    # Release date: between the "上映日期" and "片长" labels
    # (+6 because this label is four characters long, not two).
    a = info_text.find('上映日期')
    b = info_text.find('片长')
    publish_time = info_text[a + 6: b - 1]
    # Aggregate rating shown in the sidebar.
    rating_num = soup.select_one('#interest_sectl > div.rating_wrap.clearbox > div.rating_self.clearfix > strong').text
    return [movie_name, directors1, actors1, movie_type, movie_nation, language, publish_time, rating_num]
def create_file(path):
    """Create *path* with the CSV header row, unless the file already exists."""
    if os.path.exists(path):
        return
    header = ['电影名', '导演', '主演', '类型', '制片国家/地区', '语言', '上映日期', '评分']
    with open(path, 'w', encoding='utf-8', newline='') as f:
        csv.writer(f).writerow(header)
if __name__ == '__main__':
    # Crawl the Douban Top 250 in pages of 25 movies, appending one CSV row
    # per movie; the header file is created once up front if missing.
    create_file('top250.csv')
    for offset in tqdm(range(0, 226, 25)):
        with open('top250.csv', 'a', encoding='utf-8', newline='') as out:
            page_writer = csv.writer(out)
            page_url = f'https://movie.douban.com/top250?start={offset}&filter='
            list_soup = BeautifulSoup(requests_get(page_url).text, 'lxml')
            for detail_url in tqdm(single_link_list(list_soup)):
                detail_soup = BeautifulSoup(requests_get(detail_url).text, 'lxml')
                page_writer.writerow(target_info(detail_soup))