Scraping Douban Top 250
Fetching a page
import urllib.error
import urllib.request


def askURL(url):
    """Fetch a page and return its HTML; a browser User-Agent is sent so Douban does not reject the request."""
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36 Edg/84.0.522.58"
    }
    req = urllib.request.Request(url, headers=headers)
    html = ""
    try:
        response = urllib.request.urlopen(req)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:  # HTTPError is a subclass of URLError, so one handler covers both
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    return html
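A quick way to sanity-check askURL is to fetch the first page of the list directly. The full Top 250 URL below is an assumption (only the start offset appears in the paging code later); printing a short prefix of the response is enough to confirm the request went through.

html = askURL("https://movie.douban.com/top250?start=0")  # assumed Top 250 URL
print(html[:200])  # a short prefix of the returned HTML confirms the fetch worked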
Parsing the page (use BeautifulSoup to pick out the broad tag blocks first, then use regular expressions for precise matching)
import re

# Regex patterns for extracting each field from an item's HTML

# Movie detail-page link: the <a> tag marks a link, so the captured group is the link itself
findLink = re.compile(r'<a href="(.*?)">')
# Poster image URL
findImage = re.compile(r'<img.*src="(.*?)"', re.S)  # re.S lets . match newlines as well
# Movie title
findTitle = re.compile(r'<span class="title">(.*?)</span>')
# Rating
fingRating = re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')
# Number of people who rated
findJudge = re.compile(r'<span>(\d*)人评价</span>')
# One-line description
findInq = re.compile(r'<span class="inq">(.*?)</span>')
# Movie overview
findBd = re.compile(r'<p class="">(.*?)</p>', re.S)
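To see the two-stage idea in action before wiring it into getData, the patterns can be exercised on a hypothetical, simplified item snippet (the real page has more markup, but the tags below mirror its structure):

sample_item = '''
<div class="item">
    <a href="https://movie.douban.com/subject/1292052/">
        <span class="title">肖申克的救赎</span>
    </a>
    <span class="rating_num" property="v:average">9.7</span>
    <span>2000000人评价</span>
</div>
'''  # hypothetical snippet, trimmed down from the real item markup

print(re.findall(findTitle, sample_item))   # ['肖申克的救赎']
print(re.findall(fingRating, sample_item))  # ['9.7']
print(re.findall(findJudge, sample_item))   # ['2000000']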
import bs4


def getData(baseurl):
    datalist = []
    # 1. Fetch every results page (10 pages, 25 movies per page)
    for i in range(10):
        url = baseurl + str(i * 25)
        html = askURL(url)
        # 2. Parse the page, one movie at a time
        soup = bs4.BeautifulSoup(html, "html.parser")
        for item in soup.find_all('div', class_="item"):  # each movie lives in a <div class="item">
            data = []  # the fields collected for one movie
            item = str(item)
            titles = re.findall(findTitle, item)  # a movie may have both a Chinese and an English title
            if len(titles) == 2:
                ctitle = titles[0]
                data.append(ctitle)  # Chinese title
                etitle = titles[1]
                etitle = re.sub("/", "", etitle)  # strip the leading "/"
                data.append(etitle)  # English title
            else:
                data.append(titles[0])
                data.append("无")  # no English title
            link = re.findall(findLink, item)[0]
            data.append(link)  # detail-page link
            rating = re.findall(fingRating, item)[0]
            data.append(rating)  # rating
            judgeNum = re.findall(findJudge, item)[0]
            data.append(judgeNum)  # number of ratings
            inq = re.findall(findInq, item)
            if len(inq) != 0:
                inq = inq[0].replace("。", "")  # drop the trailing full stop
                data.append(inq)  # one-line description
            else:
                data.append("无")  # some movies have no description
            bd = re.findall(findBd, item)[0]
            bd = re.sub(r"<br(\s+)?/>(\s+)?", "", bd)  # remove <br/> tags
            bd = re.sub("/", "", bd)  # remove "/" separators
            data.append(bd.strip())  # overview, with leading/trailing whitespace removed
            # image = re.findall(findImage, item)[0]
            # data.append(image)  # poster URL
            datalist.append(data)  # one movie (one <div class="item">) fully processed
    return datalist
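Putting the two pieces together: a minimal driver sketch, again assuming the standard Top 250 base URL with a trailing start= parameter (the code above only appends the page offset to it).

if __name__ == "__main__":
    baseurl = "https://movie.douban.com/top250?start="  # assumed base URL; only the offset is appended above
    datalist = getData(baseurl)
    print(len(datalist))  # expect 250 entries when all 10 pages parse cleanly
    print(datalist[0])    # one movie's fields, in the order they were appended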
Saving the data (text file, Excel, SQLite database)
def saveText(datalist):
    path = "豆瓣Top250.txt"