#爬取 豆瓣电影Top250
#250个电影 ,分为10个页显示,1页有25个电影
import urllib.request
from bs4 import BeautifulSoup
# Entry URL of the ranking; each of the 10 pages lists 25 movies.
url = "http://movie.douban.com/top250"
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'}
# Adjust the headers above to whatever your own crawl of the page requires.
targetPath = "storage path" # replace with the path where the output file should be stored
def saveText(f, text):
    """Write *text* to the already-open, writable file object *f*."""
    f.write(text)
#获取网页源码
# Fetch the raw page source.
def getData(url, headers):
    """Fetch *url* with the given request *headers*.

    Returns the raw response body as ``bytes``.
    Raises ``urllib.error.URLError`` / ``HTTPError`` on network failure.
    """
    req = urllib.request.Request(url=url, headers=headers)
    # Use the response as a context manager so the underlying HTTP
    # connection is always closed (the original leaked it).
    with urllib.request.urlopen(req) as res:
        return res.read()
#解析网页
# Parse the ranking pages.
def praseHtml(f, url, headers):
    """Crawl every page of the Top-250 list, writing "<rank><title>\\n"
    for each movie to the open file object *f* (and echoing the title).

    NOTE(review): the misspelled name ("prase") is kept so existing
    callers keep working; "parseHtml" was presumably intended.
    """
    currenturl = url
    i = 1  # running rank number prefixed to every title
    while currenturl:
        # Parse the current page and pull out the titles.
        html = getData(currenturl, headers)
        soup = BeautifulSoup(html, 'lxml')
        moveList = soup.find('ol', attrs={'class': 'grid_view'})
        if moveList is None:
            # Layout changed or the request was blocked; stop cleanly
            # instead of raising AttributeError on None.
            break
        for moveLi in moveList.find_all('li'):
            detail = moveLi.find('div', attrs={'class': 'hd'})
            moveName = detail.find('span', attrs={'class': 'title'})
            saveText(f, str(i) + moveName.getText() + '\n')
            i += 1
            print(moveName.getText())
        # Advance to the next page. On the last page the "next" span has
        # no <a> child (and may be absent entirely), so guard both levels
        # to avoid the NoneType errors the original comment mentions.
        nextspan = soup.find('span', attrs={'class': 'next'})
        nextlink = nextspan.find('a') if nextspan is not None else None
        if nextlink is not None:
            # href is relative (e.g. "?start=25&filter="), so append it
            # to the base list URL.
            currenturl = url + nextlink['href']
        else:
            currenturl = None
if __name__ == "__main__":
    # `with` guarantees the output file is flushed and closed even if the
    # crawl raises; the original opened the handle and never closed it.
    # UTF-8 is forced so the Chinese titles survive on any platform.
    with open(targetPath, "w", encoding="utf-8") as f:
        praseHtml(f, url, headers)
Python爬虫——豆瓣电影Top250
最新推荐文章于 2024-12-02 17:30:59 发布
本文介绍了一个使用Python的urllib和BeautifulSoup库来爬取豆瓣电影Top250榜单的简单程序。该程序能够自动翻页并抓取所有250部电影的名字,最终将这些数据保存到本地文件中。
923

被折叠的评论
为什么被折叠?



