# encoding=utf-8
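"""Scrape the movie titles from the Douban Top 250 list.

Each page is downloaded with requests and parsed with BeautifulSoup; the
titles are written to a local file named 'movies', and the script follows
the "next page" link until the last page is reached.
"""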
from bs4 import BeautifulSoup
import requests
import codecs
DOWNLOAD_URL = 'http://movie.douban.com/top250/'

# requests issues an HTTP GET request to fetch the target site's source code
def download_page(url):
    return requests.get(url).content  # raw page source
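
# Optional variant (not part of the original script): some sites, Douban
# included, may reject requests that carry no browser-like User-Agent header.
# If download_page() returns an error page or empty content, passing a header
# usually helps; the User-Agent string below is only a placeholder example.
def download_page_with_headers(url):
    headers = {'User-Agent': 'Mozilla/5.0'}
    return requests.get(url, headers=headers).content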

# BeautifulSoup is a Python library for extracting data from HTML documents
def parse_html(html):
    # takes the HTML source as input
    soup = BeautifulSoup(html, "html.parser")
    # create a BeautifulSoup object
    movie_list_soup = soup.find('ol', attrs={'class': 'grid_view'})
    # find the ordered list of movies through that object
    movie_name_list = []
    # initialize the result list
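    # The lookups in this function assume markup roughly shaped like the
    # snippet below (simplified, reconstructed only from the tags queried
    # here, not copied from the live page):
    #
    #   <ol class="grid_view">
    #     <li>
    #       <div class="hd">
    #         <span class="title">Movie Title</span>
    #       </div>
    #     </li>
    #     ...
    #   </ol>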
    for movie_li in movie_list_soup.find_all('li'):
        detail = movie_li.find('div', attrs={'class': 'hd'})
        movie_name = detail.find('span', attrs={'class': 'title'}).getText()
        # see the page source for the markup matched above
        movie_name_list.append(movie_name)
        # loop over every <li> item and append its title to the list
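    # Pagination note: next_page['href'] is assumed to be relative to
    # DOWNLOAD_URL (the two are simply concatenated); on the last page the
    # "next" <span> holds no <a>, so find() returns None and the while loop
    # in main() terminates.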
    next_page = soup.find('span', attrs={'class': 'next'}).find('a')
    if next_page:
        return movie_name_list, DOWNLOAD_URL + next_page['href']
    return movie_name_list, None

def main():
    url = DOWNLOAD_URL
    # codecs.open always operates in binary mode internally, so 'wb' together
    # with encoding='utf-8' writes UTF-8 text to the output file 'movies'
    with codecs.open('movies', 'wb', encoding='utf-8') as fp:
        while url:
            html = download_page(url)
            movies, url = parse_html(html)
            fp.write(u'{movies}\n'.format(movies='\n'.join(movies)))

if __name__ == '__main__':
    main()
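
# Usage (the file name 'douban_top250.py' is just an example, not prescribed
# by the original script):
#   python douban_top250.py
# One movie title is written per line to a file named 'movies' in the
# current working directory.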