Template 1: (parsing with BeautifulSoup)
import requests
import csv
from bs4 import BeautifulSoup
# Request URL
url = ''
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
# Page-parsing function
def parse_html(html):
    soup = BeautifulSoup(html, 'lxml')
    # extract fields from soup here and write them via the global writer
# Data-saving function
def save_data():
    f = open('', 'a', newline='', encoding='utf-8-sig')
    global writer
    writer = csv.writer(f)
    writer.writerow(['', '', ''])
    f.close()
if __name__ == '__main__':
    save_data()
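Note that the skeleton above never actually fetches a page: when filling it in, the request and the parse call have to be wired together, as Case 1 below does. A minimal sketch of that wiring (the filename, header row, and columns are placeholders, not part of the original template):
def save_data():
    f = open('output.csv', 'a', newline='', encoding='utf-8-sig')  # placeholder filename
    global writer
    writer = csv.writer(f)
    writer.writerow(['col1', 'col2', 'col3'])  # placeholder header row
    response = requests.get(url, headers=headers)  # fetch the target page
    parse_html(response.text)  # parse_html writes rows through the global writer
    f.close()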
Case 1:
Scrape the Douban Movie Top 250.
import requests
from bs4 import BeautifulSoup
import csv
# Request URL
url = 'https://movie.douban.com/top250'
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
# Page-parsing function
def parse_html(html):
    soup = BeautifulSoup(html, 'lxml')
    movie_list = soup.find('ol', class_='grid_view').find_all('li')
    for movie in movie_list:
        title = movie.find('div', class_='hd').find('span', class_='title').get_text()
        rating_num = movie.find('div', class_='star').find('span', class_='rating_num').get_text()
        comment_num = movie.find('div', class_='star').find_all('span')[-1].get_text()
        writer.writerow([title, rating_num, comment_num])
# Data-saving function
def save_data():
    f = open('top250.csv', 'a', newline='', encoding='utf-8-sig')
    global writer
    writer = csv.writer(f)
    writer.writerow(['Title', 'Rating', 'Number of ratings'])
    for i in range(10):
        url = 'https://movie.douban.com/top250?start=' + str(i * 25) + '&filter='
        response = requests.get(url, headers=headers)
        parse_html(response.text)
    f.close()
if __name__ == '__main__':
    save_data()
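Sites like Douban often rate-limit rapid-fire requests, so it can help to pause between the ten page fetches and to fail fast on a bad response. A possible tweak to the loop in save_data (the one-second delay is an arbitrary choice, not a documented requirement):
import time

for i in range(10):
    url = 'https://movie.douban.com/top250?start=' + str(i * 25) + '&filter='
    response = requests.get(url, headers=headers)
    response.raise_for_status()  # stop early if the request was blocked or errored
    parse_html(response.text)
    time.sleep(1)  # brief pause between pages to stay polite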
Template 2: (data-collection template, parsed with etree)
import requests
import csv
from lxml import etree
# Request URL
url = ''
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
# Page-parsing function
def parse_html(html):
    selector = etree.HTML(html)
    book_list = selector.xpath('')
    output_list = []
    for each in book_list:
        output_list.append({
            # map the extracted fields here
        })
    return output_list
# Data-saving function
def save_data():
    f = open('.csv', 'w', newline='', encoding='utf-8-sig')
    writer = csv.writer(f)
    writer.writerow(['', '', '', '', '', ''])
    html = requests.get(url, headers=headers)
    output_list = parse_html(html.text)
    for each in output_list:
        writer.writerow([each[''], each[''], each[''], each[''], each[''], each['']])
    f.close()
if __name__ == '__main__':
    save_data()
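Since this template's parse_html already returns a list of dicts, csv.DictWriter is a natural alternative to indexing each field by hand in save_data. A sketch of that variant (the column names are placeholders that must match the dict keys used in parse_html):
def save_data():
    fieldnames = ['field1', 'field2', 'field3']  # placeholder column names, matching the dict keys
    with open('output.csv', 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()  # header row generated from fieldnames
        response = requests.get(url, headers=headers)
        writer.writerows(parse_html(response.text))  # one CSV row per dict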
Case 2:
Scrape book listings from Dangdang.
import requests
from lxml import etree
import csv
# Request URL
url = 'http://search.dangdang.com/?key=Python&act=input'
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}
# Page-parsing function
def parse_html(html):
    selector = etree.HTML(html)
    book_list = selector.xpath('//*[@id="search_nature_rg"]/ul/li')
    output_list = []
    for book in book_list:
        title = book.xpath('a/@title')[0]
        link = book.xpath('a/@href')[0]
        price = book.xpath('p[@class="price"]/span[@class="search_now_price"]/text()')[0]
        author = book.xpath('p[@class="search_book_author"]/span[1]/a/@title')[0]
        publish_date = book.xpath('p[@class="search_book_author"]/span[2]/text()')
        # Some listings omit the publish date; fall back to an empty string
        publish_date = publish_date[0] if publish_date else ''
        publisher = book.xpath('p[@class="search_book_author"]/span[3]/a/@title')[0]
        output_list.append({
            'title': title,
            'link': link,
            'price': price,
            'author': author,
            'publish_date': publish_date,
            'publisher': publisher
        })
    return output_list
# Data-saving function
def save_data():
    f = open('dangdang_books.csv', 'w', newline='', encoding='utf-8-sig')
    writer = csv.writer(f)
    writer.writerow(['Title', 'Link', 'Price', 'Author', 'Publish date', 'Publisher'])
    response = requests.get(url, headers=headers)
    output_list = parse_html(response.text)
    for item in output_list:
        writer.writerow([item['title'], item['link'], item['price'],
                         item['author'], item['publish_date'], item['publisher']])
    f.close()
if __name__ == '__main__':
    save_data()
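Case 2 only reads the first page of results. Dangdang's search URLs appear to accept a page_index query parameter; assuming that holds (it is an observation about the URL format, not a documented API), the crawl extends naturally:
def save_data():
    f = open('dangdang_books.csv', 'w', newline='', encoding='utf-8-sig')
    writer = csv.writer(f)
    writer.writerow(['Title', 'Link', 'Price', 'Author', 'Publish date', 'Publisher'])
    for page in range(1, 4):  # first three result pages; widen as needed
        page_url = url + '&page_index=' + str(page)  # page_index is an assumed parameter
        response = requests.get(page_url, headers=headers)
        for item in parse_html(response.text):
            writer.writerow([item['title'], item['link'], item['price'],
                             item['author'], item['publish_date'], item['publisher']])
    f.close()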