Sharing two Python crawler templates and a matching crawler example for each

Template 1 (parsing with BeautifulSoup)

import requests
import csv
from bs4 import BeautifulSoup

# Request URL (fill in the target site)
url = ''
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Page-parsing function
def parse_html(html):
    soup = BeautifulSoup(html, 'lxml')
    # TODO: locate the target elements with soup.find()/find_all()
    # and write each record with writer.writerow([...])

# Data-saving function
def save_data():
    f = open('', 'a', newline='', encoding='utf-8-sig')  # fill in the CSV filename
    global writer
    writer = csv.writer(f)
    writer.writerow(['', '', ''])  # fill in the column headers
    # TODO: fetch the page(s) and parse them, e.g.:
    # response = requests.get(url, headers=headers)
    # parse_html(response.text)
    f.close()


if __name__ == '__main__':
    save_data()
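
To make the template's blanks concrete, here is a minimal, self-contained sketch of the find()/find_all() pattern that parse_html is meant to contain. The inline HTML and its class names are hypothetical, purely for illustration:

from bs4 import BeautifulSoup

# Hypothetical HTML standing in for a fetched page.
html = '''
<ol class="items">
  <li><span class="title">Book A</span><span class="price">10.0</span></li>
  <li><span class="title">Book B</span><span class="price">12.5</span></li>
</ol>
'''

soup = BeautifulSoup(html, 'lxml')
# find() returns the first match; find_all() returns every match.
for li in soup.find('ol', class_='items').find_all('li'):
    title = li.find('span', class_='title').get_text()
    price = li.find('span', class_='price').get_text()
    print(title, price)  # in the template, this would be writer.writerow([...])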

Example 1:

Scrape the Douban Movie Top 250.

import requests
from bs4 import BeautifulSoup
import csv

# Request URL
url = 'https://movie.douban.com/top250'
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Page-parsing function
def parse_html(html):
    soup = BeautifulSoup(html, 'lxml')
    movie_list = soup.find('ol', class_='grid_view').find_all('li')
    for movie in movie_list:
        title = movie.find('div', class_='hd').find('span', class_='title').get_text()
        rating_num = movie.find('div', class_='star').find('span', class_='rating_num').get_text()
        comment_num = movie.find('div', class_='star').find_all('span')[-1].get_text()
        writer.writerow([title, rating_num, comment_num])

# Data-saving function
def save_data():
    f = open('top250.csv', 'a', newline='', encoding='utf-8-sig')
    global writer
    writer = csv.writer(f)
    writer.writerow(['Title', 'Rating', 'Review count'])
    # The Top 250 list spans 10 pages of 25 movies each.
    for i in range(10):
        url = 'https://movie.douban.com/top250?start=' + str(i*25) + '&filter='
        response = requests.get(url, headers=headers)
        parse_html(response.text)
    f.close()

if __name__ == '__main__':
    save_data()
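
Douban may throttle or reject rapid repeated requests. A hedged variant of the fetch loop in save_data adds a timeout, a status check, and a pause between pages; the 10-second timeout and 1-second delay are illustrative assumptions, not values from the original code:

import time

for i in range(10):
    page_url = 'https://movie.douban.com/top250?start=' + str(i * 25) + '&filter='
    response = requests.get(page_url, headers=headers, timeout=10)  # assumed timeout
    response.raise_for_status()  # fail fast on a 4xx/5xx response
    parse_html(response.text)
    time.sleep(1)  # assumed polite delay between pages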

Template 2 (data-collection template, parsing with etree)

import requests
import csv
from lxml import etree

# Request URL (fill in the target site)
url = ''
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Page-parsing function
def parse_html(html):
    selector = etree.HTML(html)
    book_list = selector.xpath('')  # fill in the XPath that selects the item nodes
    output_list = []
    for each in book_list:
        # extract the fields of one item with relative XPath expressions
        output_list.append({
            # '<column>': each.xpath('...')[0],
        })
    return output_list

# Data-saving function
def save_data():
    f = open('.csv', 'w', newline='', encoding='utf-8-sig')  # fill in the CSV filename
    writer = csv.writer(f)
    writer.writerow(['', '', '', '', '', ''])  # fill in the column headers
    response = requests.get(url, headers=headers)
    output_list = parse_html(response.text)
    for each in output_list:
        writer.writerow([each[''], each[''], each[''], each[''], each[''], each['']])
    f.close()


if __name__ == '__main__':
    save_data()
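
As with Template 1, a minimal self-contained sketch shows how etree.HTML plus XPath fills the blanks above. The inline HTML is hypothetical, purely for illustration:

from lxml import etree

# Hypothetical HTML standing in for a fetched page.
html = '''
<ul id="items">
  <li><a href="/a" title="Book A"></a><span class="price">10.0</span></li>
  <li><a href="/b" title="Book B"></a><span class="price">12.5</span></li>
</ul>
'''

selector = etree.HTML(html)
# An absolute XPath (leading //) selects the item nodes...
for li in selector.xpath('//ul[@id="items"]/li'):
    # ...and relative XPaths (no leading //) read fields inside each node.
    title = li.xpath('a/@title')[0]
    price = li.xpath('span[@class="price"]/text()')[0]
    print(title, price)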

Example 2:

Scrape book listings from Dangdang.

import requests
from lxml import etree
import csv

# Request URL
url = 'http://search.dangdang.com/?key=Python&act=input'
# Request headers
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36'
}

# Page-parsing function
def parse_html(html):
    selector = etree.HTML(html)
    book_list = selector.xpath('//*[@id="search_nature_rg"]/ul/li')
    output_list = []
    for book in book_list:
        title = book.xpath('a/@title')[0]
        link = book.xpath('a/@href')[0]
        price = book.xpath('p[@class="price"]/span[@class="search_now_price"]/text()')[0]
        author = book.xpath('p[@class="search_book_author"]/span[1]/a/@title')[0]
        # The publish date is missing for some listings, so fall back to an empty string.
        publish_date_nodes = book.xpath('p[@class="search_book_author"]/span[2]/text()')
        publish_date = publish_date_nodes[0] if publish_date_nodes else ''
        publisher = book.xpath('p[@class="search_book_author"]/span[3]/a/@title')[0]
        output_list.append({
            'title': title,
            'link': link,
            'price': price,
            'author': author,
            'publish_date': publish_date,
            'publisher': publisher
        })
    return output_list

# Data-saving function
def save_data():
    f = open('dangdang_books.csv', 'w', newline='', encoding='utf-8-sig')
    writer = csv.writer(f)
    writer.writerow(['Title', 'Link', 'Price', 'Author', 'Publish date', 'Publisher'])
    response = requests.get(url, headers=headers)
    output_list = parse_html(response.text)
    for item in output_list:
        writer.writerow([item['title'], item['link'], item['price'], item['author'], item['publish_date'], item['publisher']])
    f.close()

if __name__ == '__main__':
    save_data()
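
Since parse_html already returns a list of dicts, csv.DictWriter can replace the hand-written writerow calls. A small alternative sketch of save_data (same file and data as above, though the header row becomes the dict keys rather than the capitalized labels):

def save_data():
    fieldnames = ['title', 'link', 'price', 'author', 'publish_date', 'publisher']
    with open('dangdang_books.csv', 'w', newline='', encoding='utf-8-sig') as f:
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()  # column names come from fieldnames
        response = requests.get(url, headers=headers)
        writer.writerows(parse_html(response.text))  # one CSV row per dict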
