Python Web Scraping

Code Examples

Scraping Douban Top 250 movies with Python

import requests
import bs4
import re


def open_url(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/84.0.4147.89 Safari/537.36 SLBrowser/7.0.0.4071 SLBChan/30 '
    }
    res = requests.get(url, headers=headers)
    return res


def find_movies(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # Movie titles
    movies = []
    targets = soup.find_all("div", class_="hd")
    for each in targets:
        movies.append(each.a.span.text)
    # Ratings
    ranks = []
    targets = soup.find_all("span", class_="rating_num")
    for each in targets:
        ranks.append('Rating: %s ' % each.text)
    # Details (director, cast, year, genre)
    messages = []
    targets = soup.find_all("div", class_="bd")
    for each in targets:
        try:
            lines = each.p.text.split('\n')
            messages.append(lines[1].strip() + lines[2].strip())
        except (AttributeError, IndexError):
            # Not every "bd" div on the page is a movie entry; skip the rest
            continue

    result = []
    length = len(movies)
    for i in range(length):
        result.append(movies[i] + ranks[i] + messages[i] + '\n')
    return result


# Find the total number of pages
def find_depth(res):
    soup = bs4.BeautifulSoup(res.text, 'html.parser')
    # The last page number sits two siblings before the "next" link
    depth = soup.find('span', class_='next').previous_sibling.previous_sibling.text
    return int(depth)


def main():
    host = "https://movie.douban.com/top250"
    res = open_url(host)
    depth = find_depth(res)

    result = []
    for i in range(depth):
        # Each page lists 25 movies: start=0, 25, 50, ...
        url = host + '?start=' + str(25 * i)
        res = open_url(url)
        result.extend(find_movies(res))

    with open("douban_top250_movies.txt", "w", encoding="utf-8") as f:
        for each in result:
            f.write(each)


if __name__ == "__main__":
    main()
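The script above fires its page requests back to back and assumes every one of them succeeds. Douban throttles clients that hit it too quickly, so a slightly more defensive open_url is worth having. Here is a minimal sketch; the 3-second timeout and 1-second pause are assumed values, not part of the original script:

import time
import requests


def open_url(url):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/84.0.4147.89 Safari/537.36 SLBrowser/7.0.0.4071 SLBChan/30'
    }
    # A timeout keeps a stalled connection from hanging the whole crawl (assumed value)
    res = requests.get(url, headers=headers, timeout=3)
    # Fail loudly on 4xx/5xx instead of silently parsing an error page
    res.raise_for_status()
    # Pause between requests to stay polite (assumed value)
    time.sleep(1)
    return res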

Scraping images in bulk with Python

import os
import requests
import re

if __name__ == "__main__":

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) '
                      'Chrome/84.0.4147.89 Safari/537.36 SLBrowser/7.0.0.4071 SLBChan/30 '
    }
    # 创建文件夹,保存所有图片
    if not os.path.exists('./images'):
        os.mkdir('./images')

    url = 'https://www.qiushibaike.com/imgrank/page/%d/'
    for pageNum in range(1, 6):
        # 对应页码的url
        new_url = format(url % pageNum)

        # 使用通用爬虫对url对应的一整张页面进行爬取
        page_text = respond = requests.get(url=new_url, headers=headers).text
        # 使用聚焦爬虫将页面所有的图进行解析/提取
        # 正则表达式
        ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
        img_src_list = re.findall(ex, page_text, re.S)
        # print(img_src_list)
        for src in img_src_list:
            # 拼接出完整的图片url
            src = 'https:' + src
            # 请求到图片的二进制数据
            img_data = requests.get(url=src, headers=headers).content
            # 生成图片名称
            img_name = src.split('/')[-1]
            # 图片存储路径
            imgPath = './images/' + img_name
            with open(imgPath, 'wb') as fp:
                fp.write(img_data)
                print('下载成功')
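As written, one bad image URL aborts the whole run, since any exception from requests.get propagates out of the loop. A hedged variant of the inner loop that skips failures instead of crashing (the 5-second timeout is an assumption):

        for src in img_src_list:
            src = 'https:' + src
            try:
                # requests.RequestException covers connection errors and timeouts
                img_data = requests.get(url=src, headers=headers, timeout=5).content
            except requests.RequestException as err:
                print('Skipped', src, '->', err)
                continue
            img_name = src.split('/')[-1]
            with open('./images/' + img_name, 'wb') as fp:
                fp.write(img_data)
                print('Downloaded', img_name)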

# The regular expression, matched against markup like the following:
# <div class="thumb">
#
# <a href="/article/124274987" target="_blank"> <img
# src="//pic.pushbike.com/system/pictures/12427/124274987/medium/WOLF0PYTHONS.jpg" alt="糗事#124274987"
# class="illustration" width="100%" height="auto"> </a> </div>
#
#
# ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
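To see what the pattern actually captures, here is a self-contained check that runs the same re.findall call against that sample markup, no network access needed. The <img> tag is put back on a single line here: the wrapping in the comment above is blog formatting, and the pattern's literal '<img src="' would not match across a line break.

import re

sample = '''<div class="thumb">
<a href="/article/124274987" target="_blank">
<img src="//pic.pushbike.com/system/pictures/12427/124274987/medium/WOLF0PYTHONS.jpg" alt="糗事#124274987" class="illustration" width="100%" height="auto">
</a>
</div>'''

ex = '<div class="thumb">.*?<img src="(.*?)" alt.*?</div>'
# re.S lets '.' match newlines, so one match can span the whole multi-line <div>
print(re.findall(ex, sample, re.S))
# ['//pic.pushbike.com/system/pictures/12427/124274987/medium/WOLF0PYTHONS.jpg']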
