Python Crawler: Scraping Meishichina Recipes (Database Version)

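This crawler walks the recipe pages on home.meishichina.com one by one, uses BeautifulSoup to pull out each dish's name, ingredient list, cooking steps, tips, and cover-image link, and writes every record into a MySQL table through pymysql. It needs the requests, beautifulsoup4, lxml, and pymysql packages installed.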

import requests
import pymysql
from bs4 import BeautifulSoup
#--------------------------- scrape the target pages -----------
def pa():
    indexs = 0
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'
    }
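    # a browser-like User-Agent makes the request look like normal traffic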
    # open the database connection
    conn = pymysql.connect(host='localhost', user='root', password='root', db='spiderdb', port=3306, charset='utf8')
    # create a cursor
    cursor = conn.cursor()
    # database work
    # (1) define a parameterized SQL statement
    sql = 'insert into foods(id,foodname,picsrc,url,foodmaterial,foodstep,fooddoor) values(%s,%s,%s,%s,%s,%s,%s)'

    for page in range(10, 95888):  # (95888-10) recipes
        url = f'https://home.meishichina.com/recipe-{page}.html'
        response = requests.get(url=url, headers=headers)
        response.encoding = 'utf-8'  # normalize the encoding to utf-8
        soup = BeautifulSoup(response.text, 'lxml')  # parse the fetched page text
        foods = soup.select('.recipe_De_title')  # dish title block (class selector)
        material = soup.find_all('div', attrs={'class': 'recipeCategory_sub_R'})  # ingredients
        step = soup.find_all('div', attrs={'class': 'recipeStep_word'})  # cooking steps
        smalldoor = soup.find_all('div', attrs={'class': 'recipeTip'})  # cooking tips
        picture = soup.select('.recipe_De_imgBox')  # dish photo container

        for food in foods:
            urls = food.find('a')  # locate the anchor tag
            name = urls.get_text()  # text inside the tag (the dish name)
            link = urls["href"]  # recipe page link
            food_material=[]
            str1=''
            str2=''
            #--------------------------- ingredient text
            for mat in material:
                cai = mat.find_all('li')
                for c in cai :
                    food_material.append(c.text.strip().replace('\n',''))
            for s in food_material:
                str1 =str1+s+','
            #---------------------------- cooking-step text
            for ste in step:
                str2 =str2+ste.text+','
            #---------------------------- dish image link
            img_link = ''  # default so the insert still works when the page has no image box
            for img in picture:
                image = img.find('img')
                img_link = image["src"]
            #----------------------------- cooking tip
            door = ''
            if smalldoor:  # some recipes have no tip section
                door = smalldoor[0].text.strip().replace('\n', '').replace('\r', '')
            indexs = indexs+1
            # (2) assemble the row
            data = (indexs, name, img_link, link, str1, str2, door)
            # (3) execute the insert
            try:
                cursor.execute(sql, data)
                conn.commit()
            except Exception as e:
                print('Insert failed:', e)
                conn.rollback()  # roll back the failed transaction
        print("Inserted " + str(indexs) + " records so far ---- " + url)
    # close the cursor
    cursor.close()
    # close the connection
    conn.close()
if __name__ == '__main__':
    pa()
    print("数据读取完毕!!!")