Python Crawler ---------------- Scraping Recipes from a Food Site (Database Version)
import requests
import pymysql
from bs4 import BeautifulSoup

# --------------------------- Crawl the target pages -----------
def pa():
    indexs = 0
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'
    }
    # Open the database connection
    conn = pymysql.connect(host='localhost', user='root', password='root',
                           db='spiderdb', port=3306, charset='utf8')
    # Create a cursor
    cursor = conn.cursor()
    # Database work:
    # (1) define a parameterized SQL statement
    sql = ('insert into foods(id, foodname, picsrc, url, foodmaterial, foodstep, fooddoor) '
           'values (%s, %s, %s, %s, %s, %s, %s)')
    for page in range(10, 95888):  # (95888 - 10) recipe pages
        url = f'https://home.meishichina.com/recipe-{page}.html'
        response = requests.get(url=url, headers=headers)
        if response.status_code != 200:  # recipe IDs have gaps; skip pages that don't resolve
            continue
        response.encoding = 'utf-8'  # normalize the encoding to utf-8
        soup = BeautifulSoup(response.text, 'lxml')  # parse the fetched HTML
        foods = soup.select('.recipe_De_title')  # recipe title (class selector)
        material = soup.find_all('div', attrs={'class': 'recipeCategory_sub_R'})  # ingredients
        step = soup.find_all('div', attrs={'class': 'recipeStep_word'})  # cooking steps
        smalldoor = soup.find_all('div', attrs={'class': 'recipeTip'})  # cooking tips
        picture = soup.select('.recipe_De_imgBox')  # dish photo link
        for food in foods:
            urls = food.find('a')   # locate the anchor tag
            name = urls.get_text()  # text inside the tag: the recipe name
            link = urls["href"]     # recipe URL
            food_material = []
            str1 = ''
            str2 = ''
            # --------------------------- ingredient text
            for mat in material:
                cai = mat.find_all('li')
                for c in cai:
                    food_material.append(c.text.strip().replace('\n', ''))
            for s in food_material:
                str1 = str1 + s + ','
            # ---------------------------- cooking-step text
            for ste in step:
                str2 = str2 + ste.text + ','
            # ---------------------------- dish image link
            img_link = ''  # reset per recipe so a page without a photo can't reuse a stale link
            for img in picture:
                image = img.find('img')
                img_link = image["src"]
            # ----------------------------- cooking tips (not every recipe has one)
            door = smalldoor[0].text.strip().replace('\n', '').replace('\r', '') if smalldoor else ''
            indexs = indexs + 1
            # (2) assemble the row
            data = (indexs, name, img_link, link, str1, str2, door)
            # (3) execute the insert
            try:
                cursor.execute(sql, data)
                conn.commit()
            except Exception as e:
                print('Insert failed:', e)
                conn.rollback()  # roll back the failed transaction
print("正在读取第"+str(indexs)+"张网页----"+url)
    # Close the cursor
    cursor.close()
    # Close the connection
    conn.close()

if __name__ == '__main__':
    pa()
    print('All data fetched!!!')