Python爬虫--------------------爬取美食网菜谱
自从学会了 Python,感觉自己的白嫖技能又升级了。废话不多说,直接上代码。这里六七万多道菜要全部爬下来得五六个小时,建议改用 Scrapy 框架并做分布式爬取来提高速度。
import requests
import json
import pymysql
import re
import xlwt
import xlrd
from openpyxl import load_workbook, workbook
from bs4 import BeautifulSoup
# ---- Module-level accumulators shared between pa() and fen() ----
htmls = []  # one dict per recipe: {'name', 'url', 'food_material'}
a=[]   # recipe names
b=[]   # recipe detail-page urls
cs=[]  # comma-joined ingredient strings
d=[]   # comma-joined step texts
e=[]   # cooking tips (小窍门)
pic=[]  # recipe image urls
num=0  # NOTE(review): never used anywhere below — presumably dead; confirm before removing
#---------------------------爬取目标网页-----------
def pa(out_path='F:/Book.xlsx', start=10, end=95888, save_every=500):
    """Crawl meishichina.com recipe pages with ids in [start, end) into an Excel file.

    For every recipe found, appends name/url/materials/steps/tip/image to the
    module-level lists (a, b, cs, d, e, pic, htmls) and writes one row to the
    workbook saved at *out_path*.

    Parameters:
        out_path (str): destination .xlsx file (default keeps the original path).
        start, end (int): recipe-id range to crawl (original: 10..95888).
        save_every (int): checkpoint the workbook every N pages so a crash
            in a multi-hour crawl does not lose all collected data.
    """
    indexs = 0
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36'
    }
    wb = workbook.Workbook()  # create the Excel workbook
    ws = wb.active            # active worksheet
    # Header row, written as a list.
    ws.append(['编号','菜名','图片链接', 'url','所需材料','做法步骤','小窍门'])
    # Reuse one HTTP session: keep-alive makes ~95k requests much faster.
    session = requests.Session()
    session.headers.update(headers)
    for page in range(start, end):
        url = f'https://home.meishichina.com/recipe-{page}.html'
        try:
            response = session.get(url, timeout=10)
        except requests.RequestException as exc:
            # One bad page must not abort the whole crawl.
            print(f'跳过 {url}: {exc}')
            continue
        response.encoding = 'utf-8'  # normalize to utf-8
        soup = BeautifulSoup(response.text, 'lxml')
        foods = soup.select('.recipe_De_title')  # recipe title block(s)
        material = soup.find_all('div', attrs={'class': 'recipeCategory_sub_R'})  # ingredients
        step = soup.find_all('div', attrs={'class': 'recipeStep_word'})           # steps
        smalldoor = soup.find_all('div', attrs={'class': 'recipeTip'})            # tips
        picture = soup.select('.recipe_De_imgBox')                                # photo box
        for food in foods:
            urls = food.find('a')
            if urls is None:
                continue  # deleted/malformed page — nothing to record
            name = urls.get_text()
            link = urls["href"]
            indexs += 1
            # ---- ingredient text (flatten all <li> entries, strip newlines)
            food_material = [c.text.strip().replace('\n', '')
                             for mat in material for c in mat.find_all('li')]
            str1 = ','.join(food_material) + (',' if food_material else '')
            # ---- step text (trailing comma per step, as in the original output)
            str2 = ''.join(ste.text + ',' for ste in step)
            # ---- picture link; guard: a missing image box used to raise NameError
            img_link = ''
            for img in picture:
                image = img.find('img')
                if image is not None:
                    img_link = image["src"]
            # ---- tip; guard: pages without a recipeTip div used to raise IndexError
            tip = (smalldoor[0].text.strip().replace('\n', '').replace('\r', '')
                   if smalldoor else '')
            a.append(name)
            b.append(link)
            cs.append(str1)
            d.append(str2)
            e.append(tip)
            pic.append(img_link)
            htmls.append({'name': name, 'url': link, 'food_material': food_material})
            # Write the row immediately instead of buffering until the end.
            ws.append([indexs, name, img_link, link, str1, str2, tip])
        print("正在读取第" + str(indexs) + "/95878张网页----" + url)
        if save_every and page % save_every == 0:
            wb.save(out_path)  # periodic checkpoint
    print(htmls)
    wb.save(out_path)
#-----------------------------------------
def fen():
    """Run the crawler, then report how many recipes were collected."""
    pa()
    collected = len(htmls)
    print(collected)
print(len(htmls))
if __name__ == '__main__':
    # Entry point: only start the crawl when run as a script, not on import.
    fen()
