Meituan Popularity Rankings

This code implements a class named LianjiaSpider (the name appears to be left over from an earlier Lianjia scraper) that crawls per-city sales ranking boards from Meituan. It switches the target city by substituting a numeric city ID into the API URL, fetches each board with the requests library, parses the JSON response, and appends the extracted fields (ID, name, weekly sales, and so on) to a CSV file.
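For concreteness, the first request the spider issues (cityId=20 for Guangzhou, boardType=1) looks like this once the URL template below is filled in:

```
https://mobilenext-web.meituan.com/api/rankingList/getSaleBoardList?cityId=20&boardType=1&cateId=10000&limit=10
```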
# -*- coding:utf-8 -*-
# To change the target cities, edit cityId_lists in main(); the cityId-to-name
# mapping used for the CSV output lives in parse_page below.
import requests
import time
import random
import csv
import json


class LianjiaSpider(object):
    def __init__(self):
        # URL template: the two {} slots take a cityId and a boardType (1-8).
        self.url = "https://mobilenext-web.meituan.com/api/rankingList/getSaleBoardList?cityId={}&boardType={}&cateId=10000&limit=10"

        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/535.1 (KHTML, like Gecko) Chrome/14.0.835.163 Safari/535.1"}

    def get_page(self, url, i, j):
        print(url)
        res = requests.get(url=url, headers=self.headers)
        res.encoding = "utf-8"
        html = res.text
        self.parse_page(html, i, j)

    def parse_page(self, html, i, j):
        print(i)
        # Strip the {"totalSize":...,"saleBoardPoiList": ... ,"boardDigest":null}
        # wrapper by fixed character offsets, leaving only the JSON array of POIs.
        # This is brittle; a sturdier json.loads version is sketched after the script.
        results = html[35:-20]
        # Restore the opening bracket if the slice cut it off.
        if not results.startswith("["):
            results = "[" + results
        for poi in json.loads(results):
            poi_id = poi["id"]
            name = poi["name"]
            weekSaleCount = poi["weekSaleCount"]
            score = poi["score"]
            avgPrice = poi["avgPrice"]
            cateName = poi["cateName"]
            areaName = poi["areaName"]
            distance = poi["distance"]
            rank = poi["rank"]
            frontImg = poi["frontImg"]
            oneSentence = poi["oneSentence"]

            # Map the numeric cityId back to a city name for the CSV output.
            city_names = {20: "广州", 57: "武汉", 105: "哈尔滨", 66: "沈阳",
                          59: "成都", 55: "南京", 42: "西安", 116: "长春",
                          96: "济南", 50: "杭州"}
            cityId = city_names.get(i, str(i))

            # Map boardType (1-8) to its board label: hotpot, buffet,
            # BBQ & crayfish, regional cuisine, foreign cuisine,
            # snacks & fast food, desserts & drinks, cake.
            board_names = {1: "火锅", 2: "自助餐", 3: "烧烤龙虾", 4: "地方菜",
                           5: "异国料理", 6: "小吃快餐", 7: "甜点饮品", 8: "蛋糕"}
            catId = board_names.get(j, str(j))

            # Append one row per POI; reopening the file per row is slow but simple.
            with open('meituan.csv', 'a', newline='', encoding='utf-8') as f:
                writer = csv.writer(f)
                writer.writerow(
                    [cityId, catId, poi_id, name, weekSaleCount, score, avgPrice,
                     cateName, areaName, distance, rank, frontImg, oneSentence])

    def main(self):
        # cityIds to crawl (20 is Guangzhou); other known IDs include
        # 118 Hong Kong, 165 Baishan, and 170 Hegang.
        cityId_lists = [20, 57, 105, 66, 59, 55, 42, 116, 96, 50]
        for i in cityId_lists:
            for j in range(1, 9):  # the eight sales boards mapped in parse_page
                time.sleep(random.randint(3, 5))  # throttle requests
                url = self.url.format(i, j)
                self.get_page(url, i, j)


if __name__ == '__main__':
    start = time.time()
    spider = LianjiaSpider()
    spider.main()
    end = time.time()
    print("执行时间:%.2f" % (end - start))

Here is the solution. First, install the requests and beautifulsoup4 libraries, for example from PyCharm's Terminal:

```
pip install requests
pip install beautifulsoup4
```

Then the following program looks up and records Meituan hot-sale volume and price data:

```python
import requests
from bs4 import BeautifulSoup

url = "https://www.meituan.com/changecity/"
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.3"}
res = requests.get(url, headers=headers)
soup = BeautifulSoup(res.text, 'html.parser')
city_list = soup.select('.city-list a')

# Print every city name and its link
for city in city_list:
    print(city.text, city['href'])

# Pick a city and build its hot-sale food page link
city_name = "上海"
city_href = ""
for city in city_list:
    if city.text == city_name:
        city_href = city['href']
        break

if not city_href:
    print("City not found")
else:
    url = "https:" + city_href + "rzshangpin/"
    print(url)

    # Fetch the hot-sale page's HTML
    res = requests.get(url, headers=headers)
    soup = BeautifulSoup(res.text, 'html.parser')
    sales_list = soup.select('.sales-item')
    for sales in sales_list:
        name = sales.select('.sales-name')[0].text.strip()
        sales_num = sales.select('.sales-num')[0].text.strip()
        price = sales.select('.price')[0].text.strip()
        print(name, sales_num, price)
```

The program first collects all city links and prints them. It then picks one city (Shanghai here), builds that city's hot-sale food page link, and fetches the page's HTML. Finally, it parses each sales item's name, sales volume, and price out of the HTML and prints them. You can adapt the program to your needs, for example by writing the results to a file instead (see the sketch below).
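As one way to do that, here is a minimal sketch that replaces the final print loop with CSV output; save_sales and the output filename are hypothetical, while the CSS selectors are carried over from the snippet above:

```python
import csv

def save_sales(sales_list, csv_path="meituan_sales.csv"):
    # Append one (name, sales volume, price) row per hot-sale item.
    with open(csv_path, "a", newline="", encoding="utf-8") as f:
        writer = csv.writer(f)
        for sales in sales_list:
            writer.writerow([
                sales.select('.sales-name')[0].text.strip(),
                sales.select('.sales-num')[0].text.strip(),
                sales.select('.price')[0].text.strip(),
            ])
```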