Crawler Demo: Scraping a JSON Endpoint

This post works through a concrete example of using a Python crawler to fetch and parse JSON content from a web endpoint. It covers how to issue the requests and how to process the returned JSON data, so that you can pull structured data from a page without parsing its HTML.
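The core idea: many sites load their listings from an Ajax endpoint that returns JSON, so instead of scraping rendered HTML you can POST to that endpoint directly and read the structured reply. A minimal sketch of the pattern, with a placeholder URL and parameters rather than the real ones used below:

import requests

# POST the query parameters straight to the JSON endpoint...
response = requests.post('https://example.com/search.json',
                         data={'page': '1', 'keyword': 'python'})
# ...and decode the JSON body into ordinary Python dicts and lists
result = response.json()
print(result)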

import requests
import time
import numpy as np
import pandas as pd

# Base URL: Lagou's position-search Ajax endpoint, which returns JSON
base_url = 'https://www.lagou.com/jobs/positionAjax.json?needAddtionalResult=false'
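# (Endpoints like this are usually discovered in the browser devtools:
#  open the Network panel, filter by XHR, and trigger a search on the page.)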

# Request headers copied from a logged-in browser session (Cookie, Referer, and User-Agent)
headers = {
        "Cookie": "user_trace_token=20180806162737-937aceec-9952-11e8-a341-5254005c3644; LGUID=20180806162737-937ad172-9952-11e8-a341-5254005c3644; JSESSIONID=ABAAABAABEEAAJAE50BCF139F0736172F1F0188EC151863; _gat=1; PRE_UTM=m_cf_cpt_baidu_pc; PRE_HOST=bzclk.baidu.com; PRE_SITE=http%3A%2F%2Fbzclk.baidu.com%2Fadrc.php%3Ft%3D06KL00c00f7Ghk60yUKm0FNkUsjkuPdu00000PW4pNb00000LCecjM.THL0oUhY1x60UWY4rj0knj03rNqbusK15yDLnWfkuWN-nj0sn103rHm0IHdDPbmzPjI7fHn3f1m3PDnsnH9anDFArH6LrHm3PHcYf6K95gTqFhdWpyfqn101n1csPHnsPausThqbpyfqnHm0uHdCIZwsT1CEQLILIz4_myIEIi4WUvYE5LNYUNq1ULNzmvRqUNqWu-qWTZwxmh7GuZNxTAn0mLFW5HDLP1Rv%26tpl%3Dtpl_10085_15730_11224%26l%3D1500117464%26attach%3Dlocation%253D%2526linkName%253D%2525E6%2525A0%252587%2525E9%2525A2%252598%2526linkText%253D%2525E3%252580%252590%2525E6%25258B%252589%2525E5%25258B%2525BE%2525E7%2525BD%252591%2525E3%252580%252591%2525E5%2525AE%252598%2525E7%2525BD%252591-%2525E4%2525B8%252593%2525E6%2525B3%2525A8%2525E4%2525BA%252592%2525E8%252581%252594%2525E7%2525BD%252591%2525E8%252581%25258C%2525E4%2525B8%25259A%2525E6%25259C%2525BA%2526xp%253Did%28%252522m6c247d9c%252522%29%25252FDIV%25255B1%25255D%25252FDIV%25255B1%25255D%25252FDIV%25255B1%25255D%25252FDIV%25255B1%25255D%25252FH2%25255B1%25255D%25252FA%25255B1%25255D%2526linkType%253D%2526checksum%253D220%26ie%3Dutf8%26f%3D8%26ch%3D2%26tn%3D98010089_dg%26wd%3D%25E6%258B%2589%25E5%258B%25BE%25E7%25BD%2591%26oq%3D%25E6%258B%2589%25E5%258B%25BE%25E7%25BD%2591%26rqlang%3Dcn%26oe%3Dutf8; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2F%3Futm_source%3Dm_cf_cpt_baidu_pc; _putrc=347EB76F858577F7; login=true; unick=%E6%9D%8E%E5%87%AF%E6%97%8B; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=63; TG-TRACK-CODE=index_search; _gid=GA1.2.1110077189.1507624453; _ga=GA1.2.1827851052.1507624453; LGSID=20171011082529-afc7b124-ae1a-11e7-87db-525400f775ce; LGRID=20171011082545-b94d70d5-ae1a-11e7-87db-525400f775ce; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1507444213,1507624453,1507625209,1507681531; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1507681548; SEARCH_ID=e420ce4ae5a7496ca8acf3e7a5490dfc; index_location_city=%E5%8C%97%E4%BA%AC",
        "Host": "www.lagou.com",
        'Origin': 'https://www.lagou.com',
        'Referer': 'https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?labelWords=&fromSearch=true&suginput=',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/53.0.2785.104 Safari/537.36 Core/1.53.3408.400 QQBrowser/9.6.12028.400'
}
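# Note: the Cookie above is tied to one browser session and will expire;
# capture a fresh one from your own logged-in session before running this.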
# HTTP proxy to route requests through
proxies = {
    'http': 'http://139.0.28.18:8080',
}
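# Note: this is a sample public proxy, and free proxies go stale quickly;
# replace it, or drop the proxies argument, if requests start failing.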
# Result container: one list per field, filled in page by page
infos = {
    # City
    'city': [],
    # District within the city
    'district': [],
    # Full company name
    'companyFullName': [],
    # Position title
    'positionName': [],
    # Salary range
    'salary': [],
    # Required work experience
    'workYear': [],
    # Company size
    'companySize': [],
    # Education requirement
    'education': [],
    # Financing stage (e.g. listed, funded)
    'financeStage': [],
    # Industry field
    'industryField': [],
    # Job nature (full-time or not)
    'jobNature': [],
    # Benefits / perks
    'positionAdvantage': [],
}
"""

"""
# Paginate through the results; the total page count (30) was checked on the site beforehand
for i in range(1, 31):
    print(i)
    # Sleep a random 0-15 s between requests to avoid hammering the server
    time.sleep(np.random.rand()*15)
    # POST form parameters for this page of results
    data = {
        'first': 'false',   # whether this is the first search request
        'pn': str(i),       # page number
        'kd': '数据分析师',   # search keyword ("data analyst")
    }
    # Send the POST request through the proxy
    response = requests.post(url=base_url, data=data, headers=headers, proxies=proxies)
    # Decode the JSON response body into a dict
    html = response.json()
    # Drill down to the list of job postings
    results = html['content']['positionResult']['result']
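    # Expected response shape, inferred from the keys used above:
    # {"content": {"positionResult": {"result": [{...job fields...}, ...]}}}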
    for result in results:
        infos['city'].append(result['city'])
        infos['district'].append(result['district'])
        infos['companyFullName'].append(result['companyFullName'])
        infos['companySize'].append(result['companySize'])
        infos['education'].append(result['education'])
        infos['financeStage'].append(result['financeStage'])
        infos['industryField'].append(result['industryField'])
        infos['jobNature'].append(result['jobNature'])
        infos['positionAdvantage'].append(result['positionAdvantage'])
        infos['positionName'].append(result['positionName'])
        infos['salary'].append(result['salary'])
        infos['workYear'].append(result['workYear'])
# Assemble the collected lists into a DataFrame
ddata = pd.DataFrame(infos)
# Save as CSV; gb18030 encoding keeps the Chinese text readable in Excel
ddata.to_csv('lagou.csv', index=False, encoding='gb18030')
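The script above assumes every response parses as JSON and contains content.positionResult.result. In practice, anti-scraping measures often return an error page or a throttling message instead, which would crash the loop with a JSON decoding error or a KeyError. A minimal defensive variant of the fetch step, with an arbitrary retry count and backoff that are not part of the original script:

import time
import requests

def fetch_page(url, data, headers, proxies, retries=3):
    """Fetch one page and return the job list, retrying on failures."""
    for attempt in range(retries):
        try:
            response = requests.post(url=url, data=data, headers=headers,
                                     proxies=proxies, timeout=10)
            response.raise_for_status()
            html = response.json()
            # A KeyError here usually means an anti-bot page came back
            return html['content']['positionResult']['result']
        except (requests.RequestException, ValueError, KeyError) as e:
            print('attempt %d failed: %s' % (attempt + 1, e))
            time.sleep(10 * (attempt + 1))  # back off before retrying
    return []

Each loop iteration would then call fetch_page(base_url, data, headers, proxies) and only append the fields when a non-empty list comes back.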