爬虫--爬取lagouwang

本文介绍了一个针对拉勾网上数据分析岗位招聘信息的爬虫实现方法。该爬虫能够自动抓取各岗位的详细信息,如公司名称、薪资范围、工作经验要求等,并将这些数据存储到数据库中。
import requests
import re
import json
from lxml import etree
from MySQLbao import MysqlHelper

# Database setup: one shared helper connection and the parameterized
# INSERT used to persist one scraped job posting per row.
# NOTE(review): MysqlHelper is a project-local wrapper (MySQLbao);
# presumably it reads its connection config internally — TODO confirm.

myhelper = MysqlHelper()
# Columns: company, salary, experience (jingyan), education (xueli),
# job description.  %s placeholders keep the query parameterized.
sql = "insert into lagouwang (company,salary,jingyan,xueli,description) values (%s,%s,%s,%s,%s)"



# Listing AJAX endpoint (returns JSON); city=%E5%8C%97%E4%BA%AC is the
# URL-encoded "北京" (Beijing) filter.
url = 'https://www.lagou.com/jobs/positionAjax.json?px=default&city=%E5%8C%97%E4%BA%AC&needAddtionalResult=false'

# Headers copied from a real browser session.  The Referer/Cookie pair is
# what lets the request past Lagou's anti-crawler check.
# NOTE(review): the Cookie is session-specific and will eventually expire.
headers = {
    'Accept': 'application/json, text/javascript, */*; q=0.01',
    'Accept-Language': 'zh-CN,zh;q=0.9',
    'Connection': 'keep-alive',
    # BUG FIX: the original cookie literal was split across two source
    # lines without a continuation (a SyntaxError).  The two halves are
    # rejoined here with implicit string concatenation; the value itself
    # is unchanged.
    'Cookie': ('WEBTJ-ID=20180817144920-16546a544c6135-0dddc6f4ac7209-2711639-1049088-16546a544c7e0; _ga=GA1.2.10894523.1534488562; _gid=GA1.2.1461560699.1534488562; user_trace_token=20180817144931-b18e45cc-a1e9-11e8-a9f0-5254005c3644; PRE_HOST=www.baidu.com; LGUID=20180817144931-b18e4991-a1e9-11e8-a9f0-5254005c3644; X_HTTP_TOKEN=c7943969cb6c7080f4a9483619d27c0f; LGSID=20180817145005-c5b6633a-a1e9-11e8-a9f0-5254005c3644; PRE_UTM=m_cf_cpc_baidu_pc; PRE_SITE=https%3A%2F%2Fwww.baidu.com%2Fbaidu.php%3Fsc.a000000pGFTBfqUxhdsLQQHGQPtm0FlmZuPiBSmB96XvSF0gsSJ-XXYAsI1tvw-I6rUY8xMiEVD4LYRAoGpeAhME_SAWhCRZemkO175w__MtIII4Pgip-OVtf2JJ0RK51gw45Da6mz7kIi5VMJhFk5bFuvZ7wpyHeTJWAGh4FIYLrFlRYs.DD_NR2Ar5Od663rj6tJQrGvKD7ZZKNfYYmcgpIQC8xxKfYt_U_DY2yP5Qjo4mTT5QX1BsT8rZoG4XL6mEukmryZZjzL4XNPIIhExzLu2SMcM-sSxH9vX8ZuEsSXej_qT5o43x5ksSEzseld2s1f_U2qS4f.U1Yk0ZDqs2v4VnL30ZKGm1Yk0Zfqs2v4VnL30A-V5HcsP0KM5yF-TZns0ZNG5yF9pywd0ZKGujYk0APGujYs0AdY5HDsnHIxnH0krNtknjc1g1DsPjuxn1msnfKopHYs0ZFY5HTsP0K-pyfqnHfvr7tznH04P7tkrjRvn7tzPWndn7tznjbzr0KBpHYznjf0UynqP1m1nW03Pjnsg1Dsnj7xnNtknjFxn0KkTA-b5H00TyPGujYs0ZFMIA7M5H00mycqn7ts0ANzu1Ys0ZKs5H00UMus5H08nj0snj0snj00Ugws5H00uAwETjYs0ZFJ5HD0uANv5gKW0AuY5H00TA6qn0KET1Ys0AFL5HDs0A4Y5H00TLCq0ZwdT1Y1n16dPHTsnWR4Pjm3njTsP1cs0ZF-TgfqnHR1Pjf3rjRvPWcvnsK1pyfquH61P1c1njmsnj04m1Fhu0KWTvYqrRDdPHFDwRFAnYcznRDvP0K9m1Yk0ZK85H00TydY5H00Tyd15H00XMfqn0KVmdqhThqV5HKxn7tsg1Kxn0Kbmy4dmhNxTAk9Uh-bT1Ysg1Kxn7t1nHb4n1Nxn0Ksmgwxuhk9u1Ys0AwWpyfqn0K-IA-b5iYk0A71TAPW5H00IgKGUhPW5H00Tydh5H00uhPdIjYs0AulpjYs0Au9IjYs0ZGsUZN15H00mywhUA7M5HD0UAuW5H00mLFW5HT1n10%26ck%3D8803.1.121.324.567.324.563.824%26shh%3Dwww.baidu.com%26sht%3Dbaidu%26us%3D1.0.2.0.1.300.0%26ie%3Dutf-8%26f%3D8%26tn%3Dbaidu%26wd%3D%25E6%258B%2589%25E5%258B%25BE%25E7%25BD%2591%26rqlang%3Dcn%26inputT%3D2968%26bc%3D110101; PRE_LAND=https%3A%2F%2Fwww.lagou.com%2Flp%2Fhtml%2Fcommon.html%3Futm_source%3Dm_cf_cpc_baidu_pc%26m_kw%3Dbaidu_cpc_bj_e110f9_d2162e_%25E6%258B%2589%25E5%258B%25BE%25E7%25BD%2591; '
               'JSESSIONID=ABAAABAAAGFABEF0FA09730921B5CA3AE4E13D3F684A19A; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1534488563,1534488595,1534488606; index_location_city=%E5%85%A8%E5%9B%BD; TG-TRACK-CODE=search_code; SEARCH_ID=ab62d667074042ddbc0294a97e39152c; _gat=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1534490018; LGRID=20180817151348-1625b6a1-a1ed-11e8-a9f0-5254005c3644'),
    'Host': 'www.lagou.com',
    'Origin': 'https://www.lagou.com',
    # Referer must be the search-results page for the same keyword/city.
    'Referer': 'https://www.lagou.com/jobs/list_%E6%95%B0%E6%8D%AE%E5%88%86%E6%9E%90?px=default&city=%E5%8C%97%E4%BA%AC',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.84 Safari/537.36',
    'X-Anit-Forge-Code': '0',
    'X-Anit-Forge-Token': 'None',
    'X-Requested-With': 'XMLHttpRequest',
}

# Crawl 30 listing pages; each AJAX page carries up to 15 job postings.
for page in range(1, 31):
    form = {
        'first': 'false',
        'pn': page,        # page number
        'kd': '数据分析',    # search keyword ("data analysis")
    }

    # First request: listing AJAX endpoint, JSON body.
    response = requests.post(url, data=form, headers=headers)
    html_dict = json.loads(response.text)

    # Each listing entry carries the positionId used to build the detail
    # URL.  Iterate the actual result list instead of a hard-coded
    # range(15), so a short final page cannot raise IndexError.
    for job in html_dict['content']['positionResult']['result']:
        position_id = job['positionId']

        # Second request: the job's detail page (HTML).
        detail_url = 'https://www.lagou.com/jobs/' + str(position_id) + '.html'
        form1 = {
            'positionId': position_id,
            'pageSize': 500,
        }
        # NOTE(review): a GET would be more conventional for an .html
        # page; the original POST-with-form is preserved deliberately.
        response = requests.post(detail_url, data=form1, headers=headers)
        html_ele = etree.HTML(response.text)

        # Company name.  BUG FIX: xpath() returns a list, never None, so
        # the original `== None` guard could never fire and the [0] index
        # crashed on anti-crawler pages.  Test for emptiness and skip the
        # posting instead.
        company_nodes = html_ele.xpath('//div[@class="company"]')
        if not company_nodes:
            continue
        company = company_nodes[0].text

        # Salary / experience / education from the position header spans.
        salary = html_ele.xpath('//div[@class="position-head"]/div/div[1]/dd/p/span[1]')[0].text
        jingyan = html_ele.xpath('//div[@class="position-head"]/div/div[1]/dd/p/span[3]')[0].text
        xueli = html_ele.xpath('//div[@class="position-head"]/div/div[1]/dd/p/span[4]')[0].text

        # Job description: join all text nodes into one readable string.
        # BUG FIX: the original stored str(list) — a Python list repr —
        # in the database.
        zhize = html_ele.xpath('//div[@class="content_l fl"]/dl[1]/dd[2]/div//text()')
        description = ''.join(zhize).strip()

        # Persist one row per posting.
        data = (company, salary, jingyan, xueli, description)
        myhelper.execute_modify_sql(sql, data)

 

【顶级EI复现】计及连锁故障传播路径的电力系统 N-k 多阶段双层优化及故障场景筛选模型(Matlab代码实现)内容概要:本文介绍了名为《【顶级EI复现】计及连锁故障传播路径的电力系统 N-k 多阶段双层优化及故障场景筛选模型(Matlab代码实现)》的研究资源,重点围绕电力系统中连锁故障的传播机制,提出了一种N-k多阶段双层优化模型,并结合故障场景筛选方法提升系统安全性与鲁棒性。该模型通过Matlab代码实现,可用于模拟复杂电力系统在多重故障下的响应特性,支持对关键故障路径的识别与优化决策,适用于高水平科研复现与工程仿真分析。文中还列举了大量相关技术方向的配套资源,涵盖智能优化算法、电力系统管理、机器学习、路径规划等多个领域,并提供了网盘链接以便获取完整代码与资料。; 适合人群:具备电力系统、优化理论及Matlab编程基础的研究生、科研人员及从事能源系统安全分析的工程技术人员,尤其适合致力于高水平论文(如EI/SCI)复现与创新的研究者。; 使用场景及目标:①复现顶级期刊关于N-k故障与连锁传播的优化模型;②开展电力系统韧性评估、故障传播分析与多阶段防御策略设计;③结合YALMIP等工具进行双层优化建模与场景筛选算法开发;④支撑科研项目、学位论文或学术成果转化。; 阅读建议:建议读者按照文档提供的目录顺序系统学习,优先掌握双层优化与场景筛选的核心思想,结合网盘中的Matlab代码进行调试与实验,同时参考文中提及的智能算法与电力系统建模范例,深化对复杂电力系统建模与优化的理解。
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值