Daily Crawler Practice

1. Scraping past webdriver automation detection

Some sites detect Selenium's webdriver fingerprint as an anti-scraping measure, so a few countermeasures are needed to keep the page from spotting the automation. The reusable boilerplate is given directly below:

from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait

# Configure ChromeOptions to avoid detection
options = webdriver.ChromeOptions()
options.add_argument("--disable-blink-features=AutomationControlled")
options.add_experimental_option('excludeSwitches', ['enable-automation'])
options.add_experimental_option('useAutomationExtension', False)
options.add_argument("--incognito")  # 使用隐身模式
# options.add_argument('--headless')  # 无头浏览,开发和调试时可以注释掉这行

# Create the WebDriver instance
driver = webdriver.Chrome(options=options)
# Inject a script that runs before every page load, so navigator.webdriver reads as undefined
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
    "source": """
    Object.defineProperty(navigator, 'webdriver', {
        get: () => undefined
    })
    """
})
wait = WebDriverWait(driver, 10)
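
To confirm the patch took effect, a quick check (a sketch; any URL will do, example.com is just a stand-in) is to load a page and read navigator.webdriver back. Selenium returns JavaScript's undefined as None:

driver.get('https://example.com')  # any page works for this check
print(driver.execute_script('return navigator.webdriver'))  # expect: None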

Counter-anti-scraping in practice, against a live site:

from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.support.ui import WebDriverWait

# Configure ChromeOptions to avoid detection
options = webdriver.ChromeOptions()
options.add_argument("--disable-blink-features=AutomationControlled")
options.add_experimental_option('excludeSwitches', ['enable-automation'])
options.add_experimental_option('useAutomationExtension', False)
options.add_argument("--incognito")  # 使用隐身模式
# options.add_argument('--headless')  # 无头浏览,开发和调试时可以注释掉这行

# Create the WebDriver instance
driver = webdriver.Chrome(options=options)
# Inject a script that runs before every page load, so navigator.webdriver reads as undefined
driver.execute_cdp_cmd("Page.addScriptToEvaluateOnNewDocument", {
    "source": """
    Object.defineProperty(navigator, 'webdriver', {
        get: () => undefined
    })
    """
})
wait = WebDriverWait(driver, 10)
driver.get('https://antispider1.scrape.center/')
time.sleep(5)
all_data_dict = {}
for i in range(10):
    titles = driver.find_elements(By.XPATH, '//*[@id="index"]/div[1]/div[1]/div/div/div/div[2]/a/h2')
    grades = driver.find_elements(By.XPATH, '//*[@id="index"]/div[1]/div[1]/div/div/div/div[3]/p[1]')
    data_dict = {}
    for title, grade in zip(titles, grades):
        data_dict[title.text] = grade.text

    all_data_dict.update(data_dict)
    print(f"页数 {i + 1}: {data_dict}")
    try:
        search_next = driver.find_element(By.XPATH, '//*[@id="index"]/div[2]/div/div/div/button[2]/i').click()
        time.sleep(3)
    except Exception as e:
        print(f"单击下一页出错:{e}")

# Print the data collected from every page
print('-' * 80)
print("All data:", all_data_dict)
time.sleep(3)
driver.quit()
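
One loose end in the script above: the wait object is created but never used, and the flow relies on fixed time.sleep calls. As a sketch, an explicit wait (reusing the title XPath from the script) could replace the sleep after driver.get; presence_of_all_elements_located is a standard condition from selenium.webdriver.support.expected_conditions:

from selenium.webdriver.support import expected_conditions as EC

# Block (up to the 10 s configured on `wait`) until the movie titles are
# present, instead of sleeping for a fixed 5 seconds
wait.until(EC.presence_of_all_elements_located(
    (By.XPATH, '//*[@id="index"]/div[1]/div[1]/div/div/div/div[2]/a/h2')
))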

2. Multi-page scraping of BOSS Zhipin (boss直聘)

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time

driver = webdriver.Chrome()
driver.get('https://www.zhipin.com/?ka=header-home-logo')
time.sleep(3)

# Locate the search box, type the query, and submit with Enter
searching = driver.find_element(By.XPATH, '//*[@id="wrap"]/div[3]/div/div[1]/div[1]/form/div[2]/p/input')
searching.send_keys('大数据爬虫')  # search keyword: "big data crawler" jobs
time.sleep(3)
searching.send_keys(Keys.ENTER)
time.sleep(10)
for i in range(3):
    print(f'Scraping page {i + 1}...')
    titles = driver.find_elements(By.XPATH, '//span[@class="job-name"]')
    prices = driver.find_elements(By.XPATH, '//span[@class="salary"]')
    for title, price in zip(titles, prices):
        print(f'Job title: {title.text}')
        print(f'Salary: {price.text}')

    # Mirror the try/except from section 1 so a missing next-page arrow doesn't crash the run
    try:
        driver.find_element(By.XPATH, '//i[@class="ui-icon-arrow-right"]').click()
        time.sleep(3)
    except Exception as e:
        print(f"Failed to click next page: {e}")

time.sleep(10)
driver.quit()
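
To keep the results rather than only printing them, one option (a sketch; the jobs list, the append call, and the filename are assumptions, not part of the script above) is to collect each (title, salary) pair during the loop and write everything to CSV at the end:

import csv

# Sketch: `jobs` is assumed to be filled inside the loop above via
# jobs.append((title.text, price.text)); a placeholder row keeps this runnable
jobs = [('大数据开发工程师', '15-25K')]
with open('boss_jobs.csv', 'w', newline='', encoding='utf-8-sig') as f:
    writer = csv.writer(f)
    writer.writerow(['job_title', 'salary'])
    writer.writerows(jobs)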