Web Scraping in Practice: Installing Selenium and Learning About Proxy IPs

This post walks through automating an email login with Selenium, and shows how to scrape free proxy IPs and build a working IP pool to cope with websites' anti-crawling measures.


Installing and getting started with Selenium

The code for logging into the mailbox is as follows:
from selenium import webdriver
from selenium.webdriver import ActionChains
import time

browser = webdriver.Chrome()
try:
    url = 'http://mail.163.com/'
    browser.get(url)
    time.sleep(5)
    login_em = browser.find_element_by_id('switchAccountLogin')
    login_em.click()
    browser.switch_to.frame(0)
    time.sleep(5)
    email = browser.find_element_by_name('email')
    email.send_keys('13429886885@163.com')  # type your own email address into the account field
    password = browser.find_element_by_name('password')  # locate the password input box
    password.send_keys('13966277267')  # type your own email password
    login_em = browser.find_element_by_id('dologin')  # locate the login button
    login_em.click()
    time.sleep(10)
finally:
    pass
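
The find_element_by_id / find_element_by_name calls above belong to the Selenium 3 API and were removed in Selenium 4. If you are running a newer Selenium, the same login flow can be written with By locators. Below is a minimal sketch under that assumption; the element ids and names ('switchAccountLogin', 'email', 'password', 'dologin') are simply reused from the code above and may change whenever 163 Mail redesigns its login page.

from selenium import webdriver
from selenium.webdriver.common.by import By
import time

browser = webdriver.Chrome()
try:
    browser.get('http://mail.163.com/')
    time.sleep(5)
    browser.find_element(By.ID, 'switchAccountLogin').click()
    browser.switch_to.frame(0)  # the login form lives inside an iframe
    time.sleep(5)
    browser.find_element(By.NAME, 'email').send_keys('your_account@163.com')  # your email address
    browser.find_element(By.NAME, 'password').send_keys('your_password')      # your password
    browser.find_element(By.ID, 'dologin').click()
    time.sleep(10)
finally:
    browser.quit()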
The code for building the IP pool is as follows:
from bs4 import BeautifulSoup
import requests
import re
import json
import time

def open_proxy_url(url):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    try:
        r = requests.get(url, headers=headers, timeout=20)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return r.text
    except:
        print('Failed to open page: ' + url)

def get_proxy_ip(response):
    proxy_ip_list = []
    soup = BeautifulSoup(response, 'html.parser')
    proxy_ips = soup.find(id='ip_list').find_all('tr')
    for proxy_ip in proxy_ips:
        if len(proxy_ip.select('td')) >= 8:
            ip = proxy_ip.select('td')[1].text
            port = proxy_ip.select('td')[2].text
            protocol = proxy_ip.select('td')[5].text
            if protocol in ('HTTP', 'HTTPS', 'http', 'https'):
                proxy_ip_list.append(f'{protocol}://{ip}:{port}')
    return proxy_ip_list

def open_url_using_proxy(url, proxy):
    user_agent = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.119 Safari/537.36'
    headers = {'User-Agent': user_agent}
    proxies = {}
    if proxy.startswith(('HTTPS', 'https')):
        proxies['https'] = proxy
    else:
        proxies['http'] = proxy

    try:
        r = requests.get(url, headers=headers, proxies=proxies, timeout=10)
        r.raise_for_status()
        r.encoding = r.apparent_encoding
        return (r.text, r.status_code)
    except:
        print('Failed to open page: ' + url)
        print('Invalid proxy IP: ' + proxy)
        return False

def check_proxy_avaliability(proxy):
    url = 'http://www.baidu.com'
    result = open_url_using_proxy(url, proxy)
    VALID_PROXY = False
    if result:
        text, status_code = result
        if status_code == 200:
            # the returned page title must be exactly Baidu's homepage title
            r_title = re.findall('<title>.*</title>', text)
            if r_title:
                if r_title[0] == '<title>百度一下，你就知道</title>':
                    VALID_PROXY = True
    if VALID_PROXY:
        check_ip_url = 'https://jsonip.com/'
        try:
            text, status_code = open_url_using_proxy(check_ip_url, proxy)
        except:
            return

        print('Valid proxy IP: ' + proxy)
        with open('valid_proxy_ip.txt', 'a') as f:
            f.write(proxy + '\n')  # one proxy per line
        try:
            source_ip = json.loads(text).get('ip')
            print(f'Source IP address: {source_ip}')
            print('=' * 40)
        except:
            print('Response is not JSON, cannot parse')
            print(text)
    else:
        print('Invalid proxy IP: ' + proxy)

if __name__ == '__main__':
    proxy_url = 'https://www.xicidaili.com/'
    text = open_proxy_url(proxy_url)
    proxy_ip_filename = 'proxy_ip.txt'
    with open(proxy_ip_filename, 'w') as f:
        f.write(text)
    text = open(proxy_ip_filename, 'r').read()
    proxy_ip_list = get_proxy_ip(text)
    #print(proxy_ip_list)
    for proxy in proxy_ip_list:
        check_proxy_avaliability(proxy)
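
Once valid_proxy_ip.txt has some entries, the pool can be used from an ordinary requests crawler by rotating through the saved proxies. The sketch below is one possible way to do it, assuming the file holds one protocol://ip:port entry per line as written by check_proxy_avaliability(); the helper names (load_proxy_pool, fetch_with_proxy_pool) and the retry count are just illustrative.

import random
import requests

def load_proxy_pool(filename='valid_proxy_ip.txt'):
    # read the proxies that passed the Baidu title check, one per line
    with open(filename) as f:
        return [line.strip() for line in f if line.strip()]

def fetch_with_proxy_pool(url, pool, retries=3):
    headers = {'User-Agent': 'Mozilla/5.0'}
    for _ in range(retries):
        proxy = random.choice(pool)  # pick a random proxy for each attempt
        scheme = 'https' if proxy.lower().startswith('https') else 'http'
        try:
            r = requests.get(url, headers=headers, proxies={scheme: proxy}, timeout=10)
            r.raise_for_status()
            return r.text
        except requests.RequestException:
            continue  # this proxy failed, try another one
    return None

if __name__ == '__main__':
    pool = load_proxy_pool()
    if pool:
        html = fetch_with_proxy_pool('http://www.baidu.com', pool)
        print('fetched' if html else 'all proxies failed')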
