# Crawl proxy IPs from a free-proxy site and check whether each one is usable
import json
import re

import requests
# Fetch one listing page of proxies and check every entry on it
def get_ip(page):
    """Download page *page* of the kuaidaili free-proxy list and run
    every (ip, port) pair found there through chick().

    Args:
        page: 1-based page number; page 1 has no number in its URL.
    """
    if page == 1:
        url = 'https://www.kuaidaili.com/free/fps/'
    else:
        url = f'https://www.kuaidaili.com/free/fps/{page}'
    headers = {
        'user-agent':
            'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/128.0.0.0 Safari/537.36 Edg/128.0.0.0'
    }
    res = requests.get(url, headers=headers)
    content = res.text
    # The page embeds its data as a JS literal: `const fpsList = [ {...}, ... ]`.
    # Capture the whole bracketed array (assumes it sits on one line — TODO confirm).
    con = re.findall(r"const fpsList = (\[.*\])", content)
    if not con:
        return
    # json.loads instead of eval(): never execute text scraped from the web.
    # Check every proxy on the page, not just the first one.
    for entry in json.loads(con[0]):
        chick(entry['ip'], entry['port'])
# Validate a proxy by routing a test request through it
def chick(ip, port):
    """Check whether ip:port works as an HTTP proxy.

    Sends a GET to a test page through the proxy with a 2-second timeout.
    On success, persists the proxy via save_ip().

    Returns:
        True if the proxied request came back with status 200, else False.
    """
    try:
        # Give the proxy URL an explicit scheme; cover both http and https traffic.
        proxy_url = f"http://{ip}:{port}"
        dic = {'http': proxy_url, 'https': proxy_url}
        res = requests.get(url="http://www.hao123.com", timeout=2, proxies=dic, verify=False)
        if res.status_code == 200:
            print('有效')
            print(ip)
            save_ip(ip, port)
            # Original fell off the end here (returned None); make success explicit.
            return True
        else:
            print('无效')
            return False
    except Exception as e:
        # Broad on purpose: one bad proxy must never abort the whole crawl.
        print(e)
        return False
# Persist a verified proxy (note: "prot" is a historical typo for "port",
# kept to avoid breaking keyword callers)
def save_ip(ip, prot):
    """Append ``ip:prot`` to ip.txt, one proxy per line."""
    print(ip)
    # Explicit encoding so the output file is identical across platforms.
    with open('ip.txt', 'a', encoding='utf-8') as f:
        f.write(f'{ip}:{prot}\n')
if __name__ == '__main__':
    # Walk every listing page of the site (pages 1 through 6180 inclusive).
    page_number = 1
    while page_number <= 6180:
        get_ip(page_number)
        page_number += 1