python爬虫实现简单的代理ip池
我们在普通的爬虫过程中经常遇到一些网站对 IP 进行封锁的情况,这时就需要用代理 IP 来绕过封锁。
下面演示一下普通的爬虫程序
使用requests.get爬取数据
这段代码爬取的是豆瓣电影排行榜的数据,可以在浏览器中按 F12 打开开发者工具,在 Network 面板中查看请求的 URL 和返回的数据格式
代码
def requestData():
# 爬取数据的url
url: str = "https://movie.douban.com/j/chart/top_list";
# 拼接url的请求参数,根据查看记录可以看到,start是页码
params: dict = {
"type": 24,
"start": 0,
"limit": 20,
"interval_id": "100:90",
"action": ""
}
# 请求头
headers: dict = {
"Cookie": 'bid=E_4gLcYLK28; douban-fav-remind=1; _pk_id.100001.4cf6=356001ac7c27c8a7.1721138006.; __yadk_uid=3UpO8BdyzrKbVCb1NOAtbGumsp4WCXwl; __utmz=30149280.1721147606.4.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); __utmz=223695111.1721147606.3.2.utmcsr=google|utmccn=(organic)|utmcmd=organic|utmctr=(not%20provided); ll="118281"; _vwo_uuid_v2=DD3C30CAFFD881E01CA061E73D9968226|23b8625e4550d2e13d1dacf343f40f5d; __utma=30149280.457246694.1704531990.1721147606.1721223349.5; __utmc=30149280; __utma=223695111.1791837078.1721138006.1721147606.1721223349.4; __utmb=223695111.0.10.1721223349; __utmc=223695111; _pk_ref.100001.4cf6=%5B%22%22%2C%22%22%2C1721223349%2C%22https%3A%2F%2Fwww.google.com%2F%22%5D; _pk_ses.100001.4cf6=1; ap_v=0,6.0; __utmt=1; __utmb=30149280.1.10.1721223349',
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/121.0.0.0 Safari/537.36 Edg/121.0.0.0"
}
# 循环1000次,按页码分页获取数据(每页 limit=20 条)
for i in range(1000):
params["start"] = 20 * i;
try: