Python course notes: implementing a crawler with dynamic IPs

1. Scrape proxy IPs from the XiciDaili (西刺) proxy site
2. Check each scraped IP for validity against a specified target URL
3. Save the working IPs to a specified path
I. Main functions

  • File handling
import datetime
import random
import threading

import requests
from bs4 import BeautifulSoup

# Append one line to a file
def write(path, text):
    with open(path, 'a', encoding='utf-8') as f:
        f.write(text)
        f.write('\n')
# Empty a file
def truncatefile(path):
    with open(path, 'w', encoding='utf-8') as f:
        f.truncate()
# Read a file into a list of stripped lines
def read(path):
    with open(path, 'r', encoding='utf-8') as f:
        txt = [s.strip() for s in f.readlines()]
    return txt
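A quick round-trip check of these three helpers (demo.txt is just a scratch file for illustration):
truncatefile('demo.txt')
write('demo.txt', '1.2.3.4:8080')
write('demo.txt', '5.6.7.8:3128')
print(read('demo.txt'))  # ['1.2.3.4:8080', '5.6.7.8:3128']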
  • Computing a time difference
# Compute a time difference, formatted as hh:mm:ss
def gettimediff(start, end):
    seconds = (end - start).seconds  # note: ignores full days
    m, s = divmod(seconds, 60)
    h, m = divmod(m, 60)
    diff = ("%02d:%02d:%02d" % (h, m, s))
    return diff
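For example, with a synthetic interval built from a timedelta (standing in for real elapsed work):
start = datetime.datetime.now()
end = start + datetime.timedelta(minutes=3, seconds=42)
print(gettimediff(start, end))  # 00:03:42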
  • Building request headers
# Return headers with a randomly chosen User-Agent
def getheaders():
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1",
        "Mozilla/5.0 (X11; CrOS i686 2268.111.0) AppleWebKit/536.11 (KHTML, like Gecko) Chrome/20.0.1132.57 Safari/536.11",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1092.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.6 (KHTML, like Gecko) Chrome/20.0.1090.0 Safari/536.6",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/19.77.34.5 Safari/537.1",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.9 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.0) AppleWebKit/536.5 (KHTML, like Gecko) Chrome/19.0.1084.36 Safari/536.5",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 5.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_8_0) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1063.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1062.0 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.1 Safari/536.3",
        "Mozilla/5.0 (Windows NT 6.2) AppleWebKit/536.3 (KHTML, like Gecko) Chrome/19.0.1061.0 Safari/536.3",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24",
        "Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/535.24 (KHTML, like Gecko) Chrome/19.0.1055.1 Safari/535.24"
    ]
    useragent = random.choice(user_agent_list)
    headers = {'User-Agent': useragent}
    return headers
  • Checking whether a proxy IP works
# Check whether a proxy IP can fetch the target URL within 5 seconds
def checkip(targeturl, ip):
    headers = getheaders()  # random request headers
    proxies = {"http": "http://" + ip, "https": "http://" + ip}  # route both schemes through the proxy
    try:
        status = requests.get(url=targeturl, proxies=proxies, headers=headers, timeout=5).status_code
        return status == 200
    except requests.exceptions.RequestException:
        return False
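For example, probing a made-up proxy address against Baidu (this prints False unless that proxy happens to be live):
print(checkip('http://www.baidu.com', '1.2.3.4:8080'))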
  • Scraping the proxies
# Free proxies from XiciDaili
def findip(iptype, pagenum, targeturl, path):  # IP type, page number, target URL, output path
    urls = {'1': 'http://www.xicidaili.com/nt/',  # xicidaili domestic regular proxies
            '2': 'http://www.xicidaili.com/nn/',  # xicidaili domestic high-anonymity proxies
            '3': 'http://www.xicidaili.com/wn/',  # xicidaili domestic HTTPS proxies
            '4': 'http://www.xicidaili.com/wt/'}  # xicidaili foreign HTTP proxies
    url = urls[str(iptype)] + str(pagenum)  # build the page URL
    headers = getheaders()  # random request headers
    html = requests.get(url=url, headers=headers, timeout=5).text
    soup = BeautifulSoup(html, 'lxml')
    rows = soup.find_all('tr', class_='odd')  # the site marks alternating table rows with class="odd"
    for row in rows:
        tds = row.find_all('td')
        ip = tds[1].text + ':' + tds[2].text
        if checkip(targeturl, ip):
            write(path=path, text=ip)
            print(ip)
  • Multithreaded scraping entry point
def getip(targeturl, path):
    truncatefile(path)  # clear the output file before scraping
    start = datetime.datetime.now()  # start time
    threads = []
    for iptype in range(4):  # 4 proxy types, first 3 pages of each: 12 threads in total
        for pagenum in range(3):
            t = threading.Thread(target=findip, args=(iptype + 1, pagenum + 1, targeturl, path))
            threads.append(t)
    print('Start scraping proxy IPs')
    for t in threads:  # launch the scraping threads
        t.start()
    for t in threads:  # wait for all threads to finish
        t.join()
    print('Scraping finished')
    end = datetime.datetime.now()  # end time
    diff = gettimediff(start, end)  # elapsed time
    ips = read(path)  # count the scraped IPs
    print('Scraped %s proxy IPs in %s\n' % (len(ips), diff))
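Note that all twelve threads append to the same file through write(). Appends of short lines usually land intact, but if you want to make the serialization explicit, a lock will do it; write_safe below is a hypothetical wrapper, not part of the original code:
write_lock = threading.Lock()

def write_safe(path, text):  # hypothetical locked wrapper around write()
    with write_lock:  # only one thread appends at a time
        write(path, text)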

II. Picking a random IP

import random
ips = ['120.92.74.237:3128','218.60.8.99:3129','113.200.56.13:8010']
ip = random.choice(ips)
print(ip)
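To actually send a request through one of the collected proxies, a minimal sketch (assuming ip.txt was produced by getip, and that read, getheaders and the requests import from part I are in scope):
ips = read('ip.txt')  # proxies collected by getip()
ip = random.choice(ips)
proxies = {"http": "http://" + ip, "https": "http://" + ip}
r = requests.get('http://www.baidu.com', proxies=proxies, headers=getheaders(), timeout=5)
print(r.status_code)  # 200 if the proxy still works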

III. Database design: MySQL

  • Set utf-8 to avoid garbled characters
set character_set_server = utf8;
set character_set_database = utf8;
set character_set_client = utf8;
set character_set_connection = utf8;
set character_set_results = utf8;
show variables like '%character%';
drop database if exists douban;
create database douban;
use douban;
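On the Python side, the same encoding can be requested when connecting with pymysql (the credentials below are placeholders):
import pymysql

db = pymysql.connect(host='localhost', user='root', password='secret',
                     database='douban', charset='utf8')  # charset matches the server settings above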
  • Movies
create table movie(
movie_id char(20) primary key,
movie_name char(30) not null,
movie_director_name char(100),
movie_writer char(100),
movie_actor char(255),
movie_type char(100),
movie_country char(100),
movie_language char(100),
movie_release_time char(100), -- release info includes festival notes, so not a DATE
movie_length int,
movie_alias char(100),
imdb char(20)
);
  • Short comments
create table short_comment(
short_comment_id char(20) primary key,
short_comment_writer char(20),
short_comment_score char(5),
short_comment_date date,
short_comment_content text
);
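Only the movie table gets an insert helper in part IV below; a matching sketch for short comments could look like this (a hypothetical helper mirroring insert_movie; host, user, pwd and database are assumed to be defined as in part IV):
import pymysql

def insert_short_comment(comment):  # hypothetical helper, not in the original code
    db = pymysql.connect(host=host, user=user, password=pwd, database=database)
    cursor = db.cursor()
    cursor.execute("insert into short_comment values (%s,%s,%s,%s,%s)", comment)  # 5 columns
    db.commit()
    db.close()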

IV. Testing the database

  • Checking whether a movie exists
import pymysql

# host, user, pwd and database are assumed to be defined elsewhere
def is_exists_movie_id(movie_id):
    db = pymysql.connect(host=host, user=user, password=pwd, database=database)
    cursor = db.cursor()
    # parameterized query; the driver handles quoting
    cursor.execute("select * from movie where movie_id=%s", (movie_id,))
    one = cursor.fetchone()
    db.close()
    return one is not None
  • Inserting a movie
def insert_movie(movie):
    db = pymysql.connect(host=host, user=user, password=pwd, database=database)
    cursor = db.cursor()
    # 12 placeholders, one per movie column
    sql = "insert into movie values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    if not is_exists_movie_id(movie[0]):
        cursor.execute(sql, movie)
        print(sql)
        db.commit()
    db.close()
  • Scrape a movie's details and insert them into the database
import re
from urllib import request

# Scrape one Douban subject page and build the movie tuple
def get_movie_tupple(url='https://movie.douban.com/subject/26683283/'):
    response = request.urlopen(url)
    html = response.read().decode('utf-8')
    bs = BeautifulSoup(html, 'lxml')
    movie_name = bs.h1.span.text  # the first <span> under <h1> holds the title
    info_div = bs.find(id='info')
    span_attrs = info_div.findAll(class_='attrs')
    directors_span = span_attrs[0]
    screenwriters_span = span_attrs[1]
    starring_span = span_attrs[2]
    director_str = ';'.join(a.text for a in directors_span.findAll('a'))
    print('Director: ' + director_str)
    screenwriter_str = ';'.join(a.text for a in screenwriters_span.findAll('a'))
    print('Screenwriter: ' + screenwriter_str)
    starring_str = ';'.join(a.text for a in starring_span.findAll('a'))
    print('Starring: ' + starring_str)
    movie_type = ';'.join(s.text for s in bs.findAll('span', {'property': 'v:genre'}))
    print('Genre: ' + movie_type)
    country = re.findall('制片国家/地区:(.*)', info_div.text)[0]
    print('Country/region: ' + country)
    language = re.findall('语言:(.*)', info_div.text)[0]
    print('Language: ' + language)
    # The remaining fields (release dates, length, aliases, IMDb id) are filled
    # in by hand for the demo subject 26683283
    movie = ('26683283', movie_name, director_str, screenwriter_str, starring_str,
             movie_type, country, language,
             '2017-05-25(戛纳电影节) / 2017-08-16(法国)', 143,
             '铁幕温柔女子(港) / 残酷的温柔(台) / A Gentle Creature / Кроткая', 'tt5618752')
    return movie
movie = get_movie_tupple('https://movie.douban.com/subject/26683283/')
insert_movie(movie)

V. Entry point

if __name__ == '__main__':
    path = 'ip.txt'  # file that stores the scraped IPs
    targeturl = 'http://www.baidu.com'  # URL used to validate the IPs
    getip(targeturl, path)
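Since get_movie_tupple fetches pages with urllib rather than requests, routing it through one of the scraped proxies goes via a ProxyHandler. A sketch, assuming read() and path from above (the proxy choice is illustrative):
from urllib import request

ip = random.choice(read(path))  # pick one of the scraped proxies
opener = request.build_opener(request.ProxyHandler({'http': 'http://' + ip,
                                                    'https': 'http://' + ip}))
request.install_opener(opener)  # subsequent urlopen calls go through the proxy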