Sometimes we need to batch-download experimental data from the web, and writing a small crawler to do the downloading saves a great deal of time. Below are a few basic crawler programs I have collected; some of their parameters (for example the User-Agent string) may need to be adjusted to match your own browser.
1. A crawler that batch-extracts IP addresses:
import urllib.request
import re

def url_open(url):
    req = urllib.request.Request(url)
    # the header name must be 'User-Agent', with a hyphen
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36')
    page = urllib.request.urlopen(req)
    html = page.read().decode('utf-8')  # decoding errors can occur here if the page uses a different encoding
    return html

def getIp(html):
    # match dotted-quad IPv4 addresses; each octet is 0-255
    p = r'(?:(?:[01]?\d?\d|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d?\d|2[0-4]\d|25[0-5])'
    iplist = re.findall(p, html)
    for each in iplist:
        print(each)

if __name__ == '__main__':
    url = 'http://www.youdaili.net/Daili/guonei/3661.html'
    #url = 'http://cn-proxy.com'
    getIp(url_open(url))
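The script above only prints the matched addresses. To actually route a request through one of the scraped proxies, you can install it into urllib with a ProxyHandler. The sketch below is only a minimal illustration: the regex captures the IP alone, so the port in proxy_addr is a placeholder you would have to take from the proxy list yourself.

import urllib.request

def open_via_proxy(url, proxy_addr):
    # proxy_addr should be 'ip:port'; the port is not captured by the
    # regex above, so it must come from the proxy list itself
    handler = urllib.request.ProxyHandler({'http': proxy_addr})
    opener = urllib.request.build_opener(handler)
    opener.addheaders = [('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36')]
    return opener.open(url).read()

# hypothetical usage, with a made-up address and guessed port:
# html = open_via_proxy('http://www.example.com', '1.2.3.4:8080')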
2. A crawler that batch-downloads photos (妹子图):
import urllib.request
import os

def url_open(url):
    req = urllib.request.Request(url)
    # spoof a browser User-Agent so the site does not reject the script
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read()
    return html

def get_page(url):
    html = url_open(url).decode('utf-8')
    # the number appears as current-comment-page">[N] in the HTML,
    # so skip the 20-character key plus the '">[' that follows it
    a = html.find('current-comment-page') + 23
    b = html.find(']', a)
    return html[a:b]

def find_imgs(url):
    html = url_open(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    while a != -1:
        b = html.find('.jpg', a, a + 255)  # only look a short distance ahead
        if b != -1:
            # a + 9 skips past 'img src="'; b + 4 keeps the '.jpg' suffix
            img_addrs.append(html[a+9:b+4])
        else:
            b = a + 9
        a = html.find('img src=', b)
    for i in range(3):  # the last few matches are not photo links; drop them
        img_addrs.pop()
    return img_addrs

def save_imgs(folder, img_addrs):
    # folder is unused here because download_mm has already chdir'ed into it
    for each in img_addrs:
        filename = each.split('/')[-1]
        print(filename)
        with open(filename, 'wb') as f:
            img = url_open(each)
            f.write(img)

def download_mm(folder='OOXX1', pages=5):
    os.mkdir(folder)
    os.chdir(folder)
    url = "http://jandan.net/ooxx/"
    page_num = int(get_page(url))
    for i in range(pages):
        # walk backwards one page at a time from the newest page
        page_url = url + 'page-' + str(page_num - i) + '#comments'
        img_addrs = find_imgs(page_url)
        save_imgs(folder, img_addrs)

if __name__ == '__main__':
    download_mm()
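In practice a run over several pages will hit the occasional dead link, and image links are sometimes protocol-relative ('//...'), which urlopen cannot open directly. Below is a minimal, drop-in variant of save_imgs that guards against both; the 0.5 s pause is an arbitrary politeness value, and url_open is the helper defined above.

import time
import urllib.error

def save_imgs_safe(folder, img_addrs):
    # like save_imgs, but skips broken links instead of crashing
    # and pauses between downloads
    for each in img_addrs:
        if each.startswith('//'):  # protocol-relative link
            each = 'http:' + each
        filename = each.split('/')[-1]
        try:
            img = url_open(each)
        except urllib.error.URLError as e:
            print('skipping', each, '->', e)
            continue
        with open(filename, 'wb') as f:
            f.write(img)
        time.sleep(0.5)  # arbitrary politeness delay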
3. Downloading the photos with a regular expression:
import urllib.request
import os
import re

def url_open(url):
    req = urllib.request.Request(url)
    # spoof a browser User-Agent so the site does not reject the script
    req.add_header('User-Agent', 'Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/43.0.2357.130 Safari/537.36')
    response = urllib.request.urlopen(req)
    html = response.read().decode('utf-8')
    return html

def get_imgs(html):
    #p = r'bpic="([^"]+\.jpg)"'          # full-size images
    p = r'data-original="([^"]+\.jpg)"'  # thumbnails
    imglist = re.findall(p, html)
    # for each in imglist: print(each)   # uncomment to inspect the matches
    folder = "mm1"  # create a folder to hold the images
    os.mkdir(folder)
    os.chdir(folder)
    for each in imglist:
        filename = each.split('/')[-1]
        print(filename)
        urllib.request.urlretrieve(each, filename, None)  # fetch the image and save it to the current directory

if __name__ == '__main__':
    # url = "http://tieba.baidu.com/f?kw=%E5%A6%B9%E5%AD%90%E5%9B%BE&ie=utf-8"
    url = "http://tieba.baidu.com/f?kw=%E5%A6%B9%E5%AD%90%E5%9B%BE&fr=search"
    get_imgs(url_open(url))
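The regular expression works as long as Tieba keeps serving the data-original attribute, but an HTML parser is more tolerant of markup changes. As a sketch, here is the same extraction done with the standard-library html.parser; the attribute name is simply the one assumed by the regex above.

from html.parser import HTMLParser

class ImgParser(HTMLParser):
    # collects the value of every data-original attribute ending in .jpg,
    # mirroring the regex in get_imgs
    def __init__(self):
        super().__init__()
        self.imglist = []

    def handle_starttag(self, tag, attrs):
        if tag == 'img':
            for name, value in attrs:
                if name == 'data-original' and value and value.endswith('.jpg'):
                    self.imglist.append(value)

# usage: feed it the page source returned by url_open
# parser = ImgParser()
# parser.feed(url_open(url))
# print(parser.imglist)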