Crawling Baidu search results (e.g. images) by keyword
#!/usr/bin/env python
# -*- coding:utf-8 -*-
# function: crawl images from Baidu image search (just pass in the keyword to crawl)
import os
import re
import json
import socket
import time
import urllib.request
import urllib.parse
import urllib.error

# Global socket timeout: abort any connection that takes longer than 5 seconds
timeout = 5
socket.setdefaulttimeout(timeout)
class Crawler:
    __amount = 0
    __start_amount = 0
    __counter = 0
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20100101 Firefox/23.0'}

    # t: interval (in seconds) to sleep between image downloads
    def __init__(self, t=0.1):
        self.time_sleep = t
    # Extract the file extension from a URL/filename; fall back to '.jpeg'
    def get_suffix(self, name):
        m = re.search(r'\.[^\.]*$', name)
        # Guard against no match (m is None) and implausibly long suffixes
        if m and len(m.group(0)) <= 5:
            return m.group(0)
        else:
            return '.jpeg'
    # Build a Referer value (scheme://host) from an image URL
    def get_referrer(self, url):
        par = urllib.parse.urlparse(url)
        if par.scheme:
            return par.scheme + '://' + par.netloc
        else:
            return par.netloc
    # Save images under ./<word>/ relative to the current working directory
    def save_image(self, rsp_data, word):
        if not os.path.exists("./" + word):
            os.mkdir("./" + word)
        # Avoid filename collisions: continue numbering after the files already
        # present (the hard-coded 3579 offset accounts for images saved by earlier runs)
        self.__counter = len(os.listdir('./' + word)) + 1 + 3579
        for image_info in rsp_data['imgs']:
            try:
                time.sleep(self.time_sleep)
                suffix = self.get_suffix(image_info['objURL'])
                # Set a User-Agent and Referer to reduce 403 responses
                refer = self.get_referrer(image_info['objURL'])
                opener = urllib.request.build_opener()
                opener.addheaders = [
                    ('User-agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0'),
                    ('Referer', refer)
                ]
                urllib.request.install_opener(opener)
                # Download and save the image
                urllib.request.urlretrieve(image_info['objURL'], './' + word + '/' + str(self.__counter) + str(suffix))
            except urllib.error.HTTPError as urllib_err:
                print(urllib_err)
                continue
            except Exception as err:
                time.sleep(1)
                print(err)
                print("Unknown error, skipping this image")
                continue
            else:
                print("+1 image, " + str(self.__counter) + " images so far")
                self.__counter += 1
        return
    # Fetch result pages and hand each JSON payload to save_image
    def get_images(self, word='风景'):
        search = urllib.parse.quote(word)
        # pn: result offset (60 results per page)
        pn = self.__start_amount
        while pn < self.__amount:
            url = 'http://image.baidu.com/search/avatarjson?tn=resultjsonavatarnew&ie=utf-8&word=' + search + '&cg=girl&pn=' + str(pn) + '&rn=60&itg=0&z=0&fr=&width=&height=&lm=-1&ic=0&s=0&st=-1&gsm=1e0000001e'
            # Send headers to avoid being banned
            page = None
            try:
                time.sleep(self.time_sleep)
                req = urllib.request.Request(url=url, headers=self.headers)
                page = urllib.request.urlopen(req)
                rsp = page.read().decode('unicode_escape')
            except UnicodeDecodeError as e:
                print(e)
                print('-----UnicodeDecodeError url:', url)
            except urllib.error.URLError as e:
                print(e)
                print("-----urlError url:", url)
            except socket.timeout as e:
                print(e)
                print("-----socket timeout:", url)
            else:
                # Parse the JSON payload and save the images it lists
                rsp_data = json.loads(rsp)
                self.save_image(rsp_data, word)
                # Move on to the next page
                print("Downloading next page")
                pn += 60
            finally:
                # page may be None if urlopen itself failed
                if page is not None:
                    page.close()
        print("Download task finished")
        return
    def start(self, word, spider_page_num=1, start_page=1):
        """
        Crawler entry point
        :param word: keyword to crawl
        :param spider_page_num: number of result pages to crawl; total images = pages x 60
        :param start_page: page to start from
        :return:
        """
        self.__start_amount = (start_page - 1) * 60
        self.__amount = spider_page_num * 60 + self.__start_amount
        self.get_images(word)
if __name__ == '__main__':
    crawler = Crawler(0.05)  # download interval of 0.05 s
    # Crawl keyword '风景', 10 pages (10*60 = 600 images), starting from page 2
    # crawler.start('风景', 10, 2)
    # Crawl keyword '风景图片大全', 100 pages (100*60 = 6000 images), starting from page 1
    crawler.start('风景图片大全', 100, 1)
    # Crawl keyword '帅哥', 5 pages (5*60 = 300 images)
    # crawler.start('帅哥', 5)
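As a side note, the same download-with-headers step can also be written with the third-party requests library. The sketch below is illustrative only and not part of the original script: it assumes requests is installed (pip install requests), and download_image with its parameters is a hypothetical helper name.

# Minimal sketch of the same User-Agent/Referer trick using requests.
# Assumption: the requests package is installed; download_image is an
# illustrative helper, not part of the original crawler.
import requests

def download_image(url, path, referer, timeout=5):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:55.0) Gecko/20100101 Firefox/55.0',
        'Referer': referer,
    }
    resp = requests.get(url, headers=headers, timeout=timeout)
    resp.raise_for_status()  # raise on 4xx/5xx instead of saving an error page
    with open(path, 'wb') as f:
        f.write(resp.content)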
Drawing a line plot with Python
# Draw a basic line plot
'''
import matplotlib.pyplot as plt

# X-axis and Y-axis data
x = [0, 1, 2, 3, 4, 5, 6]
y = [0.3, 0.4, 2, 5, 3, 4.5, 4]
plt.figure(figsize=(8, 4))          # create the figure
plt.plot(x, y, "b--", linewidth=1)  # plot on the current figure (x, y, blue dashed line, line width 1)
plt.xlabel("Time(s)")               # X-axis label
plt.ylabel("Volt")                  # Y-axis label
plt.title("Line plot")              # figure title
plt.savefig("line.jpg")             # save the figure; must come before show(), which clears the canvas on many backends
plt.show()                          # display the figure
'''
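The same basic plot can also be written in matplotlib's object-oriented style; a brief sketch, functionally equivalent to the quoted snippet above:

# Object-oriented equivalent of the quoted snippet above (a sketch)
import matplotlib.pyplot as plt

fig, ax = plt.subplots(figsize=(8, 4))
ax.plot([0, 1, 2, 3, 4, 5, 6], [0.3, 0.4, 2, 5, 3, 4.5, 4], "b--", linewidth=1)
ax.set_xlabel("Time(s)")
ax.set_ylabel("Volt")
ax.set_title("Line plot")
fig.savefig("line.jpg")  # save before show() so the canvas is not cleared
plt.show()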
import matplotlib.pyplot as plt

# Chinese font support: use SimHei for text, and keep the minus sign renderable
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

names = ['BR', 'CLR', 'ML-LOC', 'RAKEL', 'V-GLLCBN']
x = range(len(names))
y = [0.1326, 0.1531, 0.1627, 0.1564, 0.2062]
y1 = [3.8942/9, 4.2231/9, 4.2483/9, 4.2411/9, 4.7556/9]  # coverage scores scaled by 9 (presumably the label count)
y2 = [0.2513, 0.2801, 0.2832, 0.2792, 0.2905]
y3 = [0.6412, 0.6321, 0.6243, 0.6288, 0.6172]
y4 = [0.4681, 0.604, 0.6910, 0.6332, 0.9704]
# plt.plot(x, y, 'ro-')
# plt.plot(x, y1, 'bo-')
# plt.xlim(-1, 11)  # limit the x-axis range
# plt.ylim(-1, 110)  # limit the y-axis range
# Legend labels are kept in Chinese to exercise the SimHei setup above;
# English glosses are given in the trailing comments
plt.plot(x, y, marker='o', mec='r', mfc='w', label='汉明损失曲线图')  # Hamming loss
plt.plot(x, y1, marker='o', ms=10, label='覆盖率曲线图')              # coverage
plt.plot(x, y2, marker='o', ms=10, label='排名损失曲线图')            # ranking loss
plt.plot(x, y3, marker='o', ms=10, label='平均精度曲线图')            # average precision
plt.plot(x, y4, marker='o', ms=10, label='平均预测时间曲线图')        # average prediction time
# Make the legend take effect
plt.legend()
plt.xticks(x, names, rotation=0)
plt.margins(0.2)
plt.subplots_adjust(bottom=0.15)
# X-axis label
plt.xlabel("对比算法")  # "algorithms compared"
# Y-axis label
plt.ylabel("数值")  # "metric value"
# Title
plt.title("随机标记30%数据集作为训练集的不同算法性能比较")  # performance comparison of algorithms with a random 30% of the data as training set
# Display the figure
plt.show()
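If the comparison chart should also be written to disk, precede the final plt.show() with a savefig call; a small sketch (the filename, dpi, and bbox_inches values are arbitrary examples):

# Optional: persist the chart; must run before plt.show(), which may clear
# the canvas. The filename here is an arbitrary example.
plt.savefig("algorithm_comparison.png", dpi=200, bbox_inches='tight')
plt.show()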