def get_data(url):
    """Fetch *url* and return the response body decoded as UTF-8.

    The ``with`` statement guarantees the connection is closed and its
    resources released as soon as the body has been read.

    :param url: URL understood by ``urllib.request.urlopen``.
    :return: response body as a ``str`` (UTF-8 decoded).
    :raises urllib.error.URLError: if the URL cannot be opened.
    """
    with request.urlopen(url) as f:
        data = f.read()
        # For debugging: f.status / f.reason give the HTTP status,
        # and f.getheaders() the response headers.
    return data.decode('utf-8')
# The crux of the crawler: a regex capturing the src of every .jpg image tag.
reg = r'src="(.+?\.jpg)" width'
reg_img = re.compile(reg)  # compile once — the pattern is applied to the whole page
imglist = reg_img.findall(get_data('http://tieba.baidu.com/p/1753935195'))
# Download each matched image, naming files with a sequential index.
for x, img in enumerate(imglist):
    request.urlretrieve(img, '/Users/Hints/AppData/Local/Programs/Python/img/%s.jpg' % x)
# Note: I believe the essence of a web crawler lies in having a good regular expression.