Analysis
Baidu Images loads its results via Ajax, so scraping the normal page URL only gets you the images rendered on the first screen, i.e. the first 30 (or 60) of them.
The analysis goes like this: drag the page's scroll bar down while watching the XHR requests in the browser's developer tools, and you will find the JSON requests that carry the data we need. The following is what that analysis turns up.
1. JSON URL: http://image.baidu.com/search/acjson, plus the query parameters below.
2. Parameters:
In the link above, word is the search keyword, pn is the offset of the first result (the script below sets it to page index × 30), and rn is the number of images per page (30 by default). The remaining parameters can be ignored for now; a sample request is sketched below.
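For example, a request for the second page of results might look like this (the keyword cat and the offset 30 are made up for illustration; tn, ipn and rn are taken from the parameter list used in the script below):

http://image.baidu.com/search/acjson?tn=resultjson_com&ipn=rj&word=cat&pn=30&rn=30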
3. JSON data
Fetching the JSON URL above returns a JSON document. The data array inside it holds the image information, and each element of data carries an objURL link, for example:
"objURL":"ippr_z2C$qAzdH3FAzdH3Ft4w2jf_z&e3B4wvx_z&e3BvgAzdH3Fu5674AzdH3Fda8n8aAzdH3Fd8AzdH3F8b8ccaacas17ir6i1lf1rn_z&e3B3r2",
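Abbreviated, the response has roughly this shape (only the fields the script below relies on are shown, with placeholder values; the real payload carries many more):

{
  "data": [
    {
      "objURL": "ippr_z2C$q...",
      "replaceUrl": [{"ObjURL": "..."}, {"ObjURL": "..."}],
      "fromURLHost": "..."
    }
  ]
}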
replaceUrl also contains an ObjURL, but the first objURL looks encrypted. A quick Baidu search shows that the decryption is simple: the key is a character-correspondence table with two kinds of mappings:
1. several characters map to one character:
'_z2C$q' => ':'
'_z&e3B' => '.'
'AzdH3F' => '/'
2. single characters map to single characters; the mapping table is the in_table/out_table pair in the code below.
Decoding objURL this way yields the real image link:
def decode_url(url):
    # 2) single-character substitution table
    in_table = '0123456789abcdefghijklmnopqrstuvw'
    out_table = '7dgjmoru140852vsnkheb963wtqplifca'
    translate_table = str.maketrans(in_table, out_table)
    # 1) multi-character sequences are replaced first
    mapping = {'_z2C$q': ':', '_z&e3B': '.', 'AzdH3F': '/'}
    for k, v in mapping.items():
        url = url.replace(k, v)
    return url.translate(translate_table)
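As a sanity check, running the sample objURL from above through decode_url gives back a plain image link; note how ippr maps to http and the 3r2 suffix to jpg (the result below was worked out by hand from the two mapping tables, so treat it as illustrative):

>>> decode_url('ippr_z2C$qAzdH3FAzdH3Ft4w2jf_z&e3B4wvx_z&e3BvgAzdH3Fu5674AzdH3Fda8n8aAzdH3Fd8AzdH3F8b8ccaacas17ir6i1lf1rn_z&e3B3r2')
'http://images.macx.cn/forum/201310/21/181550050lduhprhd9sdp3.jpg'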
Getting to work
With the information above we can basically get going. All we need is a loop that requests one page of JSON at a time, parses it to get the URL of every image on that page, and downloads them. The requests library handles the HTTP calls.
# coding: UTF-8
import os
import requests

path = ''  # download directory, set by start()
def download(url, filename, fromHost):
    # fromHost is passed along for reference but not used here
    try:
        ir = requests.get(url)
        ir.raise_for_status()
        filePathName = os.path.join(path, filename)
        with open(filePathName, 'wb') as f:
            f.write(ir.content)
        print('download %s success' % url)
        return True
    except Exception as e:
        print('download error: %s' % filename)
        print(e)
        return False
def request(params):
    headers = {
        'Accept': 'text/html,application/xhtml+xml,application/xml;',
        'Accept-Encoding': 'gzip',
        'Accept-Language': 'zh-CN,zh;q=0.8',
        'Referer': 'http://www.baidu.com/',
        'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/42.0.2311.90 Safari/537.36',
    }

    def decode_url(url):
        in_table = '0123456789abcdefghijklmnopqrstuvw'
        out_table = '7dgjmoru140852vsnkheb963wtqplifca'
        translate_table = str.maketrans(in_table, out_table)
        mapping = {'_z2C$q': ':', '_z&e3B': '.', 'AzdH3F': '/'}
        for k, v in mapping.items():
            url = url.replace(k, v)
        return url.translate(translate_table)

    try:
        url = 'http://image.baidu.com/search/acjson'
        response = requests.get(url, params=params, headers=headers)
        response.raise_for_status()
        response.encoding = response.apparent_encoding
        items = response.json()['data']
        for item in items:
            image_urls = []
            # decoded objURL first, plain ObjURL from replaceUrl as a fallback
            if 'objURL' in item:
                image_urls.append(decode_url(item['objURL']))
            if 'replaceUrl' in item and len(item['replaceUrl']) == 2:
                image_urls.append(item['replaceUrl'][1]['ObjURL'])
            for objUrl in image_urls:
                filename = os.path.split(objUrl)[1].split('?')[0]
                if filename and '.' in filename:
                    fromHost = item['fromURLHost']
                    print('Downloading from %s' % objUrl)
                    if download(objUrl, filename, fromHost):
                        break  # stop at the first URL that downloads successfully
    except Exception as e:
        print(e)
        return 'get url error'
def search(keyword, minpage, maxpage):
    params = {
        'tn': 'resultjson_com',
        'word': keyword,
        'queryWord': keyword,
        'ie': 'utf-8',
        'cg': '',
        'ct': '201326592',
        'fp': 'result',
        'cl': '2',
        'lm': '-1',
        'rn': '30',
        'ipn': 'rj',
    }
    for i in range(minpage, maxpage):
        print('Download page %d:' % i)
        params['pn'] = '%d' % (i * 30)  # pn is the result offset, not the page index
        request(params)
    print('download end')
def start(keyword, startpage, endpage, inpath=''):
    global path
    if len(inpath) == 0:
        inpath = os.path.join(os.curdir, keyword)
    path = inpath
    print('download image to %s' % path)
    if not os.path.exists(path):
        os.mkdir(path)
    search(keyword, startpage, endpage)
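To use it, call start with a keyword and a page range; for example (the keyword here is arbitrary, and the range runs from startpage inclusive to endpage exclusive, 30 images per page):

if __name__ == '__main__':
    start('wallpaper', 0, 5)  # first 5 pages, saved to ./wallpaper by default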