"""Download images from the jandan.net "ooxx" board."""
import urllib.request
import os
import random  # only used by the commented-out proxy rotation in open_url()
def open_url(url):
    """Fetch a URL and return the raw response body as bytes."""
    req = urllib.request.Request(url)
    # Spoof a desktop browser User-Agent so the site does not reject the request.
    req.add_header("User-Agent",
                   "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
                   "(KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 Edge/16.16299")

    # Optional: route requests through a randomly chosen proxy IP.
    # proxies = ['119.6.144.70:81', '111.1.36.9:80', '203.144.144.162:8080']
    # proxy = random.choice(proxies)
    # proxy_support = urllib.request.ProxyHandler({'http': proxy})
    # opener = urllib.request.build_opener(proxy_support)
    # urllib.request.install_opener(opener)

    response = urllib.request.urlopen(req)
    html = response.read()
    return html

def get_page(url):
    """Placeholder: unused; the page IDs are hard-coded in download() below."""
    pass

def find_imgs(url):
    """Return the image addresses found in the page's <img src=...> tags."""
    html = open_url(url).decode('utf-8')
    img_addrs = []
    a = html.find('img src=')
    # Extracted addresses are protocol-relative, e.g.:
    # //wx1.sinaimg.cn/mw600/0076BSS5ly1g8v93qvajhj30bh0kumy8.jpg
    # //wx2.sinaimg.cn/large/0076BSS5ly1g8v998imasg306y07yx6q.gif
    while a != -1:
        # Look for a '.jpg' extension within 100 characters of the tag.
        b = html.find('.jpg', a, a + 100)
        if b != -1:
            # Skip the 9 characters of 'img src="' and keep the 4 of '.jpg'.
            img_addrs.append(html[a + 9:b + 4])
        else:
            # No .jpg nearby; move past this tag and keep searching.
            b = a + 9
        a = html.find('img src=', b)
    return img_addrs
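
# A sketch of an alternative extractor (not part of the original flow): the
# manual search above only keeps .jpg files, while the sample addresses in the
# comments show that .gif posts appear on the board too. The regex below grabs
# both, assuming the tags keep the plain <img src="..."> form shown in save_imgs.
def find_imgs_re(url):
    import re
    html = open_url(url).decode('utf-8')
    # Capture protocol-relative src values ending in .jpg or .gif.
    return re.findall(r'<img src="(//[^"]+?\.(?:jpg|gif))"', html)
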
def save_imgs(img_addrs):
    """Download each extracted image and save it under its original filename."""
    for each in img_addrs:
        filename = each.split('/')[-1]
        with open(filename, 'wb') as f:
            # The extracted addresses are protocol-relative, e.g.
            # //wx4.sinaimg.cn/mw600/0076BSS5ly1g8v6cc4mo0j30u011i0xo.jpg
            # (from a tag like <img src="...">), so prepend the scheme first.
            url = "http:" + each
            f.write(open_url(url))

def download(folder="ooxxxx", pages=['w', 'g', 'Q', 'A']):
    """Fetch a handful of jandan.net/ooxx comment pages and save their images."""
    # Create the target folder (reuse it if it already exists) and work inside it.
    os.makedirs(folder, exist_ok=True)
    os.chdir(folder)
    url = "http://jandan.net/ooxx"
    for i in pages:
        # Each page ID is base64 of "20191112-N"; only the character before "=="
        # changes between pages, e.g. http://jandan.net/ooxx/MjAxOTExMTItNw==#comments
        page_url = url + "/MjAxOTExMTItN" + i + "==#comments"
        img_addrs = find_imgs(page_url)
        save_imgs(img_addrs)
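
# The hard-coded page IDs above are just base64 of "YYYYMMDD-N": for example,
# base64("20191112-7") is "MjAxOTExMTItNw==". Below is a small sketch of building
# a page URL for an arbitrary date and page number; build_page_url is a
# hypothetical helper and assumes jandan.net still uses this URL scheme.
def build_page_url(date="20191112", page=7):
    import base64
    page_id = base64.b64encode(f"{date}-{page}".encode()).decode()
    return "http://jandan.net/ooxx/" + page_id + "#comments"
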
if __name__ == '__main__':
    download()
    print('OK')