# 1. 简单网页 — fetch a simple web page and save it to disk
import urllib.request

# Download the JD homepage; the context manager guarantees the HTTP
# response is closed even if read() raises (original leaked the handle).
with urllib.request.urlopen("https://www.jd.com") as response:
    data = response.read()          # raw page bytes
    status = response.getcode()     # HTTP status code, e.g. 200
    final_url = response.geturl()   # URL after any redirects
print(data)

# Persist the raw bytes ("wb": page content is bytes, not text) so the
# page can be inspected offline.
with open("E:/python/1_1.html", "wb") as fhandle:
    fhandle.write(data)

print(status)
print(final_url)
# 2. 模拟浏览器 — impersonate a browser via a User-Agent header
import urllib.request

url = "https://blog.youkuaiyun.com/java_zhangshuai/article/details/81749208"
# Some sites reject the default Python User-Agent; present a real
# browser signature instead.
headers = ("User-Agent",
           "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36")
opener = urllib.request.build_opener()
opener.addheaders = [headers]

# Close the response deterministically (original left it open).
with opener.open(url) as response:
    data = response.read()
print(data)

with open("E:/python/2_1.html", "wb") as fhandle:
    fhandle.write(data)
# 3. HTTP 请求 — GET with a percent-encoded query, then a POST loop
import urllib.request

keywd = "hello"
# Percent-encode the keyword so non-ASCII / reserved characters are
# safe to embed in the query string.
keywd = urllib.request.quote(keywd)
url = "http://www.baidu.com/s?wd=" + keywd

req = urllib.request.Request(url)
# Context managers close both the HTTP response and the output file
# (original closed only the file, and only on the happy path).
with urllib.request.urlopen(req) as response:
    data = response.read()
with open("E:/python/3_1.html", "wb") as fhandle:
    fhandle.write(data)
import urllib.parse

# POST a small form twice; indentation of the loop/try bodies restored
# (it had been stripped, which made the script a syntax error).
url = "http://www.iqianyue.com/mypost"
data = {"name": "zhangsan", "pass": "zhangsanpass"}
# urlencode builds "name=...&pass=..."; encode to bytes as required by
# urlopen's data argument.
postdata = urllib.parse.urlencode(data).encode("utf-8")

for x in range(1, 3):
    try:
        req = urllib.request.Request(url, postdata)
        req.add_header("User-Agent",
                       "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36")
        data = urllib.request.urlopen(req).read()
        # "with" ensures the file closes even if write() fails.
        with open("E:/python/3_2.html", "wb") as fhandle:
            fhandle.write(data)
        print(len(data))
    except Exception as e:
        # Best-effort demo: report the failure and retry on next pass.
        print("出现异常--->"+str(e))
# 4. 爬取某电商商品列表下的图片集合 — scrape product thumbnails from an e-commerce listing
import urllib.request
import re
def craw(url, page):
    """Download every product thumbnail found on one JD listing page.

    Images are saved as E:/python/爬虫/<page><index>.jpg. Indentation of
    the body has been restored (it had been stripped from the source).

    Args:
        url: Listing-page URL to scrape.
        page: Page number, used as the filename prefix.
    """
    html1 = str(urllib.request.urlopen(url).read())
    # Narrow the document to the product-list region before matching
    # individual <img> tags. Raw strings keep the regex escapes literal.
    pat1 = r'<div id="plist".+? <div class="page clearfix">'
    result1 = re.compile(pat1).findall(html1)
    if not result1:
        # Page layout changed or request was blocked — nothing to do.
        # (Original indexed result1[0] unconditionally and crashed here.)
        return
    section = result1[0]
    pat2 = r'<img width="220" height="220" data-img="1" src="//(.+?\.jpg)">'
    imagelist = re.compile(pat2).findall(section)

    x = 1
    for imageurl in imagelist:
        print(imageurl)
        imagename = "E:/python/爬虫/" + str(page) + str(x) + ".jpg"
        imageurl = "https://" + imageurl  # src attributes are protocol-relative
        try:
            urllib.request.urlretrieve(imageurl, filename=imagename)
        except Exception:
            # BUG FIX: the original bare "except:" incremented x here AND
            # fell through to the increment below, so every failed
            # download skipped a filename index. Best-effort: ignore the
            # failure and move on.
            pass
        x += 1
# Crawl listing pages 1 through 9 (indentation of the loop body restored;
# it had been stripped, leaving craw(url, i) outside the loop).
for i in range(1, 10):
    url = "https://list.jd.com/list.html?cat=9192,12632,12633&page=" + str(i)
    craw(url, i)