import urllib.request

url = 'http://www.baidu.com'
# Send the GET request. Use a context manager so the HTTP response (and its
# underlying socket) is always closed — the original never closed it.
with urllib.request.urlopen(url) as response:
    # HTTP status code, e.g. 200
    print(response.getcode())
    # Final URL of the response (after any redirects)
    print(response.geturl())
    # Response headers as a list of (name, value) tuples
    print(response.getheaders())
    # Read the body and decode it to text
    print(response.read().decode('UTF-8'))
# Download the page and save it to a file named baidu.html
urllib.request.urlretrieve(url, filename='baidu.html')
url转码:
import urllib.request

# URL whose query string is percent-encoded (wd=迪丽热巴)
url = 'https://www.baidu.com/s?wd=%E8%BF%AA%E4%B8%BD%E7%83%AD%E5%B7%B4'
# URL decoding example:
# print(urllib.request.unquote(url))
# output: https://www.baidu.com/s?wd=迪丽热巴
# url = urllib.request.unquote(url)
# print(url)
headers = {
'User-Agent':' Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
# Build a Request object carrying browser-like headers
request = urllib.request.Request(url, headers=headers)
# Send the request; the context manager closes the response even on error
# (the original leaked the connection).
with urllib.request.urlopen(request) as response:
    # print(response.read().decode('utf-8'))
    # Write the raw response body to a file
    with open('baidu.html', 'wb') as f:
        f.write(response.read())
post请求方法:
import urllib.request
import urllib.parse
import json

# Endpoint that returns KFC store listings (expects a POST with form data)
url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
headers = {
'User-Agent':' Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
# Form data as captured from the browser's request
form_data = {
    'cname': '',
    'pid': '',
    'keyword': '北京',
    'pageIndex': 1,
    'pageSize': 10,
}
# urllib requires the POST body to be URL-encoded bytes
# (the `requests` library would do this automatically).
encoded_data = urllib.parse.urlencode(form_data).encode('UTF-8')
# Supplying data= makes urlopen issue a POST request
request = urllib.request.Request(url, data=encoded_data, headers=headers)
# Context manager closes the response — the original leaked the socket
with urllib.request.urlopen(request) as response:
    # The endpoint returns JSON; parse it into a dict
    data = json.loads(response.read().decode('UTF-8'))
# print(data['Table1'])
for store in data['Table1']:
    print(store)
爬取多页数据,方法1:
import urllib.request
import urllib.parse
import json

# Endpoint that returns KFC store listings (expects a POST with form data)
url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
headers = {
'User-Agent':' Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
# Fetch pages 1 through 10
for page in range(1, 11):
    # Form data as captured from the browser; only pageIndex changes per page
    form_data = {
        'cname': '',
        'pid': '',
        'keyword': '北京',
        'pageIndex': page,
        'pageSize': 10,
    }
    # URL-encode the form data into bytes for the POST body
    encoded_data = urllib.parse.urlencode(form_data).encode('UTF-8')
    # Supplying data= makes urlopen issue a POST request
    request = urllib.request.Request(url, data=encoded_data, headers=headers)
    # Close each response promptly — the original leaked one socket per page
    with urllib.request.urlopen(request) as response:
        # The endpoint returns JSON; parse it into a dict
        data = json.loads(response.read().decode('UTF-8'))
    # print(data['Table1'])
    print(f'第{page}页数据')
    for store in data['Table1']:
        print(store)
爬取多页数据,方法2:仅将formData中的pageSize改为100,其它不变;这种方法不一定可用。
import urllib.request
import urllib.parse
import json

# Endpoint that returns KFC store listings (expects a POST with form data)
url = 'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword'
headers = {
'User-Agent':' Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/102.0.0.0 Safari/537.36'
}
# Form data as captured from the browser; pageSize bumped to 100 so a
# single request covers many pages (servers may cap or reject this)
form_data = {
    'cname': '',
    'pid': '',
    'keyword': '北京',
    'pageIndex': 1,
    'pageSize': 100,
}
# URL-encode the form data into bytes for the POST body
encoded_data = urllib.parse.urlencode(form_data).encode('UTF-8')
# Supplying data= makes urlopen issue a POST request
request = urllib.request.Request(url, data=encoded_data, headers=headers)
# Context manager closes the response — the original leaked the socket
with urllib.request.urlopen(request) as response:
    # The endpoint returns JSON; parse it into a dict
    data = json.loads(response.read().decode('UTF-8'))
print(data['Table1'])
# for store in data['Table1']:
#     print(store)