python3-爬虫【requests】模块

网页请求

get请求的2种写法
1、requests.request('GET', url)
2、requests.get(url)
import requests
# Send the request and get a Response object back.
# (requests.request('GET', url) is the long form of requests.get(url).)
response = requests.request('GET', 'http://www.baidu.com')
# HTTP status code
print(response.status_code)
# Cookies the server set (a RequestsCookieJar)
print(response.cookies)
# Guessed text encoding (often wrong for Chinese pages; override when needed)
print(response.encoding)
# Response headers
print(response.headers)
# Body decoded as text
print(response.text)
# Raw body bytes
print(response.content)
# Parse the body as JSON. Baidu's homepage is HTML, not JSON, so the
# original unguarded call crashed here; catch the decode error instead.
try:
    print(response.json())
except ValueError:
    print('response body is not valid JSON')

浏览器伪装

import requests
# Disguise as a real browser by supplying a User-Agent header.
user_agent = 'Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko'
response = requests.get('http://www.baidu.com', headers={'User-Agent': user_agent})
print(response.status_code)

url编码

import requests
# `params` values are URL-encoded automatically, so the Chinese search
# term comes out as %XX escapes in the final URL.
query = {'wd': '爬虫', 'ie': 'utf-8'}
browser = {'User-Agent': 'Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;360SE)'}
response = requests.get('https://www.baidu.com/baidu', headers=browser, params=query)
print(response.url)

示例:淘宝搜索+翻页

import requests
# Browser disguise
headers = {'User-Agent': 'Mozilla/4.0(compatible;MSIE7.0;WindowsNT5.1;360SE)'}
# Search term
query = 'python人工智能'
# Taobao search endpoint
base_url = 'https://s.taobao.com/search'
# Each result page is offset by 44 items; fetch the first three pages.
for page_index in range(3):
    params = {'s': page_index * 44, 'q': query}
    result = requests.get(url=base_url, headers=headers, params=params)
    print(result.url)

打印结果

https://s.taobao.com/search?s=0&q=python%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD
https://s.taobao.com/search?s=44&q=python%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD
https://s.taobao.com/search?s=88&q=python%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD

post请求

import requests
# POST form data; requests encodes the dict as
# application/x-www-form-urlencoded in the request body.
payload = {'key1': 'value1', 'key2': 'value2'}
response = requests.post('http://httpbin.org/post', data=payload)
print(response.text)

以json形式发送post请求

import requests
url = 'http://httpbin.org/post'
# Use the `json=` parameter: requests serializes the dict itself AND sets
# the Content-Type: application/json header. The previous approach
# (data=json.dumps(...)) sent the JSON text without that header, so the
# server would not recognize the body as JSON.
payload = {'key1': 'value1', 'key2': 'value2'}
r = requests.post(url, json=payload)
print(r.text)

获取cookies

import requests
url = 'https://www.youkuaiyun.com/'
response = requests.get(url)
cookies = response.cookies  # server-set cookies (RequestsCookieJar)
print(cookies)  # <class 'requests.cookies.RequestsCookieJar'>
for k, v in cookies.items():  # print each cookie name/value pair
    print('%20s | %s' % (k, v))
print(cookies.get_dict())  # plain-dict view of the jar
# Bug fix: `url` was undefined at this point in the original snippet
# (NameError); reuse the same URL and send the captured cookies back.
print(requests.get(url, cookies=cookies))

会话维持

import requests
# A Session persists cookies across requests, so the cookie set by the
# first call is still present on the second one.
session = requests.Session()
session.get('http://httpbin.org/cookies/set/name/yellow')
reply = session.get('http://httpbin.org/cookies')
print(reply.text)
打印结果
{"cookies": {"name": "yellow"}}

代理IP

import requests
# Route the request through a proxy, e.g. {'http': 'http://host:port'}.
# NOTE: the empty string here is a placeholder — no proxy is applied.
proxies = {'http': ''}
payload = requests.get('http://httpbin.org/get', proxies=proxies).json()
print(payload)
print(payload['origin'])

认证设置

  • 有些代理需要用户名+密码
import requests
# Some proxies/servers require a username + password (HTTP basic auth);
# fill in the placeholders below before running.
url = ''
account = ''
password = ''
response = requests.get(url, auth=(account, password))
print(response.status_code)

超时设置

float or tuple(t1, t2)
t1:发送时间(请求连接)
t2:接收数据等待的时间(连接成功后)【float时默认t2】
import requests
# timeout accepts a float (read timeout) or a (connect, read) tuple.
# 0.1 s is deliberately tiny so the request usually times out.
try:
    response = requests.get('https://www.taobao.com', timeout=0.1)
    print(response.status_code)
except requests.exceptions.RequestException as e:
    # Catch only requests' own errors (Timeout, ConnectionError, ...)
    # instead of a blanket `except Exception`, so unrelated bugs surface.
    print(e)

其它补充

下载大文件

stream

证书

cert

复制headers,转字典

import re

# Paste raw request headers (copied from browser dev tools) between the
# triple quotes, one "Name: value" per line; the regex turns them into a
# dict usable as requests' headers= argument.
headers = '''

'''.strip()
headers = dict(re.findall('([a-zA-Z-]+): (.+)', headers))
print(headers)

附录

encn
post n. 邮件、岗位;vt. 邮递、张贴
proxy代理人、委托书
cookie曲奇、小甜饼
status状态、地位
stream溪流
certificate证书、文凭
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包

打赏作者

小基基o_O

您的鼓励是我创作的巨大动力

¥1 ¥2 ¥4 ¥6 ¥10 ¥20
扫码支付:¥1
获取中
扫码支付

您的余额不足,请更换扫码支付或充值

打赏作者

实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值