Getting started with Python's requests library and XPath

This post introduces Python's requests library, covering installation, manually faking request headers, and sending POST requests. It also walks through the basics of XPath: what the XML Path Language is for, how it applies to HTML, and installing the lxml library needed to use XPath from PyCharm. The XPath Helper browser extension is recommended as a learning aid.


The requests library

The requests library is a higher-level wrapper around the standard library's urllib.
Installation:
pip install requests
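
Since requests wraps urllib, the same page can be fetched either way; a minimal comparison sketch:

from urllib.request import urlopen
import requests

# the standard-library way: urlopen returns raw bytes
raw = urlopen("http://www.sina.com.cn").read()
# the requests way: a Response object with decoding, headers, etc. built in
resp = requests.get("http://www.sina.com.cn")
print(len(raw), resp.status_code)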

import requests
import chardet

url = "http://www.sina.com.cn"
# send a GET request
response = requests.get(url=url)
# the encoding requests inferred from the response headers
print(response.encoding)
# Sina's actual encoding, detected from the raw bytes
print(chardet.detect(response.content))

ISO-8859-1 is the Latin-1 encoding. requests falls back to it when the response headers declare no charset, which is why the page text comes out garbled until we set the correct encoding ourselves:

import requests
import chardet

url = "http://www.sina.com.cn"
response = requests.get(url=url)
# detect the real encoding from the raw bytes, then tell requests to use it
charset = chardet.detect(response.content).get("encoding")
print(charset)
response.encoding = charset
print(response.text)
# save the correctly decoded page to disk
with open("sina.html", "w", encoding=charset) as f:
    f.write(response.text)

Task: scrape the images from the Sina homepage.

import os
import re
import requests
import chardet

url = "http://www.sina.com.cn"
response = requests.get(url=url)
# detect and apply the real encoding, as before
charset = chardet.detect(response.content).get("encoding")
response.encoding = charset
html = response.text

# capture every src attribute that ends in a common image extension
images = re.findall(r"src=\"(.*?\.(?:jpg|jpeg|png|gif))\"", html)
print(images)
print(len(images))

os.makedirs("image", exist_ok=True)
# enumerate supplies a running index to use as the file name
for index, item in enumerate(images):
    # some src values are protocol-relative ("//..."), so only
    # prepend the scheme when it is missing
    if not item.startswith("http"):
        item = "http:" + item
    print("Downloading image from {}".format(item))
    resp = requests.get(item)
    with open("image/" + str(index) + ".jpg", "wb") as f:
        # image data is bytes, so write resp.content rather than resp.text
        f.write(resp.content)
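
String concatenation works here, but urllib.parse.urljoin is a more robust way to absolutize URLs, since it handles protocol-relative and site-relative paths alike. A minimal sketch (the image paths are made up for illustration):

from urllib.parse import urljoin

base = "http://www.sina.com.cn"
# protocol-relative URL: urljoin fills in the scheme
print(urljoin(base, "//n.sinaimg.cn/example/logo.png"))
# site-relative path: urljoin fills in scheme and host
print(urljoin(base, "/static/pic.jpg"))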

Manually faking request headers

fake-useragent generates realistic User-Agent strings.
Installation:
pip install fake-useragent
python -m pip install fake-useragent

import requests
from fake_useragent import UserAgent

# UserAgent() can produce browser-specific or random UA strings:
# ua = UserAgent()
# print(ua.ie)      # an Internet Explorer UA
# print(ua.chrome)  # a Chrome UA
# print(ua.random)  # a random UA
headers = {
    "User-Agent": UserAgent().random
}
kw = input("Enter the text to search for: ")
params = {
    "wd": kw
}
url = "http://www.baidu.com/s?"
response = requests.get(url=url, params=params, headers=headers)
print(response.text)
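
requests percent-encodes the params dict and appends it to the URL as the query string; printing response.url shows the final URL it built:

# e.g. http://www.baidu.com/s?wd=... with kw percent-encoded
print(response.url)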

POST requests:

Youdao Translate is a good POST example: its form parameters are generated by the site's obfuscated JavaScript:

var t = n.md5(navigator.appVersion),
    r = "" + (new Date).getTime(),
    i = r + parseInt(10 * Math.random(), 10);
return {
    ts: r,
    bv: t,
    salt: i,
    sign: n.md5("fanyideskweb" + e + i + "Tbh5E8=q6U3EXe+&L[4c@")
};

Reading that JS, each parameter maps to Python as follows:

salt == i
r = "" + (new Date).getTime() is the current timestamp in milliseconds, so in Python: ts = r = int(time.time() * 1000)
i = r + parseInt(10 * Math.random(), 10) appends a random digit (0-9) to that timestamp string, so: salt = i = str(ts) + str(random digit)
sign: n.md5("fanyideskweb" + e + i + "Tbh5E8=q6U3EXe+&L[4c@"), where e is the word being translated, so: sign = hashlib.md5(("fanyideskweb" + word + salt + "Tbh5E8=q6U3EXe+&L[4c@").encode("utf-8")).hexdigest()
var t = n.md5(navigator.appVersion), so bv = t = the MD5 of the browser's appVersion string, e.g. "5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36"

import time
import hashlib
import random
import requests
from fake_useragent import UserAgent

url = "https://fanyi.youdao.com/translate?"
headers = {
    "User-Agent": UserAgent().random
}
word = input("Enter the word to translate: ")
# lts: current timestamp in milliseconds, as (new Date).getTime() produces in the JS
lts = str(int(time.time() * 1000))
# salt: the timestamp with a random digit appended (string concatenation, as in the JS)
salt = lts + str(random.randint(0, 9))
sign = hashlib.md5(("fanyideskweb" + word + salt + "Tbh5E8=q6U3EXe+&L[4c@").encode("utf-8")).hexdigest()
bv = hashlib.md5("5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.85 Safari/537.36".encode("utf-8")).hexdigest()
form_data = {
    "i": word,
    "from": "AUTO",
    "to": "AUTO",
    "smartresult": "dict",
    "client": "fanyideskweb",
    "salt": salt,
    "sign": sign,
    "lts": lts,
    "bv": bv,
    "doctype": "json",
    "version": "2.1",
    "keyfrom": "fanyi.web",
    "action": "FY_BY_REALTlME",  # copied verbatim from the site's JS
}
response = requests.post(url=url, data=form_data, headers=headers)
print(response.text)
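
Because doctype is json, the body can be parsed with response.json(). A sketch of pulling out the translated text, assuming the translateResult layout this interface usually returns:

# the nested layout below is an assumption about this interface's JSON
data = response.json()
print(data["translateResult"][0][0]["tgt"])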

Getting started with XPath

Regular expressions are one way to filter data out of a page; XPath is another, often more convenient one. XPath is short for XML Path Language, a query language for navigating XML documents that works on HTML too. For comparison: JSON looks like {"id": 1, "name": "zhang"}, while HTML is the HyperText Markup Language; XML and HTML share the nested-tag tree structure that XPath navigates.

An XML version of the same record (tag names inferred from the JSON example above):
<id>1</id>
<name>张三</name>
XPath rules:
nodename ----------- selects all child nodes of the named node
/ ------------------ selects from the root node
// ----------------- selects descendants of the current node, anywhere below it
. ------------------ selects the current node
.. ----------------- selects the parent of the current node
@ ------------------ selects attributes
To use XPath from Python (e.g. in PyCharm), install lxml:
pip install lxml
python -m pip install lxml
pip install lxml -i http://pypi.douban.com/simple/
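
Before the fuller HTML example below, a minimal sketch exercising each rule on a made-up XML document:

from lxml import etree

root = etree.XML("<root><a><b/></a></root>")
print(root.xpath("a")[0].tag)      # nodename: child named 'a'
print(root.xpath("//b")[0].tag)    # // : matching descendants anywhere
b = root.xpath("//b")[0]
print(b.xpath(".")[0].tag)         # .  : the current node, 'b'
print(b.xpath("..")[0].tag)        # .. : the parent node, 'a'
print(b.xpath("/root")[0].tag)     # /  : an absolute path from the root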

from lxml import etree

content = """
<div class="container">
    <ul class="first">
        <li><a href="#">内容1</a></li>
        <li><a href="http://www.taobao.com">内容2</a></li>
        <li class="active"><a href="#">内容3</a></li>
        <li><a href="#">内容4</a></li>
        <li><a href="http://www.baidu.com">内容5</a></li>
        <li><a href="#">内容6</a></li>
        <li><a href="#">内容7</a></li>
        <li><a href="#">内容8</a></li>
    </ul>
</div>
"""
# etree.HTML() builds an element tree using the etree.HTMLParser() parser
html = etree.HTML(content, etree.HTMLParser())
print(html)
# the text of every a tag under a li tag
res = html.xpath("//li/a/text()")
print(res)
# the href attribute of every a tag
res = html.xpath("//li/a/@href")
print(res)
# add a predicate to filter on an attribute value
res = html.xpath("//li/a[@href='http://www.baidu.com']/text()")
print(res)
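
Two more predicate forms that come up constantly, run against the same content document:

# li elements whose class attribute contains "active"
print(html.xpath("//li[contains(@class, 'active')]/a/text()"))
# the first li and the last li
print(html.xpath("//li[1]/a/text()"), html.xpath("//li[last()]/a/text()"))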

Recommended: the XPath Helper browser extension, which lets you test XPath expressions directly on a live page.
