Basic tensor operations in LibTorch
Assignment
Random generation
- torch.rand
torch.rand(*size, *, generator=None, out=None, dtype=None, layout=torch.strided, device=None, requires_grad=False, pin_memory=False)
Parameters: size is the shape, i.e. the dimensions of the multi-dimensional array. For example, **(2,3,4)** means 2 groups, each with 3 rows and 4 columns.
The parameters after * are keyword-only arguments.
- torch::rand
at::Tensor torch::rand(at::IntArrayRef size, c10::optional<at::DimnameList> names, at::TensorOptions options = {})
- The simplest usage:
torch::Tensor tensor = torch::rand({2,3,4});
cout << tensor << endl;
// Randomly generates a tensor: 2 groups, each a 3-row, 4-column two-dimensional array
/*
$ ./test
(1,.,.) =
  0.3868  0.3094  0.0257  0.4412
  0.6921  0.5190  0.2776  0.9405
  0.0688  0.4216  0.9570  0.5154

(2,.,.) =
  0.0201  0.4228  0.2302  0.3697
  0.0914  0.2225  0.7399  0.0874
  0.7590  0.7395  0.0321  0.1197
[ CPUFloatType{2,3,4} ]
*/
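The options parameter in the C++ signature selects dtype and device at creation time. A minimal sketch (torch::kFloat64 and torch::kCUDA are standard LibTorch enumerators; the GPU line assumes a CUDA-enabled build):
torch::Tensor t64 = torch::rand({2,3}, torch::TensorOptions().dtype(torch::kFloat64));
cout << t64 << endl; // prints [ CPUDoubleType{2,3} ]
// With a CUDA build, the same call can target the GPU:
// torch::Tensor tg = torch::rand({2,3}, torch::TensorOptions().device(torch::kCUDA));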
Custom initialization
- Defining values by hand:
// Use the lowercase tensor factory function rather than constructing the Tensor class directly
torch::Tensor tensor = torch::tensor({{2,3,4},{5,6,7}});
cout << tensor << endl;
/*
$ ./test
 2  3  4
 5  6  7
[ CPULongType{2,3} ]
*/
- Building a 1-D tensor from a vector:
vector<int> array;
array.push_back(1);
array.push_back(2);
array.push_back(3);
torch::Tensor tensor = torch::tensor(array);
cout << tensor << endl;
/*
$ ./test
 1
 2
 3
[ CPULongType{3} ]
*/
- Building a 2-D tensor from a NumCpp NdArray:
nc::NdArray<int> a = {{1,2,3},{4,5,6}};
cout << "NdArray: \n" << a << endl;
auto options = torch::TensorOptions().dtype(torch::kInt32);
auto data = a.data();
// from_blob wraps the existing buffer without copying, so clone() to give the tensor its own memory
torch::Tensor tensor = torch::from_blob(data, {a.numRows(), a.numCols()}, options).clone();
cout << "tensor: \n" << tensor << endl;
/*
$ ./test
NdArray:
[[1, 2, 3, ]
[4, 5, 6, ]]
tensor:
 1  2  3
 4  5  6
[ CPUIntType{2,3} ]
*/
Changing tensor shape
resize and reshape:
- With an unchanged element count, as in the example below, the two give identical results. The real difference: reshape returns a tensor with the same elements in the new shape and requires the element count to match, while resize_ modifies the tensor in place and simply truncates or grows the underlying storage when the counts differ (see the sketch after the example below).
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "tensor: \n" << tensor << endl;
cout << "tensor resize: \n" << tensor.resize_({3,2}) << endl;
cout << "tensor reshape: \n" << tensor.reshape({3,2}) << endl;
cout << "resize function: \n" << torch::resize(tensor, {3,2}) << endl;
cout << "reshape function: \n" << torch::reshape(tensor, {3,2}) << endl;
/*
$ ./test
tensor:
 1  2  3
 4  5  6
[ CPULongType{2,3} ]
tensor resize:
 1  2
 3  4
 5  6
[ CPULongType{3,2} ]
tensor reshape:
 1  2
 3  4
 5  6
[ CPULongType{3,2} ]
resize function:
 1  2
 3  4
 5  6
[ CPULongType{3,2} ]
reshape function:
 1  2
 3  4
 5  6
[ CPULongType{3,2} ]
*/
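The difference becomes visible once the requested shape changes the element count; a minimal sketch of the expected behavior:
torch::Tensor t = torch::tensor({{1,2,3},{4,5,6}});
// t.reshape({2,2});  // would throw: reshape cannot change the element count (6 != 4)
t.resize_({2,2});     // legal in place: the storage is kept and simply truncated
cout << t << endl;    // first four values in memory order:
                      //  1  2
                      //  3  4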
Matrix transpose
at::Tensor at::transpose(const at::Tensor &self, int64_t dim0, int64_t dim1);
// Takes two or three arguments
// The last two arguments name the dimensions to swap; indices start at 0
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "tensor: \n" << tensor << endl;
cout << "Transpose: \n" << tensor.transpose(0, 1) << endl;
cout << "Transpose: \n" << torch::transpose(tensor, 0, 1) << endl;
/*
$ ./test
tensor:
1 2 3
4 5 6
[ CPULongType{2,3} ]
Transpose:
1 4
2 5
3 6
[ CPULongType{3,2} ]
Transpose:
1 4
2 5
3 6
[ CPULongType{3,2} ]
*/
Dimension squeezing: squeeze and unsqueeze
at::Tensor at::squeeze(const at::Tensor &self);
// Compresses the tensor's dimensions, removing every dimension of size 1
at::Tensor at::squeeze(const at::Tensor &self, int64_t dim);
// Squeezes only dimension dim: it is removed if its size is 1, otherwise left untouched
at::Tensor at::unsqueeze(const at::Tensor &self, int64_t dim);
// Expands the tensor by inserting a dimension of size 1 at the given position
// dim indices start at 0
torch::Tensor tensor = torch::empty({1,2,1,3,1});
cout << "original size: \n" << tensor.sizes() << endl;
tensor = torch::squeeze(tensor,2);
cout << "size: \n" << tensor.sizes() << endl;
tensor = torch::squeeze(tensor);
cout << "size: \n" << tensor.sizes() << endl;
tensor = torch::unsqueeze(tensor, 0);
cout << "unsqueeze size: \n" << tensor.sizes() << endl;
/*
$ ./test
original size:
[1, 2, 1, 3, 1]
size:
[1, 2, 3, 1]
size:
[2, 3]
unsqueeze size:
[1, 2, 3]
*/
Addition
- torch.add
torch.add(input, other, *, alpha=1, out=None)
- Adds a scalar or another tensor to input. If both alpha and other are specified, each element of other is scaled by alpha before being used.
- If other is a tensor, its shape must be broadcastable to the shape of input: two tensors can only be added when their shapes are broadcast-compatible.
- at::add
at::Tensor at::add(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha = 1)
- Returns a new Tensor; the operands to be added are all passed as arguments (nothing is modified in place). Scaling with alpha is shown in the sketch after the example below.
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "tensor: \n" << tensor << endl;
torch::Tensor tensor2 = torch::tensor({{2,3,4},{5,6,7}});
cout << "tensor2: \n" << tensor2 << endl;
tensor = torch::add(tensor, tensor2);
cout << "After add:\n" << tensor << endl;
/*
$ ./test
tensor:
 1  2  3
 4  5  6
[ CPULongType{2,3} ]
tensor2:
 2  3  4
 5  6  7
[ CPULongType{2,3} ]
After add:
  3   5   7
  9  11  13
[ CPULongType{2,3} ]
*/
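The alpha argument scales the second operand before the addition, computing self + alpha * other; a small sketch:
torch::Tensor a = torch::tensor({1,2,3});
torch::Tensor b = torch::tensor({10,10,10});
torch::Tensor c = torch::add(a, b, /*alpha=*/2); // a + 2 * b
cout << c << endl; // 21 22 23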
Subtraction
- at::sub
at::Tensor at::sub(const at::Tensor &self, const at::Tensor &other, const at::Scalar &alpha = 1)
- Works just like addition (a Scalar overload also exists).
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "tensor: \n" << tensor << endl;
torch::Tensor tensor2 = torch::tensor({{2,3,4},{5,6,7}});
cout << "tensor2: \n" << tensor2 << endl;
tensor = torch::sub(tensor, tensor2);
cout << "After sub:\n" << tensor << endl;
/*
$ ./test
tensor:
 1  2  3
 4  5  6
[ CPULongType{2,3} ]
tensor2:
 2  3  4
 5  6  7
[ CPULongType{2,3} ]
After sub:
-1 -1 -1
-1 -1 -1
[ CPULongType{2,3} ]
*/
Multiplication
Tensor times scalar
- mul:
at::Tensor at::mul(const at::Tensor &self, const at::Scalar &other)
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "tensor: \n" << tensor << endl;
tensor = torch::mul(tensor, 10);
cout << "After mul: \n" << tensor << endl;
/*
$ ./test
tensor:
 1  2  3
 4  5  6
[ CPULongType{2,3} ]
After mul:
 10  20  30
 40  50  60
[ CPULongType{2,3} ]
*/
Dot product
- torch::dot
at::Tensor at::dot(const at::Tensor &self, const at::Tensor &tensor)
- Note: torch::dot currently supports only 1-D tensors.
torch::Tensor tensor = torch::tensor({1,2,3});
cout << "tensor: \n" << tensor << endl;
torch::Tensor tensor2 = torch::tensor({2,3,4});
cout << "tensor2: \n" << tensor2 << endl;
torch::Tensor tensor3 = torch::dot(tensor, tensor2);
cout << "After dot:\n" << tensor3 << endl;
/*
$ ./test
tensor:
 1
 2
 3
[ CPULongType{3} ]
tensor2:
 2
 3
 4
[ CPULongType{3} ]
After dot:
20
[ CPULongType{} ]
*/
Multiplying multi-dimensional tensors
- matmul:
at::Tensor at::matmul(const at::Tensor &self, const at::Tensor &other)
- Make sure the matrix dimensions are compatible before multiplying.
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "tensor: \n" << tensor << endl;
torch::Tensor tensor2 = torch::tensor({{2,3,4},{5,6,7}});
cout << "tensor2: \n" << tensor2 << endl;
torch::Tensor tensor3 = torch::matmul(tensor, tensor2.reshape({3,2}));
cout << "tensor2: \n" << tensor2.reshape({3,2}) << endl;
cout << "After mul:\n" << tensor3 << endl;
/*
$ ./test
tensor:
 1  2  3
 4  5  6
[ CPULongType{2,3} ]
tensor2:
 2  3  4
 5  6  7
[ CPULongType{2,3} ]
tensor2:
 2  3
 4  5
 6  7
[ CPULongType{3,2} ]
After mul:
 28  34
 64  79
[ CPULongType{2,2} ]
*/
Division
Matrix division:
- Computes A / B, i.e. A multiplied by the inverse of matrix B.
- Implemented by hand:
torch::Tensor tensor = torch::tensor({{1.0,2.0},{4.0,5.0}});
torch::Tensor tensor2 = torch::tensor({{2.0,3.0},{5.0,6.0}});
cout << "tensor: \n" << tensor << endl;
cout << "tensor2: \n" << tensor2 << endl;
tensor = torch::matmul(tensor, torch::inverse(tensor2));
cout << "After matmul: \n" << tensor << endl;
/*
$ ./test
tensor:
 1  2
 4  5
[ CPUFloatType{2,2} ]
tensor2:
 2  3
 5  6
[ CPUFloatType{2,2} ]
After matmul:
 1.3333 -0.3333
 0.3333  0.6667
[ CPUFloatType{2,2} ]
*/
Element-wise division:
- Not the same thing as matrix division.
- Element-wise division of A by B divides every element of A by the corresponding element of B.
- div():
at::Tensor at::div(const at::Tensor &self, const at::Tensor &other);
at::Tensor at::div(const at::Tensor &self, const at::Scalar &other);
at::Tensor at::div(const at::Tensor &self, const at::Tensor &other, c10::optional<c10::string_view> rounding_mode);
at::Tensor at::div(const at::Tensor &self, const at::Scalar &other, c10::optional<c10::string_view> rounding_mode);
/*
rounding_mode: a string selecting how the result is rounded; three cases:
  None:  default behavior, no rounding is performed.
  trunc: rounds the result toward zero, like C-style integer division.
  floor: rounds the result down toward negative infinity, like Python's floor division.
*/
torch::Tensor tensor = torch::tensor({{1.0,2.0},{4.0,5.0}});
torch::Tensor tensor2 = torch::tensor({{2.0,3.0},{5.0,6.0}});
cout << "tensor: \n" << tensor << endl;
cout << "tensor2: \n" << tensor2 << endl;
torch::Tensor tensor3 = torch::div(tensor, tensor2);
cout << "After div: \n" << tensor3 << endl;
/*
$ ./test
tensor:
 1  2
 4  5
[ CPUFloatType{2,2} ]
tensor2:
 2  3
 5  6
[ CPUFloatType{2,2} ]
After div:
 0.5000  0.6667
 0.8000  0.8333
[ CPUFloatType{2,2} ]
*/
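The rounding_mode overload can be exercised the same way; a sketch assuming a LibTorch version recent enough to have it (it appeared around 1.8):
torch::Tensor a = torch::tensor({7.0, -7.0});
torch::Tensor b = torch::tensor({2.0, 2.0});
cout << torch::div(a, b) << endl;          //  3.5000 -3.5000
cout << torch::div(a, b, "trunc") << endl; //  3 -3  (toward zero)
cout << torch::div(a, b, "floor") << endl; //  3 -4  (toward negative infinity)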
Inverse
- Function prototype:
at::Tensor at::inverse(const at::Tensor &self);
// Note: the input tensor must not be an integer type.
// inverse is only implemented for floating-point tensors — the result is generally
// non-integer, so an integer input raises an error (convert it first; see the sketch below).
torch::Tensor tensor = torch::tensor({{1.0,2.0},{4.0,5.0}});
cout << "After inverse: \n" << torch::inverse(tensor);
/*
$ ./test
After inverse:
-1.6667  0.6667
 1.3333 -0.3333
[ CPUFloatType{2,2} ]
*/
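Since only floating-point tensors are accepted, an integer tensor can be converted with .to() before inverting; a minimal sketch:
torch::Tensor itensor = torch::tensor({{1,2},{4,5}});          // CPULongType
torch::Tensor inv = torch::inverse(itensor.to(torch::kFloat)); // convert, then invert
cout << inv << endl; // same result as above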
Copying
- clone: returns a tensor with the same shape, dtype, and device as the source. It does not share data memory with the source, but it remains on the autograd graph, so gradients can flow back to the source.
at::Tensor at::clone(const at::Tensor &self, c10::optional<at::MemoryFormat> memory_format = c10::nullopt);
/*
memory_format:
  torch.contiguous_format: the tensor is allocated in dense, non-overlapping memory, with strides in decreasing order.
  torch.channels_last: dense, non-overlapping memory with strides[0] > strides[2] > strides[3] > strides[1] == 1, i.e. NHWC order.
  torch.preserve_format: used by functions such as clone to keep the input's memory format; if the input is in dense, non-overlapping memory the output copies its strides, otherwise the output falls back to torch.contiguous_format.
*/
torch::Tensor tensor = torch::tensor({{1.0,2.0},{4.0,5.0}});
torch::Tensor tensor2 = torch::clone(tensor);
cout << "tensor: \n" << tensor << endl;
cout << "tensor2: \n" << tensor2 << endl;
/*
$ ./test
tensor:
 1  2
 4  5
[ CPUFloatType{2,2} ]
tensor2:
 1  2
 4  5
[ CPUFloatType{2,2} ]
*/
- detach: works quite differently from clone. It returns a tensor with the same shape, dtype, and device as the source that shares the source's data memory but takes no part in gradient computation (requires_grad=False), i.e. it is detached from the computation graph.
at::Tensor at::detach(const at::Tensor &self)
torch::Tensor tensor = torch::tensor({{1.0,2.0},{4.0,5.0}});
torch::Tensor tensor2 = torch::detach(tensor);
cout << "tensor: \n" << tensor << endl;
cout << "tensor2: \n" << tensor2 << endl;
/*
$ ./test
tensor:
 1  2
 4  5
[ CPUFloatType{2,2} ]
tensor2:
 1  2
 4  5
[ CPUFloatType{2,2} ]
*/
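The autograd difference between clone and detach can be checked directly; a minimal sketch:
torch::Tensor src = torch::tensor({1.0, 2.0}, torch::requires_grad());
torch::Tensor c = src.clone();   // new memory, still on the graph
torch::Tensor d = src.detach();  // shared memory, off the graph
cout << c.requires_grad() << " " << d.requires_grad() << endl; // 1 0
c.sum().backward();              // gradients flow back through the clone
cout << src.grad() << endl;      // 1 1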
- copy_: likewise copies the data of a source tensor into a target tensor (no memory sharing). The target's device, dtype, and requires_grad settings are generally kept — only the data is copied — and the copy supports broadcasting (see the sketch after the example below).
at::Tensor at::copy(const at::Tensor &self, const at::Tensor &src, bool non_blocking = false);
/*
non_blocking (bool): if true and the copy is between CPU and GPU, the copy may run asynchronously with respect to the host. In all other cases the argument has no effect.
*/
torch::Tensor tensor = torch::tensor({{1.0,2.0},{4.0,5.0}});
torch::Tensor tensor2 = torch::empty({2,2});
tensor2.copy_(tensor);
cout << "tensor: \n" << tensor << endl;
cout << "tensor2: \n" << tensor2 << endl;
/*
$ ./test
tensor:
 1  2
 4  5
[ CPUFloatType{2,2} ]
tensor2:
 1  2
 4  5
[ CPUFloatType{2,2} ]
*/
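The broadcasting mentioned above means the source may have fewer dimensions than the target; a small sketch:
torch::Tensor dst = torch::empty({2,2});
torch::Tensor row = torch::tensor({1.0, 2.0});
dst.copy_(row);      // the 1-D source is broadcast across both rows
cout << dst << endl; //  1  2
                     //  1  2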
Summation
- Summing all elements:
at::Tensor at::sum(const at::Tensor &self, c10::optional<at::ScalarType> dtype = c10::nullopt)
- Summing along a column or row:
at::Tensor at::sum(const at::Tensor &self, at::IntArrayRef dim, bool keepdim = false, c10::optional<at::ScalarType> dtype = c10::nullopt);
/*
dim:
  0: sum down each column
  1: sum across each row
keepdim: keep the original number of dimensions
*/
torch::Tensor tensor = torch::tensor({{1.0,2.0},{4.0,5.0}});
cout << "tensor: \n" << tensor << endl;
torch::Tensor tensor2 = torch::sum(tensor);
cout << "sum: \n" << tensor2 << endl;
tensor2 = torch::sum(tensor, 0, true);
cout << "sum: \n" << tensor2 << endl;
tensor2 = torch::sum(tensor, 1, true);
cout << "sum: \n" << tensor2 << endl;
tensor2 = torch::sum(tensor.transpose(0, 1), 0);
cout << "sum: \n" << tensor2 << endl;
/*
$ ./test
tensor:
 1  2
 4  5
[ CPUFloatType{2,2} ]
sum:
12
[ CPUFloatType{} ]
sum:
 5  7
[ CPUFloatType{1,2} ]
sum:
 3
 9
[ CPUFloatType{2,1} ]
sum:
 3
 9
[ CPUFloatType{2} ]
*/
Absolute value
at::Tensor at::abs(const at::Tensor &self)
torch::Tensor tensor = torch::tensor({{1.0,-2.0},{-4.0,5.0}});
cout << "tensor: \n" << tensor << endl;
torch::Tensor tensor2 = torch::abs(tensor);
cout << "abs: \n" << tensor2 << endl;
/*
$ ./test
tensor:
1 -2
-4 5
[ CPUFloatType{2,2} ]
abs:
1 2
4 5
[ CPUFloatType{2,2} ]
*/
Square and square root
- Taking the square root of every element:
at::Tensor at::sqrt(const at::Tensor &self)
torch::Tensor tensor = torch::tensor({{4,9,16},{25,36,49}});
tensor = torch::sqrt(tensor);   // the integer (Long) input is promoted to float
cout << "After sqrt: \n" << tensor << endl;
/*
$ ./test
After sqrt:
2 3 4
5 6 7
[ CPUFloatType{2,3} ]
*/
- Squaring each element:
at::Tensor at::square(const at::Tensor &self)
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "original tensor: \n" << tensor << endl;
tensor = torch::square(tensor);
cout << "After square: \n" << tensor << endl;
/*
$ ./test
original tensor:
 1  2  3
 4  5  6
[ CPULongType{2,3} ]
After square:
  1   4   9
 16  25  36
[ CPULongType{2,3} ]
*/
Maximum and minimum
Extremes of a single matrix
at::Tensor at::max(const at::Tensor &self);
inline ::std::tuple<at::Tensor, at::Tensor> at::max(const at::Tensor &self, int64_t dim, bool keepdim = false);
// Returns a named tuple (values, indices): the maxima along the given dimension and their indices in that dimension
// When several elements share the maximum, the index of the first one is returned
// When no dimension dim is specified, the maximum over all elements is returned
// [With a dim, the return value is a tuple and must be received as one]
at::Tensor at::min(const at::Tensor &self);
inline ::std::tuple<at::Tensor, at::Tensor> at::min(const at::Tensor &self, int64_t dim, bool keepdim = false);
// Same usage as max
torch::Tensor tensor = torch::tensor({{4,9,16},{25,36,49}});
cout << "original tensor: \n" << tensor << endl;
tensor = torch::max(tensor);
cout << "Max: \n" << tensor << endl;
tensor = torch::tensor({{4,9,16},{25,36,49}});
tensor = torch::min(tensor);
cout << "Min: \n" << tensor << endl;
tensor = torch::tensor({{4,9,16},{25,36,49}});
tuple<torch::Tensor, torch::Tensor> array = torch::max(tensor, 0);
cout << "Max: \n" << get<0>(array) << endl;
tensor = torch::tensor({{4,9,16},{25,36,49}});
array = torch::min(tensor, 0);
cout << "Min: \n" << get<0>(array) << endl;
/*
$ ./test
original tensor:
4 9 16
25 36 49
[ CPULongType{2,3} ]
Max:
49
[ CPULongType{} ]
Min:
4
[ CPULongType{} ]
Max:
25
36
49
[ CPULongType{3} ]
Min:
4
9
16
[ CPULongType{3} ]
*/
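The second element of the returned tuple holds the indices of the extremes; a small follow-up to the example above:
torch::Tensor t = torch::tensor({{4,9,16},{25,36,49}});
auto result = torch::max(t, 0);
cout << "Max values: \n" << get<0>(result) << endl;  // 25 36 49
cout << "Max indices: \n" << get<1>(result) << endl; // 1 1 1 — each column's maximum sits in row 1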
Comparing two matrices
at::Tensor at::maximum(const at::Tensor &self, const at::Tensor &other);
// Computes the element-wise maximum of the two input tensors
// If either element of a pair is NaN, NaN is returned
at::Tensor at::minimum(const at::Tensor &self, const at::Tensor &other);
torch::Tensor tensor = torch::tensor({{4,9,16},{25,36,49}});
torch::Tensor tensor2 = torch::tensor({{1,10,9},{25,99,0}});
cout << "original tensor: \n" << tensor << endl;
cout << "original tensor2: \n" << tensor2 << endl;
tensor = torch::maximum(tensor, tensor2);
cout << "the max is: \n" << tensor << endl;
tensor = torch::tensor({{4,9,16},{25,36,49}});
tensor = torch::minimum(tensor, tensor2);
cout << "the mini is: \n" << tensor << endl;
/*
$ ./test
original tensor:
4 9 16
25 36 49
[ CPULongType{2,3} ]
original tensor2:
1 10 9
25 99 0
[ CPULongType{2,3} ]
the max is:
4 10 16
25 99 49
[ CPULongType{2,3} ]
the min is:
1 9 9
25 36 0
[ CPULongType{2,3} ]
*/
Getting the indices of the maximum and minimum
at::Tensor at::argmin(const at::Tensor &self, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false);
at::Tensor at::argmax(const at::Tensor &self, c10::optional<int64_t> dim = c10::nullopt, bool keepdim = false);
// dim: 0 gives the index of the extreme in each column, 1 in each row
// If dim is not specified, the index of the maximum in the flattened tensor is returned
torch::Tensor tensor = torch::tensor({{1,10,9},{25,99,0}});
cout << "original tensor: \n" << tensor << endl;
tensor = torch::argmax(tensor);
cout << "index of max:\n" << tensor << endl;
tensor = torch::tensor({{1,10,9},{25,99,0}});
tensor = torch::argmax(tensor, 0);
cout << "max of every column:\n" << tensor << endl;
tensor = torch::tensor({{1,10,9},{25,99,0}});
tensor = torch::argmin(tensor, 1);
cout << "min of every line:\n" << tensor << endl;
/*
$ ./test
original tensor:
1 10 9
25 99 0
[ CPULongType{2,3} ]
index of max:
4
[ CPULongType{} ]
max of every column:
1
1
0
[ CPULongType{3} ]
min of every line:
0
2
[ CPULongType{2} ]
*/
Selecting values by index
- Tensor version:
at::Tensor at::index_select(const at::Tensor &self, int64_t dim, const at::Tensor &index);
// dim: the dimension to index along — for a 2-D tensor, 0 selects rows and 1 selects columns
// index: the indices to take; for this function they must be passed as a tensor
torch::Tensor tensor = torch::tensor({{1,10,9},{25,99,0}});
cout << "original tensor: \n" << tensor << endl;
torch::Tensor index = torch::tensor({1});
tensor= torch::index_select(tensor, 0, index);
cout << "the max value: \n" << tensor << endl;
/*
$ ./test
original tensor:
1 10 9
25 99 0
[ CPULongType{2,3} ]
selected row:
25 99 0
[ CPULongType{1,3} ]
*/
- int version:
at::Tensor at::select(const at::Tensor &self, int64_t dim, int64_t index);
torch::Tensor tensor = torch::tensor({{1,10,9},{25,99,0}});
cout << "original tensor: \n" << tensor << endl;
tensor= torch::select(tensor, 0, 1);
cout << "value of index: \n" << tensor << endl;
/*
$ ./test
original tensor:
1 10 9
25 99 0
[ CPULongType{2,3} ]
value of index:
25
99
0
[ CPULongType{3} ]
*/
Splitting a tensor
inline ::std::vector<at::Tensor> at::tensor_split(const at::Tensor &self, int64_t sections, int64_t dim = 0);
// The last two arguments:
//   the final int selects which dimension to split along
//   the second argument gives the number of sections — this differs slightly from the Python version
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "original tensor: \n" << tensor << endl;
vector<torch::Tensor> array = torch::tensor_split(tensor, 3, 1);
for(int i = 0; i < array.size(); i++)
cout << array[i] << endl;
/*
$ ./test
original tensor:
1 2 3
4 5 6
[ CPULongType{2,3} ]
1
4
[ CPULongType{2,1} ]
2
5
[ CPULongType{2,1} ]
3
6
[ CPULongType{2,1} ]
*/
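For unequal pieces there is also an overload taking explicit split points, mirroring the list form of Python's indices_or_sections; a sketch assuming that overload is available in your LibTorch version:
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
vector<int64_t> indices = {1};  // split before column 1
vector<torch::Tensor> parts = torch::tensor_split(tensor, indices, 1);
cout << parts[0] << endl; // columns [0,1): shape {2,1}
cout << parts[1] << endl; // columns [1,3): shape {2,2}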
Sorting
inline ::std::tuple<at::Tensor, at::Tensor> at::sort(const at::Tensor &self, int64_t dim = -1, bool descending = false);
// dim=0 sorts each column, dim=1 sorts each row; the default dim=-1 means the last dimension (for a 2-D tensor, the rows)
// descending=true sorts from largest to smallest, descending=false from smallest to largest; the default is descending=false
torch::Tensor tensor = torch::tensor({{1,2,3},{4,5,6}});
cout << "original tensor: \n" << tensor << endl;
tuple<torch::Tensor, torch::Tensor> array = torch::sort(tensor, 1, true);
cout << "After sort:\n" << get<0>(array) << endl;
/*
$ ./test
original tensor:
1 2 3
4 5 6
[ CPULongType{2,3} ]
After sort:
3 2 1
6 5 4
[ CPULongType{2,3} ]
*/
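The second element of the returned tuple gives each sorted value's original index along the sorted dimension; continuing the example above:
cout << "indices:\n" << get<1>(array) << endl;
/*
indices:
 2  1  0
 2  1  0
[ CPULongType{2,3} ]
*/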