How to Use tf.transpose, Explained (with Illustrations)

This article explains TensorFlow's tf.transpose function in detail, focusing on how transposition works across different numbers of dimensions: the ordinary transpose of a 2-D array, permutations of higher-dimensional arrays, and the use of the perm parameter. Worked examples of swapping the rows and columns of a 2-D array and permuting the axes of a 3-D array should help you understand the more complex cases.

The name tf.transpose means "transpose." For low-dimensional tensors the operation is easy to picture, but the high-dimensional case is harder to grasp, and reading other blog posts did not make it much clearer, so this article works through it with concrete examples.

The tf.transpose function:

tf.transpose(
    a,
    perm=None,
    name='transpose',
    conjugate=False
)

Permutes the dimensions of a according to perm.

The returned tensor's dimension i corresponds to the input's dimension perm[i]. If perm is not given, it defaults to (n-1, ..., 0), where n is the rank of the input tensor; hence, by default, this operation performs a regular matrix transpose on 2-D input tensors. If conjugate is True and a.dtype is complex64 or complex128, the values of a are both conjugated and transposed.

For example:

x = tf.constant([[1, 2, 3], [4, 5, 6]])

tf.transpose(x)  # [[1, 4],
                 #  [2, 5],
                 #  [3, 6]]

# Equivalently:
tf.transpose(x, perm=[1, 0])  # [[1, 4],
                              #  [2, 5],
                              #  [3, 6]]

tf.transpose(x, perm=[0, 1])  # [[1, 2, 3],
                              #  [4, 5, 6]]

For a 2-D array the entries of perm can only be 0 and 1. In perm=[0, 1], 0 stands for the rows of the array and 1 for the columns; this is the identity permutation, so the result equals the original data, a 2×3 matrix.

tf.transpose(x, perm=[1, 0]) swaps the two entries relative to the initial perm=[0, 1], i.e., it exchanges rows and columns, turning the array into a 3×2 matrix, as the output above confirms.
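The conjugate argument from the signature above also deserves a quick demonstration. Below is a minimal sketch (my own addition, assuming TensorFlow 2.x eager execution) showing that for a complex dtype, conjugate=True transposes and conjugates in one step:

import tensorflow as tf

# A small complex matrix; conjugate only matters for complex64/complex128.
z = tf.constant([[1 + 1j, 2 + 2j],
                 [3 - 3j, 4 - 4j]], dtype=tf.complex64)

# Plain transpose: rows and columns swap, values are unchanged.
print(tf.transpose(z).numpy())
# [[1.+1.j 3.-3.j]
#  [2.+2.j 4.-4.j]]

# Conjugate (Hermitian) transpose: swap axes AND flip the sign of the
# imaginary part of every element.
print(tf.transpose(z, conjugate=True).numpy())
# [[1.-1.j 3.+3.j]
#  [2.-2.j 4.+4.j]]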

Now consider a 3-D example:

x = tf.constant([[[ 1,  2,  3],
                  [ 4,  5,  6]],
                 [[ 7,  8,  9],
                  [10, 11, 12]]])  # shape=(2, 2, 3)

tf.transpose(x, perm=[0, 1, 2])  # shape=(2, 2, 3), identical to x

a = tf.transpose(x, perm=[1, 0, 2])
'''
[[[ 1  2  3]
  [ 7  8  9]]

 [[ 4  5  6]
  [10 11 12]]]
'''

b = tf.transpose(x, perm=[1, 2, 0])
'''
[[[ 1  7]
  [ 2  8]
  [ 3  9]]

 [[ 4 10]
  [ 5 11]
  [ 6 12]]]
'''

In tf.transpose's second argument perm=[0, 1, 2], 0 stands for the first axis of the 3-D array (the "depth", of size 2 in this example), 1 for the rows (size 2), and 2 for the columns (size 3). The original array has shape 2×2×3.
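A quick rule of thumb before walking through each case: output dimension i takes its size from input dimension perm[i]. A minimal sketch (my own addition, assuming TensorFlow 2.x) that prints the resulting shapes:

import tensorflow as tf

x = tf.constant([[[ 1,  2,  3],
                  [ 4,  5,  6]],
                 [[ 7,  8,  9],
                  [10, 11, 12]]])  # shape=(2, 2, 3)

for perm in ([0, 1, 2], [1, 0, 2], [1, 2, 0]):
    # Output shape[i] == input shape[perm[i]].
    print(perm, tf.transpose(x, perm=perm).shape)
# [0, 1, 2] (2, 2, 3)
# [1, 0, 2] (2, 2, 3)
# [1, 2, 0] (2, 3, 2)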

tf.transpose(x, perm=[1, 0, 2]) transposes the first axis and the rows of the 3-D array. Compared with perm=[0, 1, 2], the first two entries are swapped; the shape stays 2×2×3 (only because both of those axes happen to have size 2 here).

[Figure: element layout after tf.transpose(x, perm=[1, 0, 2])]

tf.transpose(x, perm=[1, 2, 0]) can be read as two successive swaps: starting from the original perm=[0, 1, 2], swap the first two axes to get perm=[1, 0, 2], then swap the second and third axes to get perm=[1, 2, 0]; the result has shape 2×3×2.

[Figure: element layout after tf.transpose(x, perm=[1, 2, 0])]
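To verify the two-swap reading of perm=[1, 2, 0] above, this sketch (my own check, not from the original post) composes the two single swaps and compares the result with the direct permutation:

import tensorflow as tf

x = tf.constant([[[ 1,  2,  3],
                  [ 4,  5,  6]],
                 [[ 7,  8,  9],
                  [10, 11, 12]]])

# Step 1: swap the first and second axes -> overall perm=[1, 0, 2].
step1 = tf.transpose(x, perm=[1, 0, 2])
# Step 2: swap the (new) second and third axes -> overall perm=[1, 2, 0].
step2 = tf.transpose(step1, perm=[0, 2, 1])

direct = tf.transpose(x, perm=[1, 2, 0])
print(bool(tf.reduce_all(step2 == direct)))  # True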
