tf.reshape()与tf.transpose的理解

本文详细介绍了TensorFlow中tf.transpose与tf.reshape函数的使用方法,并通过具体实例展示了这两种函数如何改变张量的形状与数据排列方式。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

背景:初次接触tf.transpose,对其中的维度的理解,甚是困难,作此记录,以便以后查看

(1)tf.reshape()的理解

# Demo: tf.reshape flattens a tensor in row-major (C) order and refills the
# same element sequence into the requested shape — element order is unchanged.
# NOTE(review): TF1-style API (tf.Session, global_variables_initializer);
# will not run unmodified on TensorFlow 2.x without tf.compat.v1.
import tensorflow as tf
import  numpy as np
# Rank-3 variable holding the integers 0..99 arranged as shape (2, 5, 10).
three_dim_data = tf.Variable(np.arange(100).reshape(2,5,10))
# Reshape to (10, 10): only the shape metadata changes; the flattened
# sequence 0..99 is preserved.
three_dim_data_reshape = tf.Variable(tf.reshape(three_dim_data,[10,10]))
# Variables must be initialized before they can be evaluated in a session.
with tf.Session().as_default() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(three_dim_data))
    print(sess.run(three_dim_data_reshape))

three_dim_data输出结果为:

[[[ 0  1  2  3  4  5  6  7  8  9]
  [10 11 12 13 14 15 16 17 18 19]
  [20 21 22 23 24 25 26 27 28 29]
  [30 31 32 33 34 35 36 37 38 39]
  [40 41 42 43 44 45 46 47 48 49]]

 [[50 51 52 53 54 55 56 57 58 59]
  [60 61 62 63 64 65 66 67 68 69]
  [70 71 72 73 74 75 76 77 78 79]
  [80 81 82 83 84 85 86 87 88 89]
  [90 91 92 93 94 95 96 97 98 99]]]

three_dim_data_reshape的输出结果为:

[[ 0  1  2  3  4  5  6  7  8  9]
 [10 11 12 13 14 15 16 17 18 19]
 [20 21 22 23 24 25 26 27 28 29]
 [30 31 32 33 34 35 36 37 38 39]
 [40 41 42 43 44 45 46 47 48 49]
 [50 51 52 53 54 55 56 57 58 59]
 [60 61 62 63 64 65 66 67 68 69]
 [70 71 72 73 74 75 76 77 78 79]
 [80 81 82 83 84 85 86 87 88 89]
 [90 91 92 93 94 95 96 97 98 99]]

通过两种情况的对比可以看出,reshape 的操作是先把原始数据按行优先(C 顺序)平铺成一维序列 [0-99],再按新形状逐行重新填充;元素的先后顺序完全不变,变化的只是形状。

例如 three_dim_data 的形状为 (2,5,10):行优先排列时最内层维度 10 变化最快,其次是 5,最外层的 2 变化最慢。

three_dim_data_reshape 则是把同一串平铺数据 [0-99] 按 (10,10) 的形状逐行重新填充得到的。

(2)tf.transpose的理解

理解了tf.reshape就很容易理解tf.transpose了

tf.transpose 用来置换(交换)张量的维度。注意它与 tf.reshape 并不等价:reshape 不改变元素的平铺顺序,只改变形状;而 transpose 会按新的维度顺序真正重排底层数据。

# Demo: tf.transpose permutes tensor axes. perm=[1, 0, 2] swaps the first
# two axes, turning shape (2, 5, 10) into (5, 2, 10). Unlike tf.reshape,
# this reorders the underlying element sequence (compare the printed output
# with the reshape demo above).
# NOTE(review): TF1-style API; needs tf.compat.v1 on TensorFlow 2.x.
import tensorflow as tf
import  numpy as np
# Rank-3 variable holding the integers 0..99 arranged as shape (2, 5, 10).
three_dim_data = tf.Variable(np.arange(100).reshape(2,5,10))
# Swap axes 0 and 1; axis 2 (the innermost rows of 10) stays in place.
three_dim_data_transpose = tf.transpose(three_dim_data,[1,0,2])
# Static shape is known at graph-construction time: (5, 2, 10).
transpose_shape = three_dim_data_transpose.shape

with tf.Session().as_default() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(three_dim_data))
    print(sess.run(three_dim_data_transpose))
    print(transpose_shape)

输出结果为:

[[[ 0  1  2  3  4  5  6  7  8  9]
  [50 51 52 53 54 55 56 57 58 59]]

 [[10 11 12 13 14 15 16 17 18 19]
  [60 61 62 63 64 65 66 67 68 69]]

 [[20 21 22 23 24 25 26 27 28 29]
  [70 71 72 73 74 75 76 77 78 79]]

 [[30 31 32 33 34 35 36 37 38 39]
  [80 81 82 83 84 85 86 87 88 89]]

 [[40 41 42 43 44 45 46 47 48 49]
  [90 91 92 93 94 95 96 97 98 99]]]
(5, 2, 10)

从形状上看,(2,5,10) 变成了 (5,2,10),但这并不等价于直接把张量 reshape 成 (5,2,10):reshape 保持元素的平铺顺序不变,而 transpose 会按新的维度顺序重排数据,两者的输出结果并不相同(对比上面两段输出即可验证)。

tf.transpose() 中的 [1,0,2] 表示输出的第 i 个维度取自输入的第 perm[i] 个维度,即交换原来的第 0、1 两个维度,第 2 个维度保持不变。

帮我整段修改一下,改成TensorFlow2.11版本可用的:“ def TGCN(_X, _weights, _biases): ### cell_1 = tgcnCell(cell=custom_cell,gru_units=gru_units, adj=adj, num_nodes=num_nodes) cell = tf.nn.rnn_cell.MultiRNNCell([cell_1], state_is_tuple=True) _X = tf.unstack(_X, axis=1) outputs, states = tf.nn.static_rnn(cell, _X, dtype=tf.float32) m = [] for i in outputs: o = tf.reshape(i,shape=[-1,num_nodes,gru_units]) o = tf.reshape(o,shape=[-1,gru_units]) m.append(o) last_output = m[-1] output = tf.matmul(last_output, _weights['out']) + _biases['out'] output = tf.reshape(output,shape=[-1,num_nodes,pre_len]) output = tf.transpose(output, perm=[0,2,1]) output = tf.reshape(output, shape=[-1,num_nodes]) return output, m, states ###### placeholders ###### # 使用 tf.placeholder inputs = tf.compat.v1.placeholder(tf.float32, shape=[None, seq_len, num_nodes]) labels = tf.compat.v1.placeholder(tf.float32, shape=[None, pre_len, num_nodes]) # Graph weights weights = { 'out': tf.Variable(tf.random.normal([gru_units, pre_len], mean=1.0), name='weight_o')} biases = { 'out': tf.random.normal(shape=[pre_len],name='bias_o')} if model_name == 'tgcn': pred,ttts,ttto = TGCN(inputs, weights, biases) y_pred = pred ###### optimizer ###### lambda_loss = 0.0015 Lreg = lambda_loss * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()) label = tf.reshape(labels, [-1,num_nodes]) ##loss loss = tf.reduce_mean(tf.nn.l2_loss(y_pred-label) + Lreg) ##rmse error = tf.sqrt(tf.reduce_mean(tf.square(y_pred-label))) optimizer = tf.train.AdamOptimizer(lr).minimize(loss) ###### Initialize session ###### variables = tf.global_variables() saver = tf.train.Saver(tf.global_variables()) #sess = tf.Session() gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) sess.run(tf.global_variables_initializer())”
04-05
按照TensorFlow2.11的写法修改这段代码:“class tgcnCell(RNN): """Temporal Graph Convolutional Network """ def call(self, inputs, **kwargs): pass def __init__(self, num_units, adj, num_nodes, input_size=None, act=tf.nn.tanh, reuse=None): super(tgcnCell, self).__init__(units=num_units,_reuse=reuse) self._act = act self._nodes = num_nodes self._units = num_units self._adj = [] self._adj.append(calculate_laplacian(adj)) @property def state_size(self): return self._nodes * self._units @property def output_size(self): return self._units def __call__(self, inputs, state, scope=None): with tf.variable_scope(scope or "tgcn"): with tf.variable_scope("gates"): value = tf.nn.sigmoid( self._gc(inputs, state, 2 * self._units, bias=1.0, scope=scope)) r, u = tf.split(value=value, num_or_size_splits=2, axis=1) with tf.variable_scope("candidate"): r_state = r * state c = self._act(self._gc(inputs, r_state, self._units, scope=scope)) new_h = u * state + (1 - u) * c return new_h, new_h def _gc(self, inputs, state, output_size, bias=0.0, scope=None): inputs = tf.expand_dims(inputs, 2) state = tf.reshape(state, (-1, self._nodes, self._units)) x_s = tf.concat([inputs, state], axis=2) input_size = x_s.get_shape()[2].value x0 = tf.transpose(x_s, perm=[1, 2, 0]) x0 = tf.reshape(x0, shape=[self._nodes, -1]) scope = tf.get_variable_scope() with tf.variable_scope(scope): for m in self._adj: x1 = tf.sparse_tensor_dense_matmul(m, x0) x = tf.reshape(x1, shape=[self._nodes, input_size,-1]) x = tf.transpose(x,perm=[2,0,1]) x = tf.reshape(x, shape=[-1, input_size]) weights = tf.get_variable( 'weights', [input_size, output_size], initializer=tf.contrib.layers.xavier_initializer()) x = tf.matmul(x, weights) # (batch_size * self._nodes, output_size) biases = tf.get_variable( "biases", [output_size], initializer=tf.constant_initializer(bias, dtype=tf.float32)) x = tf.nn.bias_add(x, biases) x = tf.reshape(x, shape=[-1, self._nodes, output_size]) x = tf.reshape(x, shape=[-1, self._nodes * output_size]) return x”
最新发布
04-05
评论 1
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值