About the code I asked you about earlier, I have a few questions. First, what does @property do, and how is it used?
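For context on @property: in RNN cell classes like this one it is typically used to expose computed, read-only attributes such as output_size or state_size. Below is a minimal, self-contained sketch of the mechanics; the class name, fields, and numbers are illustrative only and are not taken from the original code.

class BasicConvLSTMCell(object):
    """Illustrative cell skeleton; only the @property mechanics matter here."""
    def __init__(self, shape, filter_size, num_features):
        self.shape = shape              # [height, width]
        self.filter_size = filter_size  # e.g. [3, 3]
        self.num_features = num_features

    @property
    def output_size(self):
        # @property turns this method into a read-only attribute:
        # callers write cell.output_size (no parentheses), and the value
        # is computed on each access instead of being stored.
        return self.shape[0] * self.shape[1] * self.num_features

cell = BasicConvLSTMCell([16, 16], [3, 3], 32)
print(cell.output_size)   # 8192, computed from shape and num_features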
def call(self, inputs, state, scope='convLSTM'):
    """Long short-term memory cell (LSTM)."""
    with tf.variable_scope(scope or type(self).__name__):  # "BasicLSTMCell"
        # Parameters of gates are concatenated into one multiply for efficiency.
        if self._state_is_tuple:
            c, h = state
        else:
            # State is a single tensor: split it into cell state c and hidden
            # state h along the channel axis (axis 3).
            c, h = tf.split(state, 2, 3)
        concat = _conv_linear([inputs, h], self.filter_size, self.num_features * 4, True)
        # i = input_gate, j = new_input, f = forget_gate, o = output_gate
        i, j, f, o = tf.split(concat, 4, 3)
        new_c = (c * tf.nn.sigmoid(f + self._forget_bias) +
                 tf.nn.sigmoid(i) * self._activation(j))
        new_h = self._activation(new_c) * tf.nn.sigmoid(o)
        if self._state_is_tuple:
            new_state = LSTMStateTuple(new_c, new_h)
        else:
            new_state = tf.concat([new_c, new_h], 3)
        return new_h, new_state
I need a more detailed explanation of this part.
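As a rough illustration of what the gate arithmetic inside call computes, here is a NumPy sketch. The shapes (batch, H, W, F) and forget_bias value are made-up toy assumptions, not taken from the original code; the point is only the split into four gate blocks and the element-wise update of the cell and hidden states.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

# Toy shapes: batch=1, height=4, width=4, num_features=8 (illustrative values).
batch, H, W, F = 1, 4, 4, 8
c = np.zeros((batch, H, W, F))                 # previous cell state
concat = np.random.randn(batch, H, W, 4 * F)   # stands in for the _conv_linear output

# Same idea as tf.split(concat, 4, 3): four equal blocks along the channel axis.
i, j, f, o = np.split(concat, 4, axis=3)

forget_bias = 1.0   # assumed value for self._forget_bias
new_c = c * sigmoid(f + forget_bias) + sigmoid(i) * np.tanh(j)
new_h = np.tanh(new_c) * sigmoid(o)
print(new_h.shape)   # (1, 4, 4, 8), same spatial layout as the input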
def _conv_linear(args, filter_size, num_features, bias, bias_start=0.0, scope=None):
    # All input tensors are assumed to share one dtype; take it from the first one.
    dtype = [a.dtype for a in args][0]
    with slim.arg_scope([slim.conv2d], stride=1, padding='SAME', activation_fn=None, scope=scope,
                        weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0e-3),
                        biases_initializer=bias and tf.constant_initializer(bias_start, dtype=dtype)):
        if len(args) == 1:
            res = slim.conv2d(args[0], num_features, [filter_size[0], filter_size[1]], scope='LSTM_conv')
        else:
            # Concatenate inputs and hidden state along the channel axis,
            # then apply a single convolution over the combined tensor.
            res = slim.conv2d(tf.concat(args, 3), num_features, [filter_size[0], filter_size[1]], scope='LSTM_conv')
        return res
Please explain this part in detail as well.
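To make the role of _conv_linear more concrete, here is a rough plain-TensorFlow 1.x sketch written without slim. The helper name _conv_linear_explicit and the variable names are mine, and it assumes that slim.conv2d here reduces to a stride-1, SAME-padded convolution plus an optional bias, with no activation; treat it as an approximation, not the original implementation.

import tensorflow as tf  # TF 1.x assumed, matching the slim-based original

def _conv_linear_explicit(args, filter_size, num_features, bias, bias_start=0.0):
    # Stack the inputs and the hidden state along the channel axis (axis 3),
    # so one convolution mixes both in a single pass.
    x = args[0] if len(args) == 1 else tf.concat(args, 3)
    in_channels = x.get_shape().as_list()[3]

    kernel = tf.get_variable(
        'kernel', [filter_size[0], filter_size[1], in_channels, num_features],
        initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0e-3))
    res = tf.nn.conv2d(x, kernel, strides=[1, 1, 1, 1], padding='SAME')

    if bias:
        b = tf.get_variable('bias', [num_features],
                            initializer=tf.constant_initializer(bias_start))
        res = tf.nn.bias_add(res, b)
    return res

In the cell above, num_features is passed as self.num_features * 4, so this single convolution produces all four gate blocks (i, j, f, o) at once; tf.split then separates them along the channel axis.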