net = tf_util.fully_connected(net, 512, bn=True, is_training=is_training,
                              scope='tfc1', bn_decay=bn_decay)
net = tf_util.fully_connected(net, 256, bn=True, is_training=is_training,
                              scope='tfc2', bn_decay=bn_decay)
Let's look at the concrete implementation. Here bn stands for batch norm, i.e. normalization along the batch dimension.
def fully_connected(inputs,
                    num_outputs,
                    scope,
                    use_xavier=True,
                    stddev=1e-3,
                    weight_decay=0.0,
                    activation_fn=tf.nn.relu,
                    bn=False,
                    bn_decay=None,
                    is_training=None):
  """ Fully connected layer with non-linear operation.

  Args:
    inputs: 2-D tensor BxN
    num_outputs: int

  Returns:
    Variable tensor of size B x num_outputs.
  """
  with tf.variable_scope(scope) as sc:
    num_input_units = inputs.get_shape()[-1].value
    weights = _variable_with_weight_decay('weights',
                                          shape=[num_input_units, num_outputs],
                                          use_xavier=use_xavier,
                                          stddev=stddev,
                                          wd=weight_decay)
    outputs = tf.matmul(inputs, weights)
    biases = _variable_on_cpu('biases', [num_outputs],
                              tf.constant_initializer(0.0))
    outputs = tf.nn.bias_add(outputs, biases)
    if bn:
      outputs = batch_norm_for_fc(outputs, is_training, bn_decay, 'bn')
    if activation_fn is not None:
      outputs = activation_fn(outputs)
    return outputs
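
The bn branch delegates to batch_norm_for_fc, whose body is not shown here. As a rough sketch of what such a TF1 helper typically looks like (the moving-average bookkeeping below follows the standard tf.nn.moments / ExponentialMovingAverage pattern and is an assumption, not the verbatim PointNet code): for 2-D FC activations of shape BxC, the statistics are computed over axis 0, which is exactly the normalization along the batch dimension mentioned above.

# Sketch only: a plausible batch_norm_for_fc, simplified from the common
# TF1 pattern; the real PointNet helper may differ in detail.
def batch_norm_for_fc(inputs, is_training, bn_decay, scope):
  with tf.variable_scope(scope):
    num_channels = inputs.get_shape()[-1].value
    beta = tf.Variable(tf.zeros([num_channels]), name='beta')   # learned shift
    gamma = tf.Variable(tf.ones([num_channels]), name='gamma')  # learned scale
    # Per-channel mean/variance taken over axis 0, the batch dimension.
    batch_mean, batch_var = tf.nn.moments(inputs, axes=[0], name='moments')
    decay = bn_decay if bn_decay is not None else 0.9
    ema = tf.train.ExponentialMovingAverage(decay=decay)

    def mean_var_with_update():
      # Training: refresh the moving averages, then use batch statistics.
      ema_apply_op = ema.apply([batch_mean, batch_var])
      with tf.control_dependencies([ema_apply_op]):
        return tf.identity(batch_mean), tf.identity(batch_var)

    # Inference: fall back to the accumulated moving averages.
    mean, var = tf.cond(is_training,
                        mean_var_with_update,
                        lambda: (ema.average(batch_mean),
                                 ema.average(batch_var)))
    return tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)

At training time the batch statistics are used and the moving averages are updated; at test time the accumulated averages are used instead, which is why is_training must be threaded through fully_connected.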
In the PointNet classification pipeline, num_input_units = 1024 and num_outputs = 512, so the layer declares a [1024, 512] weights tensor and a [512] biases vector, then normalizes the result with batch norm.
Finally the activation function is applied, which is again ReLU.
So fully_connected is really just a forward x*W + b pass that shapes the output through the learned W; there is no convolution involved.
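
To make the shape bookkeeping concrete, here is a minimal standalone sketch (the batch size, variable names, and random input are illustrative assumptions, not part of the PointNet code) that reproduces the x*W + b plus ReLU forward pass with plain TF1 ops:

import numpy as np
import tensorflow as tf  # TF1-style graph code, matching the snippet above

B = 32                                            # batch size (illustrative)
x = tf.placeholder(tf.float32, [B, 1024])         # global feature vector
W = tf.get_variable('W', [1024, 512])             # the [1024, 512] weights tensor
b = tf.get_variable('b', [512],
                    initializer=tf.constant_initializer(0.0))
y = tf.nn.relu(tf.matmul(x, W) + b)               # x*W + b followed by ReLU

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(y, feed_dict={x: np.random.randn(B, 1024)})
    print(out.shape)                              # (32, 512)

Running it prints (32, 512): each 1024-dim global feature is projected down to 512 dimensions, exactly what the tfc1 layer above does.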