卷积层的实现方式:
- tf.nn.conv2d:最基础的用法,需要自定义权重和偏置。用法示例:
def create_weights(shape):
    """Create a weight Variable initialized from a truncated normal (stddev=0.1)."""
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

def create_bias(num_filters):
    """Create a bias Variable of shape [num_filters], initialized to 0.05."""
    # FIX: original passed shape=num_filters (a bare int); tf.constant expects a shape list.
    return tf.Variable(tf.constant(0.05, shape=[num_filters]))

def create_conv_layer(input, num_channels, filtr_size, num_filters):
    """Convolution (stride 1, SAME padding) + bias, followed by 2x2 max-pooling.

    Args:
        input: 4-D NHWC tensor; its last dimension must equal num_channels.
        num_channels: number of input channels.
        filtr_size: spatial size of the square convolution filter.
        num_filters: number of output feature maps.

    Returns:
        The pooled feature map (spatial dims halved by the 2x2/stride-2 max-pool).
    """
    # FIX: tf.nn.conv2d expects filter shape [height, width, in_channels, out_channels].
    # The original built [input, num_channels, filtr_size, num_filter], which is
    # mis-ordered, uses the input *tensor* as a dimension, and references the
    # undefined name `num_filter`.
    weights = create_weights(shape=[filtr_size, filtr_size, num_channels, num_filters])
    bias = create_bias(num_filters)
    # FIX: keyword is `strides` (not `stride`) and padding must be uppercase 'SAME'.
    layer = tf.nn.conv2d(input=input, filter=weights, strides=[1, 1, 1, 1], padding='SAME')
    layer += bias
    layer = tf.nn.max_pool(layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
    return layer
- tf.contrib.layers.conv2d:高级API。用法示例:
# Signature of tf.contrib.layers.conv2d — the high-level API this section describes.
# (The original note mistakenly repeated the tf.nn.conv2d signature here, which is
# already covered in the previous section.)
tf.contrib.layers.conv2d(
    inputs,
    num_outputs,
    kernel_size,
    stride=1,
    padding='SAME',
    data_format=None,
    rate=1,
    activation_fn=tf.nn.relu,
    normalizer_fn=None,
    normalizer_params=None,
    weights_initializer=initializers.xavier_initializer(),
    weights_regularizer=None,
    biases_initializer=tf.zeros_initializer(),
    biases_regularizer=None,
    reuse=None,
    variables_collections=None,
    outputs_collections=None,
    trainable=True,
    scope=None
)
- tf.contrib.slim.conv2d:更高级的API(注意:slim.conv2d 实际上是 tf.contrib.layers.conv2d 的别名,因此下例直接使用 tf.contrib.layers 调用)。用法示例:
def mynet(input, reuse=False):
    """Small convolutional network built with the high-level layers API.

    Each conv stage lives in its own variable scope so weights can be
    reused across calls via `reuse`.
    """
    with tf.variable_scope("conv1") as scope:
        net = tf.contrib.layers.conv2d(
            input, 32, [5, 5],
            padding='SAME',
            weights_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
            scope=scope,
            reuse=reuse)
        net = tf.contrib.layers.max_pool2d(net, [2, 2], padding='SAME')
    with tf.variable_scope("conv2") as scope:
        ...  # further conv/pool stages, defined the same way as conv1
    net = tf.contrib.layers.flatten(net)
    return net
它的repeat函数可以快速定义层,如定义VGG16模型:
def vgg16(inputs):
    """VGG-16 expressed with TF-Slim.

    `arg_scope` installs shared defaults (ReLU activation, truncated-normal
    weight init, L2 weight decay) for every conv2d and fully_connected layer
    inside the block; `repeat` stacks N identical conv layers under one scope
    (e.g. conv1/conv1_1, conv1/conv1_2).
    """
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        # Five conv stages, each followed by 2x2 max-pooling.
        net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')
        net = slim.max_pool2d(net, [2, 2], scope='pool2')
        net = slim.repeat(net, 3, slim.conv2d, 256, [3, 3], scope='conv3')
        net = slim.max_pool2d(net, [2, 2], scope='pool3')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv4')
        net = slim.max_pool2d(net, [2, 2], scope='pool4')
        net = slim.repeat(net, 3, slim.conv2d, 512, [3, 3], scope='conv5')
        net = slim.max_pool2d(net, [2, 2], scope='pool5')
        # Classifier head: two FC+dropout stages, then the 1000-way logits.
        net = slim.fully_connected(net, 4096, scope='fc6')
        net = slim.dropout(net, 0.5, scope='dropout6')
        net = slim.fully_connected(net, 4096, scope='fc7')
        net = slim.dropout(net, 0.5, scope='dropout7')
        net = slim.fully_connected(net, 1000, activation_fn=None, scope='fc8')
    return net