def add_layer(inputs, input_size, output_size, activation_function=None, keep_prob=None):
    """Build one fully connected layer (TF1.x graph mode) with dropout.

    Creates a Weights variable of shape [input_size, output_size] (random
    normal init) and a biases variable of shape [1, output_size] (zeros + 0.1),
    computes ``inputs @ Weights + biases``, applies dropout, then the optional
    activation.

    Args:
        inputs: 2-D tensor of shape [batch, input_size] — assumed; TODO confirm
            against callers.
        input_size: number of input features.
        output_size: number of units in this layer.
        activation_function: optional callable (e.g. ``tf.nn.relu``) applied to
            the dropped-out pre-activation; if None the linear output is
            returned as-is.
        keep_prob: optional dropout keep probability. When None (the default,
            preserving the original behavior) the module-level placeholder
            ``keep_prob_s`` is used — it must be defined elsewhere in the file.

    Returns:
        The layer output tensor of shape [batch, output_size].
    """
    with tf.variable_scope("Weights"):
        Weights = tf.Variable(
            tf.random_normal(shape=[input_size, output_size]), name="weights")
    with tf.variable_scope("biases"):
        # Small positive bias init (0.1) to avoid dead units with ReLU-like
        # activations.
        biases = tf.Variable(tf.zeros(shape=[1, output_size]) + 0.1, name="biases")
    with tf.name_scope("Wx_plus_b"):
        Wx_plus_b = tf.matmul(inputs, Weights) + biases
    with tf.name_scope("dropout"):
        # Backward compatible: fall back to the global placeholder when no
        # explicit keep_prob is passed.
        kp = keep_prob if keep_prob is not None else keep_prob_s
        Wx_plus_b = tf.nn.dropout(Wx_plus_b, keep_prob=kp)
    if activation_function is None:
        return Wx_plus_b
    with tf.name_scope("activation_function"):
        return activation_function(Wx_plus_b)
# 作者 (Author): DannyHau
# 来源 (Source): CSDN
# 原文 (Original): https://blog.csdn.net/mvs2008/article/details/75577459
# 版权声明: 本文为博主原创文章,转载请附上博文链接!