自己训练神经网络时,可能需要:1. fine-tune 预训练好的模型; 2. 分阶段训练。 这时 tf.train.Saver() 的使用必不可少。以下是自己使用这个类的一些心得。
示例代码:
import tensorflow as tf
import numpy as np
def add_layer(x, kin, kout, name, trainable=True):
    """Add a fully-connected ReLU layer to the graph.

    Args:
        x: Input tensor; assumed shape (batch, kin) — TODO confirm with caller.
        kin: Input feature dimension.
        kout: Output feature dimension.
        name: Suffix used for the variable names ('W' + name, 'b' + name).
        trainable: If False, this layer's variables are created with
            trainable=False so the optimizer leaves them untouched
            (used for freezing layers when fine-tuning).

    Returns:
        The layer's output tensor after the ReLU activation.
    """
    # He-style initialization: scale the normal init by sqrt(2/kin),
    # which suits ReLU activations.
    W = tf.Variable(tf.random_normal([kin, kout]) * (2 / kin) ** 0.5,
                    name='W' + name, trainable=trainable)
    # Small positive bias so ReLU units start in their active region.
    b = tf.Variable(tf.ones([kout]) * 0.1, name='b' + name, trainable=trainable)
    return tf.nn.relu(tf.matmul(x, W) + b)
# Toy regression dataset: np.linspace's default gives 50 points in [0, 1],
# reshaped to a (50, 1) float32 column vector.
x = np.linspace(0, 1)[:, np.newaxis].astype(np.float32)
# Target: a simple quadratic with an offset.
y = x**2 + 0.1
# Three-layer MLP (1 -> 10 -> 100 -> 1). The middle layer is created with
# trainable=False to demonstrate freezing part of the network
# (fine-tuning / staged training, the topic of this article).
net = add_layer(x, 1, 10, '1')
net = add_layer(net, 10, 100, '2', trainable=False)
net = add_layer(net, 100, 1, '3')
# Mean-squared-error loss; minimize() by default only updates variables
# in the TRAINABLE_VARIABLES collection, so layer 2 stays fixed.
loss = tf.reduce_mean(tf.square(net - y))
train = tf.train.AdamOptimizer(0.001).minimize(loss)
sess = tf.Session()
# Alternative kept for illustration: initialize ONLY the Adam optimizer's
# slot variables (their names contain 'Adam' or 'beta') instead of every
# variable — useful after restoring model weights from a checkpoint.
#v_list = [v for v in tf.global_variables() if 'Adam' in v.name or 'beta' in v.name]
#sess.run(tf.initialize_variables(v_list))
# Here we simply initialize everything.
sess.run(tf.global_variables_initializer())
v_list = [v for v in tf.global_variables() if 'Adam' not in v.name and 'beta' not i