# 第五章 (Chapter 5)
#退化学习率
import tensorflow.compat.v1 as tf###
tf.disable_v2_behavior()###tf1 api
global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.1
learning_rate = tf.train.exponential_decay(initial_learning_rate,global_step=global_step,decay_steps=10,decay_rate=0.9)
opt = tf.train.GradientDescentOptimizer(learning_rate)
add_global = global_step.assign_add(1) #令global_step加一计步
with tf.Session() as sess:
#init = tf.global_variables_initializer()
#sess.run(init)
tf.global_variables_initializer().run()
print(sess.run(learning_rate))
for i in range(21):
i, rate = sess.run([add_global, learning_rate])
print(i,rate)
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
print ('输入数据:',mnist.train.images)
print ('输入数据打shape:',mnist.train.images.shape)
import pylab
im = mnist.train.images[1]
im = im.reshape(-1,28)
pylab.imshow(im)
pylab.show()
print ('输入数据打shape:',mnist.test.images.shape)
print ('输入数据打shape:',mnist.validation.images.shape)
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from tensorflow.examples.tutorials.mnist import input_data
minist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import pylab
tf.reset_default_graph()
x= tf.placeholder(tf.float32, [None, 784])
y= tf.placeholder(tf.float32, [None, 10])
#训练模型并输出中间状态参数
training_epochs=25#迭代次数
batch_size=100#每次训练取出数据条数
display_step=1#每训练一次把具体的中间状态显示出来
saver = tf.train.Saver()
model_path = "log/521model.ckpt"
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
for epoch in range(training_epochs):
avg_cost=0
total_batch = int(mnist.train.num_examples/batch_size)
for i in range(total_batch):
batch_xs,batch_ys = mnist.train.next_batch(batch_size)
_, c = sess.run([optimizer, cost], feed_dict={x: batch_xs,y: batch_ys})
avg_cost += c / total_batch ###
if(epoch+1) % display_step ==0:###
print("Epoch:", "%04d" % (epoch+1), "cost=", "{:.9f}".format(avg_cost))
print("Fished!")
#测试modle
correct_prediction = tf.equal(tf.argmax(pred, 1),tf.argmax(y,1))
#tf.equal(A, B)是对比这两个矩阵或者向量的相等的元素,如果是相等的那就返回True,反正返回False,返回的值的矩阵维度和A是一样的
#计算准确率
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))#cast:数据类型转换 reduce_mean:均值
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
#保存模型
save_path = saver.save(sess, model_path)
print("Model saved in file: %s" % save_path)
#读取模型
print("Starting 2nd session....")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
###恢复模型
saver.restore(sess, model_path)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy:", accuracy.eval({x: mnist.test.images,y:mnist.test.labels}))
output = tf.argmax(pred, 1)
batch_xs, batch_ys = mnist.train.next_batch(2)
outputval,predv = sess.run([output,pred], feed_dict={x:batch_xs})
print(outputval,predv,batch_xs)
im = batch_xs[0]
im = im.reshape(-1, 28)
pylab.imshow(im)
pylab.show()
im = batch_xs[1]
im = im.reshape(-1, 28)
pylab.imshow(im)
pylab.show()