简单的TF实现,用于识别mnist手写数字
首先设置库与导入数据:
import tensorflow as tf
import numpy as np
#input_data is the author's own helper module for reading the MNIST dataset
import input_data
# Load MNIST from a local path; one_hot=True encodes each label as a
# 10-element one-hot vector (digit 3 -> [0,0,0,1,0,0,0,0,0,0]).
readData = input_data.read_data_sets("/home/hanchao/mnist" , one_hot=True)
设置参数:
# Hyperparameter: step size for gradient descent.
learning_rate = 0.01
# Placeholders: x holds batches of flattened 28x28 images (784 pixels),
# y holds the matching one-hot digit labels.
x = tf.placeholder("float", [None, 784])
y = tf.placeholder("float", [None, 10])
# BUG FIX: tf.zeros takes (shape, dtype, name) -- the original passed the
# intended variable names ("weight"/"bais") as the *name of the zeros op*,
# leaving the Variables themselves with default names. Name the Variables
# instead (also fixes the "bais" -> "bias" typo).
W = tf.Variable(tf.zeros([784, 10]), name="weight")
b = tf.Variable(tf.zeros([10]), name="bias")
模型:
# Softmax regression model: per-class probabilities for each input image.
activation = tf.nn.softmax(tf.matmul(x, W) + b)
# Cross-entropy cost, averaged over the batch.
# BUG FIX: clip the softmax output away from 0 before taking the log --
# tf.log(0) is -inf and poisons the gradients with NaN once the model
# becomes confidently wrong on any example.
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(tf.clip_by_value(activation, 1e-10, 1.0)), 1))
# Minimize the cross-entropy with plain gradient descent.
opt = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
# Op that initializes all variables (legacy TF 0.x name; superseded by
# tf.global_variables_initializer in later releases).
init = tf.initialize_all_variables()
进行训练:
with tf.Session() as sess:
sess.run(init)
#train过程
for step in range(25):
avg_cost = 0.
#分为多个batch训练
totalStep = (int)(readData.train.num_examples/100)
for i in range(totalStep):
images,labels = readData.train.next_batch(100)
sess.run(opt,feed_dict={x:images,y:labels})
avg_cost += sess.run(cost,feed_dict={x:images,y:labels})/totalStep
print "Step: ","%04d"%step," cost: ",avg_cost
#test过程
#读取测试样本
testImages,testLabels = readData.test.next_batch(200)
#初始化准确率
accuracy = 0.
#获得activation中每行最大值的下标作为预测结果
predictResult = tf.arg_max(activation, 1)
#测试样本共200个sample,遍历对每个sample进行测试
for i in range(200):
#p为第i个样本的分类结果
p = sess.run(predictResult[i],feed_dict={x:testImages})
#label为真实的样本标签
label = np.argmax(testLabels, 1)
l = label[i]
#分类正确
if(p == l):
accuracy += 1.0/200.0
print "Predict Result: ",p," True Label: ",l
print "accuracy: ",accuracy
总结:通过设置参数就能调整模型,并能得到很高的准确度