```python
from __future__ import print_function
from tensorflow.examples.tutorials.mnist import input_data
# Load MNIST from a local directory (downloaded there if missing).
# one_hot=False keeps labels as integer class ids, which is what
# tf.nn.sparse_softmax_cross_entropy_with_logits (used below) expects.
# NOTE(review): the absolute path is machine-specific — consider making it configurable.
mnist = input_data.read_data_sets("/home/ts/PycharmProjects/tf_tt/ts/tensorflow/pys/test1/tmp/tensorflow/mnist/input_data", one_hot=False)
# Sanity check: prints the training-label array shape.
print(mnist.train.labels.shape)
import tensorflow as tf
# Training hyperparameters
learning_rate = 0.1 # gradient-descent step size
num_steps = 1000 # number of training steps
batch_size = 128 # examples per mini-batch
display_step = 100 # NOTE(review): defined but never used in the visible code
# Network architecture
n_hidden_1 = 256 # units in the first hidden layer
n_hidden_2 = 256 # units in the second hidden layer
num_input = 784 # 28*28 flattened pixels (NOTE(review): not referenced below)
num_classes = 10 # MNIST digit classes 0-9
# Network definition
def neural_net(x_dict):
    """Build a two-hidden-layer fully connected network.

    Args:
        x_dict: feature dict from the Estimator input_fn; the flattened
            images are expected under the 'images' key.

    Returns:
        A [batch, num_classes] tensor of unscaled logits (no softmax).
    """
    x = x_dict['images']
    # tf.layers.dense defaults to activation=None, so these layers are
    # purely linear — kept identical to the original network.
    layer_1 = tf.layers.dense(x, n_hidden_1)           # hidden layer 1
    layer_2 = tf.layers.dense(layer_1, n_hidden_2)     # hidden layer 2
    out_layer = tf.layers.dense(layer_2, num_classes)  # output logits
    return out_layer
def model_fn(features, labels, mode):
    """Estimator model_fn covering TRAIN, EVAL and PREDICT modes.

    Args:
        features: feature dict handed in by the input_fn.
        labels: integer class labels (None in PREDICT mode).
        mode: a tf.estimator.ModeKeys value.

    Returns:
        A tf.estimator.EstimatorSpec for the requested mode.
    """
    logits = neural_net(features)

    # Predicted class ids and class probabilities.
    pred_classes = tf.argmax(logits, axis=1)
    pred_probas = tf.nn.softmax(logits)  # NOTE(review): built but not exported

    # In PREDICT mode `labels` is None, so return before building the loss.
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode, predictions=pred_classes)

    # Loss over integer labels; the cast guards against non-int32 label dtypes.
    loss_op = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=tf.cast(labels, dtype=tf.int32)))
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=learning_rate)
    # Tie the step to the global step so checkpoints/step counting work.
    train_op = optimizer.minimize(loss_op, global_step=tf.train.get_global_step())
    # Streaming accuracy metric, reported under the 'accuracy' key in evaluate().
    acc_op = tf.metrics.accuracy(labels=labels, predictions=pred_classes)
    estim_specs = tf.estimator.EstimatorSpec(mode=mode, predictions=pred_classes, loss=loss_op, train_op=train_op,
                                             eval_metric_ops={'accuracy': acc_op})
    return estim_specs
# Wrap model_fn in an Estimator (default model_dir: a temp directory).
model = tf.estimator.Estimator(model_fn)
# Training input: num_epochs=None repeats indefinitely; `steps` below bounds training.
input_fn = tf.estimator.inputs.numpy_input_fn(x={'images': mnist.train.images}, y=mnist.train.labels,
batch_size=batch_size, num_epochs=None, shuffle=True)
model.train(input_fn, steps=num_steps)
# Evaluation input: no shuffling so results are deterministic over the test set.
input_fn = tf.estimator.inputs.numpy_input_fn(x={'images': mnist.test.images}, y=mnist.test.labels,
batch_size=batch_size, shuffle=False)
e = model.evaluate(input_fn)
# "准确率" means "accuracy".
print("准确率", e['accuracy'])
```