1. Dataset
The dataset is the classic MNIST handwritten-digit set: 28x28 grayscale images, flattened to 784-dimensional vectors, with 10 classes.
2. Experiment
2.1 Importing modules
import tensorflow.compat.v1 as tf
import ssl
from tensorflow.examples.tutorials.mnist import input_data
Error log:
1. No module named 'tensorflow.examples'
The examples folder is missing under D:\ProgramSoftware\Anaconda3\Lib\site-packages\tensorflow.
Solution:
Copy an examples folder into that directory.
Link: https://pan.baidu.com/s/1zP8wZ_WT2GBlLhICmFFIqA
Extraction code: 1234
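If copying the examples folder is inconvenient, MNIST can also be loaded through tf.keras.datasets.mnist, which ships with TensorFlow. The sketch below is an alternative loading path of my own, not the code used in this post; note that its split is 60,000 train / 10,000 test rather than the 55,000/5,000/10,000 split produced by input_data.read_data_sets:
import numpy as np
import tensorflow.compat.v1 as tf

# Alternative MNIST loading that does not depend on tensorflow.examples.
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# Flatten the 28x28 images to 784-dim float vectors in [0, 1],
# matching the format returned by input_data.read_data_sets.
x_train = x_train.reshape(-1, 784).astype("float32") / 255.0
x_test = x_test.reshape(-1, 784).astype("float32") / 255.0
# One-hot encode the labels, the equivalent of one_hot=True.
y_train = np.eye(10)[y_train]
y_test = np.eye(10)[y_test]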
2.2 Loading the data
ssl._create_default_https_context = ssl._create_unverified_context  # skip SSL certificate verification for the dataset download
mnist = input_data.read_data_sets("./mnist_data/", one_hot=True)
train_x, train_y = mnist.train.next_batch(100)
print(train_x.shape)  # (100, 784)
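With one_hot=True each label is a 10-dimensional vector, and read_data_sets splits the data into train/validation/test sets. A quick sanity check on the split sizes (illustrative only, not part of the original notebook):
print(train_y.shape)                  # (100, 10)
print(mnist.train.num_examples)       # 55000
print(mnist.validation.num_examples)  # 5000
print(mnist.test.num_examples)        # 10000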
2.3 Network structure
learning_rate = 0.005
training_epochs = 20
batch_size = 100
# 55000 training samples / 100 per batch = 550 batches per epoch
batch_count = int(mnist.train.num_examples/batch_size)
n_hidden_1 = 256   # units in the first hidden layer
n_hidden_2 = 256   # units in the second hidden layer
n_input = 784      # 28 x 28 pixels, flattened
n_classes = 10     # digits 0-9
# placeholders (eager execution must be disabled before tf.placeholder can be used)
tf.disable_eager_execution()
x = tf.placeholder(tf.float32, [None, n_input])    # (None, 784)
y = tf.placeholder(tf.float32, [None, n_classes])  # (None, 10)
tf.random_normal(shape, mean=0.0, stddev=1.0, dtype=tf.float32, seed=None, name=None)
draws a tensor of random values from the specified normal distribution.
Parameters:
shape: shape of the output tensor, required
mean: mean of the normal distribution, default 0
stddev: standard deviation of the normal distribution, default 1.0
dtype: output dtype, default tf.float32
seed: random seed, an integer; once set, the same values are generated on every run
name: name of the operation
A small usage example follows this parameter list.
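The snippet below is illustrative only and not part of the original notebook; with the seed fixed, the same values come back on every run:
# Draw a 2x3 tensor from N(0, 0.1^2); a fixed seed makes the draw reproducible.
sample = tf.random_normal([2, 3], mean=0.0, stddev=0.1, seed=42)
with tf.Session() as demo_sess:
    print(demo_sess.run(sample))  # the same 2x3 matrix every time the script is run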
weights = {
    "weight1": tf.Variable(tf.random_normal([n_input, n_hidden_1])),     # (784, 256)
    "weight2": tf.Variable(tf.random_normal([n_hidden_1, n_hidden_2])),  # (256, 256)
    "out": tf.Variable(tf.random_normal([n_hidden_2, n_classes]))        # (256, 10)
}
biases = {
    "bias1": tf.Variable(tf.random_normal([n_hidden_1])),  # (256)
    "bias2": tf.Variable(tf.random_normal([n_hidden_2])),  # (256)
    "out": tf.Variable(tf.random_normal([n_classes]))      # (10)
}
def multilayer_perceptron_model(x):
    # first hidden layer: x * W1 + b1
    layer_1 = tf.add(tf.matmul(x, weights["weight1"]), biases["bias1"])
    # second hidden layer: layer_1 * W2 + b2
    layer_2 = tf.add(tf.matmul(layer_1, weights["weight2"]), biases["bias2"])
    # output layer: raw logits for the 10 classes
    out_layer = tf.matmul(layer_2, weights["out"]) + biases["out"]
    return out_layer
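Note that the two hidden layers above have no activation function, so the stack reduces to a single linear map followed by softmax. A common variant, shown here as my own sketch rather than the original notebook's code, inserts tf.nn.relu after each hidden layer:
def multilayer_perceptron_relu(x):
    # Same weights and biases, but with a ReLU nonlinearity after each hidden layer.
    layer_1 = tf.nn.relu(tf.add(tf.matmul(x, weights["weight1"]), biases["bias1"]))
    layer_2 = tf.nn.relu(tf.add(tf.matmul(layer_1, weights["weight2"]), biases["bias2"]))
    return tf.matmul(layer_2, weights["out"]) + biases["out"]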
# build the network
logits = multilayer_perceptron_model(x)
# loss function
loss_op = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=y))
# optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate)  # learning_rate = 0.005
train_op = optimizer.minimize(loss_op)
# accuracy
pred = tf.nn.softmax(logits)
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
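As a toy illustration of how the accuracy op works (not in the original notebook): with two samples whose softmax outputs are [0.1, 0.9] and [0.8, 0.2] and whose one-hot labels are both [0, 1], only the first prediction matches, so the accuracy is 0.5:
toy_pred = tf.constant([[0.1, 0.9], [0.8, 0.2]])
toy_labels = tf.constant([[0.0, 1.0], [0.0, 1.0]])
toy_acc = tf.reduce_mean(tf.cast(tf.equal(tf.argmax(toy_pred, 1), tf.argmax(toy_labels, 1)), tf.float32))
with tf.Session() as demo_sess:
    print(demo_sess.run(toy_acc))  # 0.5: only the first prediction is correct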
# variable initialization
init = tf.global_variables_initializer()
2.4 Training the network
with tf.Session() as sess:
    sess.run(init)
    for epoch in range(training_epochs):
        avg_cost = 0
        for i in range(batch_count):
            train_x, train_y = mnist.train.next_batch(batch_size)
            # one optimization step; also fetch the batch loss and batch accuracy
            _, c, a = sess.run([train_op, loss_op, accuracy], feed_dict={x: train_x, y: train_y})
            avg_cost += c / batch_count
        # note: `a` is the accuracy of the last batch in the epoch, not an epoch average
        print("Epoch:", '%02d' % (epoch + 1), "avg cost={:.6f}".format(avg_cost), "accuracy={:.6f}".format(a))
Notebook download:
Link: https://pan.baidu.com/s/1GiZ4UxbqRIEdzM2bHC_WWQ
Extraction code: 1234