tensorflow-chp05

This example shows how to build a simple neural network with TensorFlow: it preprocesses the MNIST handwritten-digit dataset, trains the model, and then measures accuracy on the test set. The model is a multi-layer perceptron with ReLU activations, trained with a mean-squared-error loss.

#coding:utf-8
import tensorflow as tf

lr = 0.01  # learning rate for the manual SGD updates

def preprocess(x, y):
    # Scale pixel values to [0, 1] and flatten each 28x28 image into a 784-d vector.
    x = tf.cast(x, dtype=tf.float32) / 255.
    x = tf.reshape(x, [-1, 28*28])
    # Convert integer labels into one-hot vectors of depth 10.
    y = tf.cast(y, dtype=tf.int32)
    y = tf.one_hot(y, depth=10)
    return x, y

if __name__ == '__main__':
    # Load MNIST and build the training pipeline: shuffle, batch, then preprocess.
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
    train_db = tf.data.Dataset.from_tensor_slices((x_train, y_train))
    train_db = train_db.shuffle(10000)
    train_db = train_db.batch(128)
    train_db = train_db.map(preprocess)

    # Parameters of a 784-256-128-10 MLP, initialized from a truncated normal.
    w1 = tf.Variable(tf.random.truncated_normal([784, 256], stddev=0.1))
    b1 = tf.Variable(tf.zeros([256]))
    w2 = tf.Variable(tf.random.truncated_normal([256, 128], stddev=0.1))
    b2 = tf.Variable(tf.zeros([128]))
    w3 = tf.Variable(tf.random.truncated_normal([128, 10], stddev=0.1))
    b3 = tf.Variable(tf.zeros([10]))

    for epoch in range(20):
        for step, (x, y) in enumerate(train_db):
            with tf.GradientTape() as tape:
                # Forward pass: two ReLU hidden layers and a linear output layer.
                h1 = x @ w1 + b1
                h1 = tf.nn.relu(h1)
                h2 = h1 @ w2 + b2
                h2 = tf.nn.relu(h2)
                out = h2 @ w3 + b3
                # Mean squared error between the one-hot labels and the raw outputs,
                # averaged over the batch.
                loss = tf.keras.losses.mse(y, out)
                loss = tf.reduce_mean(loss)
            # Compute gradients outside the tape context and apply plain SGD updates.
            grads = tape.gradient(loss, [w1, b1, w2, b2, w3, b3])
            w1.assign_sub(lr*grads[0])
            b1.assign_sub(lr*grads[1])
            w2.assign_sub(lr*grads[2])
            b2.assign_sub(lr*grads[3])
            w3.assign_sub(lr*grads[4])
            b3.assign_sub(lr*grads[5])

    # Evaluate on the test set: build the same pipeline (batched, preprocessed)
    # and count correct predictions.
    test_db = tf.data.Dataset.from_tensor_slices((x_test, y_test))
    test_db = test_db.batch(128)
    test_db = test_db.map(preprocess)
    total_correct = 0
    total = x_test.shape[0]
    for x, y in test_db:
        h1 = x @ w1 + b1
        h1 = tf.nn.relu(h1)
        h2 = h1 @ w2 + b2
        h2 = tf.nn.relu(h2)
        out = h2 @ w3 + b3
        # Predicted class is the index of the largest output; labels are one-hot,
        # so take argmax along the class axis (axis=1) for both.
        pred = tf.argmax(out, axis=1)
        y = tf.argmax(y, axis=1)
        correct = tf.equal(pred, y)
        total_correct += tf.reduce_sum(tf.cast(correct, dtype=tf.int32)).numpy()
    print("acc: " + str(total_correct / total))