TensorFlow tf.GradientTape(): Watching Parameter Updates at Each Epoch

This article uses a simple linear model to demonstrate how TensorFlow's tf.GradientTape API can be used to watch the parameters change at each epoch, following the training from the initial loss to convergence and arriving at the optimized weight W and bias B.


This is a very simple example.

We fit y = x * 3 + 2 (where x is one-dimensional and y is a real number).

import tensorflow as tf

# TensorFlow 1.x: turn on eager execution (it is the default in TF 2.x)
tf.enable_eager_execution()
tf.executing_eagerly()  # returns True once eager mode is active

# Generate noisy training data for y = x * 3 + 2
NUM_EXAMPLES = 1000
training_inputs = tf.random_normal([NUM_EXAMPLES])
noise = tf.random_normal([NUM_EXAMPLES])
training_outputs = training_inputs * 3 + 2 + noise

def prediction(input, weight, bias):
    return input * weight + bias

# Mean squared error over the whole training set
def loss(weights, biases):
    error = prediction(training_inputs, weights, biases) - training_outputs
    return tf.reduce_mean(tf.square(error))

# Record the loss computation on a tape, then differentiate it
# with respect to the weights and biases
def grad(weights, biases):
    with tf.GradientTape() as tape:
        loss_value = loss(weights, biases)
    return tape.gradient(loss_value, [weights, biases])

training_steps = 200
learning_rate = 0.01
W = tf.Variable(0.)
B = tf.Variable(0.)

print("Initial loss: {:.3f}".format(loss(W, B)))
for i in range(training_steps):
    dW, dB = grad(W, B)
    W.assign_sub(dW * learning_rate)  # gradient descent step: W -= lr * dL/dW
    B.assign_sub(dB * learning_rate)  # gradient descent step: B -= lr * dL/dB
    if i % 20 == 0:
        print("Loss at step {:03d}: {:.3f}".format(i, loss(W, B)))

print("Final loss: {:.3f}".format(loss(W, B)))
print("W = {}, B = {}".format(W.numpy(), B.numpy()))
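
The listing above uses TensorFlow 1.x APIs (tf.enable_eager_execution, tf.random_normal). For readers on TensorFlow 2.x, where eager execution is on by default and tf.contrib no longer exists, the same experiment can be written as the minimal sketch below. It keeps the variable names from the article but swaps the manual assign_sub update for a Keras SGD optimizer; that substitution is my adaptation, not part of the original post.

import tensorflow as tf

NUM_EXAMPLES = 1000
training_inputs = tf.random.normal([NUM_EXAMPLES])
noise = tf.random.normal([NUM_EXAMPLES])
training_outputs = training_inputs * 3 + 2 + noise

W = tf.Variable(0.)
B = tf.Variable(0.)
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01)

for i in range(200):
    with tf.GradientTape() as tape:
        # Forward pass recorded on the tape
        error = training_inputs * W + B - training_outputs
        loss_value = tf.reduce_mean(tf.square(error))
    # Differentiate the loss with respect to the trainable variables
    dW, dB = tape.gradient(loss_value, [W, B])
    optimizer.apply_gradients(zip([dW, dB], [W, B]))
    if i % 20 == 0:
        print("Loss at step {:03d}: {:.3f}".format(i, loss_value.numpy()))

print("W = {:.3f}, B = {:.3f}".format(W.numpy(), B.numpy()))

With the same data and learning rate, this version should also converge to values close to the true parameters W = 3 and B = 2.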