Notes:
A small course assignment, briefly documented.
Useful links:
Approach:
- Load packages
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt  # needed for the plots below
from tensorflow.keras import layers, optimizers, datasets, Model
from tensorflow.keras.layers import Dense, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Input, Activation
- Handwritten digit data (MNIST)
mnist = datasets.mnist
(x0, y0), (x1, y1) = mnist.load_data()
# Autoencoder: targets are the inputs themselves, flattened to 784-D vectors
train_x = x0.reshape(-1, 784)
train_y = train_x
test_x = x1.reshape(-1, 784)
test_y = test_x
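An optional tweak, not in the original notes: casting the uint8 pixels to floats in [0, 1] usually makes training better behaved. If you do this, note that the min_delta=1 in the EarlyStopping callback below is tuned to raw-pixel MSE and would need to shrink to the new loss scale.
# Optional (not in the original pipeline): normalize pixels to [0, 1].
# EarlyStopping's min_delta below would then need to be much smaller, e.g. 1e-4.
train_x = train_x.astype('float32') / 255.0
train_y = train_x
test_x = test_x.astype('float32') / 255.0
test_y = test_x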
- A simple encoder-decoder
A feed-forward NN with layer widths 784-1000-500-250-2-250-500-1000-784 (a 2-D bottleneck in the middle).
#encoder: 784 -> 1000 -> 500 -> 250 -> 2
enc_input = Input([784,])
x = Dense(1000, activation='relu')(enc_input)
x = Dense(500, activation='relu')(x)
x = Dense(250, activation='relu')(x)
enc_output = Dense(2)(x)  # 2-D code, no activation
encoder = Model(enc_input, enc_output)
#decoder: 2 -> 250 -> 500 -> 1000 -> 784
dec_input = Input([2,])
x = Dense(250, activation='relu')(dec_input)
x = Dense(500, activation='relu')(x)
x = Dense(1000, activation='relu')(x)
dec_output = Dense(784)(x)
decoder = Model(dec_input, dec_output)
#combine encoder and decoder into the full autoencoder
input_x = Input([784,])
code = encoder(input_x)
output = decoder(code)
model = Model(input_x, output)
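A quick sanity check that the three models are wired together as intended:
model.summary()  # the encoder and decoder each appear as a single nested sub-model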
- Training, early stopping, and plotting the loss curve
- Training
optimizer = optimizers.Adam(0.00001)
model.compile(optimizer=optimizer, loss='mse')
# min_delta=1 is on the scale of raw-pixel MSE; stop after 5 epochs without improvement
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=1,
                                                  patience=5, verbose=0, mode='auto',
                                                  baseline=None, restore_best_weights=False)
history = model.fit(train_x, train_y, validation_data=(test_x, test_y),
                    batch_size=128, epochs=300, callbacks=[early_stopping])
- Plotting
plt.figure()
epochs = range(len(history.history['loss']))
plt.plot(epochs, history.history['loss'], 'b', label='Training loss')
plt.plot(epochs, history.history['val_loss'], 'r', label='Validation loss')
plt.title('Training and Validation loss')
plt.legend()
plt.show()

- Scatter plot of the encoder output (the 2-D codes)
inp = test_x[-1000:]
code_x = encoder(inp).numpy()
# Group the 2-D codes by their digit label
dt = []
for i in range(10):
    dt.append(code_x[y1[-1000:] == i])
color = ['c', 'b', 'g', 'r', 'orange', 'y', 'k', 'silver', 'pink', 'purple']
plt.figure(figsize=(12, 12))
for i in range(10):
    plot = dt[i]
    plt.scatter(plot[:, 0], plot[:, 1], c=color[i], label=i)
plt.legend(loc='best')
plt.show()

- Inspect the decoder's generated output
import random
# Sample 20 of the 2-D codes and decode them back into images
decode = np.array(random.sample(list(code_x), 20))
print(decode.shape)   # (20, 2)
pred_dec = decoder(decode).numpy().reshape(-1, 28, 28)
print(pred_dec.shape) # (20, 28, 28)
fig, axes = plt.subplots(4, 5, figsize=(5, 5))
for j in range(4):
    for i in range(5):
        axes[j][i].imshow(pred_dec[j * 5 + i])  # one distinct sample per cell
        axes[j][i].axis('off')
plt.show()
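
Besides decoding random codes, it is also informative to run test digits through the full autoencoder and compare them with their reconstructions. A small sketch of my own (not in the original notes):
# Top row: original test digits; bottom row: autoencoder reconstructions.
n = 8
recon = model(test_x[:n]).numpy().reshape(-1, 28, 28)
fig, axes = plt.subplots(2, n, figsize=(n, 2))
for i in range(n):
    axes[0][i].imshow(test_x[i].reshape(28, 28))
    axes[1][i].imshow(recon[i])
    axes[0][i].axis('off')
    axes[1][i].axis('off')
plt.show()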

Other things to try:
- VAE (a minimal sketch follows below)
- GAN
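
Of the two, the VAE is the smaller step from the model above: it replaces the deterministic 2-D code with a diagonal Gaussian, samples from it via the reparameterization trick, and regularizes it toward a standard normal prior with a KL term. A minimal sketch of my own (not from the original notes), assuming TensorFlow 2.x and inputs scaled to [0, 1]:
class Sampling(layers.Layer):
    """Reparameterization trick: z = mean + std * eps, eps ~ N(0, I).
    Also adds the KL divergence to the standard normal prior as a layer loss."""
    def call(self, inputs):
        z_mean, z_log_var = inputs
        kl = -0.5 * tf.reduce_mean(
            tf.reduce_sum(1 + z_log_var - tf.square(z_mean) - tf.exp(z_log_var), axis=-1))
        self.add_loss(kl)
        eps = tf.random.normal(tf.shape(z_mean))
        return z_mean + tf.exp(0.5 * z_log_var) * eps

vae_in = Input([784,])
h = Dense(500, activation='relu')(vae_in)
z_mean = Dense(2)(h)      # mean of the 2-D latent Gaussian
z_log_var = Dense(2)(h)   # log-variance of the 2-D latent Gaussian
z = Sampling()([z_mean, z_log_var])
h = Dense(500, activation='relu')(z)
vae_out = Dense(784, activation='sigmoid')(h)  # sigmoid output expects inputs in [0, 1]
vae = Model(vae_in, vae_out)

# Total loss = pixel-wise binary cross-entropy (from compile) + KL (from the layer).
# The relative weight of the two terms may need tuning.
vae.compile(optimizer='adam', loss='binary_crossentropy')
# vae.fit(train_x / 255.0, train_x / 255.0, batch_size=128, epochs=50)
A GAN would replace the reconstruction objective with an adversarial one, which is a larger change and is left here as a pointer.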