问题描述:
用keras实现DeepDream。我们将从一个在 ImageNet 上预训练的卷积神经网络开始。本次选择的模型是Inception模型
实现步骤:
下载数据集和设置DeepDream配置
# Download the sample image that will serve as the DeepDream base image
# (cached locally by Keras), and display it.
from tensorflow import keras
import matplotlib.pyplot as plt
base_image_path = keras.utils.get_file(
"coast.jpg", origin="https://img-datasets.s3.amazonaws.com/coast.jpg")
plt.axis("off")
plt.imshow(keras.utils.load_img(base_image_path))
# Instantiate InceptionV3 pretrained on ImageNet. include_top=False drops the
# classification head: DeepDream only needs the convolutional feature maps.
from tensorflow.keras.applications import inception_v3
model = inception_v3.InceptionV3(weights='imagenet',include_top=False)
# Contribution (weight) of each layer's activations to the DeepDream loss.
# Deeper "mixed" layers get larger coefficients so that more abstract
# patterns dominate the dream.
layer_settings = {
    "mixed4": 1.0,
    "mixed5": 1.5,
    "mixed6": 2.0,
    "mixed7": 2.5,
}
# Map layer name -> symbolic output tensor for every layer we maximize.
# (Dict comprehension replaces the original dict([(k, v) for ...]) form.)
outputs_dict = {
    name: model.get_layer(name).output for name in layer_settings
}
# A model that returns all targeted activations in a single forward pass.
feature_extractor = keras.Model(inputs=model.inputs, outputs=outputs_dict)
定义最大化的损失和梯度上升过程
#定义损失函数
import tensorflow as tf
def compute_loss(input_image):
    """Compute the DeepDream loss to be *maximized* by gradient ascent.

    The loss is a weighted sum, over the layers in ``layer_settings``, of the
    mean squared activation of each layer for ``input_image``.

    Args:
        input_image: Batch of images fed to ``feature_extractor``
            (assumed NHWC float tensor — confirm preprocessing upstream).

    Returns:
        A scalar ``tf.Tensor``.
    """
    features = feature_extractor(input_image)
    loss = tf.zeros(shape=())
    # Iterate (name, activation) pairs directly instead of keys plus a
    # repeated dict lookup per layer.
    for name, activation in features.items():
        coeff = layer_settings[name]
        # Trim a 2-pixel border so border artifacts don't enter the loss.
        loss += coeff * tf.reduce_mean(tf.square(activation[:, 2:-2, 2:-2, :]))
    return loss
#梯度上升过程
@tf.function
def gradient_ascent_step(image, learning_rate):
with tf.GradientTape() as tape:
tape.watch(image)
loss = compute_loss(image)
grads = tape.gradient(loss, image)
grads = tf.math.l2_n