Notes on FCNNs and CNNs

Training a fully connected neural network

import tensorflow as tf
import numpy as np
import os
from matplotlib import pyplot as plt
# Print arrays in full instead of truncating them with ellipses
np.set_printoptions(threshold=np.inf)

# Download the dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
# Scale pixel values to [0, 1]
x_train, x_test = x_train / 255.0, x_test / 255.0

# Sequential model
model = tf.keras.models.Sequential([
    # Flatten the input into a 1-D vector
    tf.keras.layers.Flatten(),
    # Fully connected layer: 128 neurons, ReLU activation
    tf.keras.layers.Dense(128, activation='relu'),
    # Output layer: 10 classes, softmax yields a probability distribution
    tf.keras.layers.Dense(10, activation='softmax')
])

# compile: configure the training setup
model.compile(
    # Adam optimizer
    optimizer='adam',
    # Cross-entropy loss; the softmax output is already a probability
    # distribution, hence from_logits=False (a sketch of the logits variant
    # follows below)
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
    # Labels y_ are integers while predictions y are probability distributions
    # (as if one-hot), so use sparse_categorical_accuracy
    metrics=['sparse_categorical_accuracy'])
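
# Sketch, not part of the original: if the last Dense layer output raw logits
# (no softmax), the equivalent setup would pass from_logits=True so the loss
# applies softmax internally:
# model.compile(
#     optimizer='adam',
#     loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
#     metrics=['sparse_categorical_accuracy'])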

# Resume training from checkpoints
# Load previously saved weights if they exist
checkpoint_save_path = "./checkpoint/mnist.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------------load the model----------------')
    model.load_weights(checkpoint_save_path)
# Save the weights from this round of training
cp_callback = tf.keras.callbacks.ModelCheckpoint(
    # Save path
    filepath=checkpoint_save_path,
    save_weights_only=True,
    save_best_only=True)

# Run the training loop
history = model.fit(
    x_train, y_train, batch_size=32, epochs=5,
    # Validation data; alternatively, validation_split=0.2 carves 20% of the
    # training set out as a validation set (see the sketch after this call)
    validation_data=(x_test, y_test),
    # Validate once per epoch
    validation_freq=1,
    callbacks=[cp_callback])
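
# Sketch of the alternative mentioned above (not the original code): let Keras
# split 20% of the training set off as the validation set instead of passing
# the test set:
# history = model.fit(
#     x_train, y_train, batch_size=32, epochs=5,
#     validation_split=0.2, validation_freq=1,
#     callbacks=[cp_callback])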

# Print the model architecture
model.summary()

# Extract the parameters
# Print the trainable parameters
print(model.trainable_variables)
file = open('./weights.txt', 'w')
# Write each parameter's name, shape, and values
for v in model.trainable_variables:
    file.write(str(v.name)+'\n')
    file.write(str(v.shape)+'\n')
    file.write(str(v.numpy())+'\n')
file.close()

# #################### show ##################### #

# Plot the acc and loss curves for the training and validation sets
# Extract the accuracy and loss values
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

# Plot the curves
# 1-row, 2-column grid; this block draws the first subplot
plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
# Show the legend
plt.legend()

# 1-row, 2-column grid; this block draws the second subplot
plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
# Show the legend
plt.legend()

# Display the figure
plt.show()


Prediction with a fully connected neural network

from PIL import Image
import numpy as np
import tensorflow as tf

model_save_path = './checkpoint/mnist.ckpt'

model = tf.keras.models.Sequential([
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax')])

model.load_weights(model_save_path)

preNum = int(input("input the number of test pictures:"))

for i in range(preNum):
    image_path = input("the path of test picture:")
    img = Image.open(image_path)
    # LANCZOS is the resampling filter formerly called ANTIALIAS
    # (Image.ANTIALIAS was removed in Pillow 10)
    img = img.resize((28, 28), Image.LANCZOS)
    img_arr = np.array(img.convert('L'))
    # Threshold and invert: dark strokes (< 200) become white (255) and the
    # light background becomes black (0), matching MNIST's white-on-black style
    for r in range(28):
        for c in range(28):
            if img_arr[r][c] < 200:
                img_arr[r][c] = 255
            else:
                img_arr[r][c] = 0

    img_arr = img_arr / 255.0
    # Add a batch dimension: (28, 28) -> (1, 28, 28)
    x_predict = img_arr[tf.newaxis, ...]
    result = model.predict(x_predict)

    # The index of the largest output probability is the predicted digit
    pred = tf.argmax(result, axis=1)

    print('\n')
    tf.print(pred)
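
The per-pixel loop above can be replaced with a vectorized NumPy expression. A minimal sketch of the same threshold-and-invert step (the preprocess name is illustrative, not from the original):

import numpy as np

# Same effect as the nested loop: pixels darker than 200 become white (255),
# everything else becomes black (0); then scale to [0, 1]
def preprocess(img_arr):
    img_arr = np.where(img_arr < 200, 255, 0)
    return img_arr / 255.0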

A basic convolutional neural network

import tensorflow as tf
import os
import numpy as np
from matplotlib import pyplot as plt
from tensorflow.keras.layers import Conv2D, BatchNormalization, Activation, MaxPool2D, Dropout, Flatten, Dense
from tensorflow.keras import Model

# Print arrays in full instead of truncating them with ellipses
np.set_printoptions(threshold=np.inf)

# Download the dataset
cifar10 = tf.keras.datasets.cifar10
(x_train, y_train), (x_test, y_test) = cifar10.load_data()
# Scale pixel values to [0, 1]
x_train, x_test = x_train / 255.0, x_test / 255.0

# Define the layers
class Baseline(Model):
    def __init__(self):
        super(Baseline, self).__init__()
        # Conv layer: 6 kernels of size 5x5; with a 3-channel input the kernel
        # depth matches the 3 channels; stride defaults to 1; 'same' zero-pads
        self.c1 = Conv2D(filters=6, kernel_size=(5, 5), padding='same')
        self.b1 = BatchNormalization()  # BN layer: each kernel gets trainable γ and β; sits between the conv and activation layers (batch norm did not exist in the LeNet era)
        self.a1 = Activation('relu')  # Activation layer (the LeNet era generally used sigmoid)
        self.p1 = MaxPool2D(pool_size=(2, 2), strides=2, padding='same')  # Max pooling: 2x2 window, stride 2, zero padding
        self.d1 = Dropout(0.2)  # Dropout layer: randomly deactivates 20% of the neurons during training

        self.flatten = Flatten()  # Flatten the feature maps
        self.f1 = Dense(128, activation='relu')  # Fully connected layer with 128 neurons
        self.d2 = Dropout(0.2)
        self.f2 = Dense(10, activation='softmax')

    def call(self, x):
        x = self.c1(x)
        x = self.b1(x)
        x = self.a1(x)
        x = self.p1(x)
        x = self.d1(x)

        x = self.flatten(x)
        x = self.f1(x)
        x = self.d2(x)
        y = self.f2(x)
        return y

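# Shape trace for one input through Baseline (a sketch; CIFAR-10 images
# are 32x32x3):
# (32, 32, 3) --c1: 6 kernels, 5x5, 'same'--> (32, 32, 6)
# --p1: 2x2 max pool, stride 2--> (16, 16, 6)
# --flatten--> (1536,) --f1--> (128,) --f2--> (10,)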

model = Baseline()

model.compile(optimizer='adam',
              loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
              metrics=['sparse_categorical_accuracy'])

# Define the checkpoint path and file name
# Saving a ckpt file also generates an index file
# This only defines the path; nothing has been written yet
checkpoint_save_path = "./checkpoint/Baseline.ckpt"
if os.path.exists(checkpoint_save_path + '.index'):
    print('-------------load the model-----------------')
    # Read the existing weights with load_weights(path)
    model.load_weights(checkpoint_save_path)

# Checkpoint callback for resuming training
# filepath: where the weights are saved
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_save_path,
                                                 save_weights_only=True,  # save only the weights, not the whole model
                                                 save_best_only=True)  # keep only the best result
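
# Sketch, not in the original: save_best_only compares a monitored metric,
# which defaults to 'val_loss'; to keep the weights with the best validation
# accuracy instead:
# cp_callback = tf.keras.callbacks.ModelCheckpoint(
#     filepath=checkpoint_save_path, save_weights_only=True,
#     save_best_only=True, monitor='val_sparse_categorical_accuracy')
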
# Pass the callback to fit via the callbacks argument
history = model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_test, y_test), validation_freq=1,
                    callbacks=[cp_callback])
model.summary()

# print(model.trainable_variables)
# Write the parameters to a txt file (created if it does not exist)
file = open('./weights.txt', 'w')
for v in model.trainable_variables:
    file.write(str(v.name) + '\n')
    file.write(str(v.shape) + '\n')
    file.write(str(v.numpy()) + '\n')
file.close()

# ##############################################    show   ###############################################

# Plot the acc and loss curves for the training and validation sets
acc = history.history['sparse_categorical_accuracy']
val_acc = history.history['val_sparse_categorical_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']

plt.subplot(1, 2, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.legend()

plt.subplot(1, 2, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.title('Training and Validation Loss')
plt.legend()
plt.show()
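
For comparison, the same conv-BN-activation-pool-dropout stack can be written with the Sequential API instead of subclassing Model. A minimal sketch (equivalent layers, not the author's original code):

model = tf.keras.models.Sequential([
    Conv2D(filters=6, kernel_size=(5, 5), padding='same'),
    BatchNormalization(),
    Activation('relu'),
    MaxPool2D(pool_size=(2, 2), strides=2, padding='same'),
    Dropout(0.2),
    Flatten(),
    Dense(128, activation='relu'),
    Dropout(0.2),
    Dense(10, activation='softmax')
])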
