import numpy as np
import matplotlib.pyplot as plt
import keras
import csv
from keras.models import Model, Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten, Conv2D, MaxPooling2D
from keras.utils import to_categorical
from dnn_utils_v2 import plt_sample
from inputDataProc import intialize_data_TS
def xxCNN(train_x, train_y, train_y_orig, test_x, test_y, test_y_orig, label_interval):
    classes = len(label_interval)
    #plt_sample(train_y, test_y, label_interval)
    print(train_x.shape)
    print(test_x.shape)
    print(train_y.shape)
    print(test_y.shape)
    # one-hot encode the labels
    train_y = to_categorical(train_y, classes)
    test_y = to_categorical(test_y, classes)
    #kernel_regularizer=keras.regularizers.l2(0.0001))
    #model = load_model('D://xx.h5')
    model = Sequential()
    # single 1x3 convolution over the channels-first input (1, height, width);
    # the explicit name keeps get_layer('conv2d_1') below working regardless of auto-naming
    model.add(Conv2D(1, kernel_size=(1, 3), strides=1, padding='valid',
                     input_shape=(1, train_x.shape[2], train_x.shape[3]),
                     data_format="channels_first", name='conv2d_1',
                     kernel_regularizer=keras.regularizers.l2(0)))
    model.add(Activation('relu'))
    #model.add(Dropout(0.1))
    #model.add(Conv2D(16, kernel_size=(2, 2), strides=2, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
    #model.add(MaxPooling2D(pool_size=(2, 2)))
    #model.add(Dropout(0.1))
    #model.add(Conv2D(16, kernel_size=(2, 2), strides=2, activation='relu', kernel_regularizer=keras.regularizers.l2(0.001)))
    #model.add(Dropout(0.1))
    #model.add(Conv2D(16, (2, 1), activation='relu'))
    model.add(Flatten())
    model.add(Dense(64, name='dense_1', kernel_regularizer=keras.regularizers.l2(0.001)))
    model.add(Activation('relu'))
    model.add(Dropout(0.1))
    model.add(Dense(classes, name='dense_2'))  # one output unit per class
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='sgd', metrics=['accuracy'])
    model.summary()
    model.get_config()
    #reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', patience=1, mode='auto')
    history = model.fit(train_x, train_y, batch_size=256, epochs=50, verbose=1,
                        validation_data=(test_x, test_y))  #, callbacks=[reduce_lr])
    score = model.evaluate(test_x, test_y, verbose=1)
    # save the model architecture as a JSON file
    model_json = model.to_json()
    with open("D://xx.json", 'w') as file:
        file.write(model_json)
    # save the full model (architecture + weights) to HDF5
    model.save('D://xx.h5')
    # write one test sample to a CSV file, used to verify the C++ port
    test_xx = test_x[0]
    test_x_temp = test_xx.reshape([10, 37])
    with open("D://xx.csv", "w", newline='') as csvfile:
        writer = csv.writer(csvfile)
        # writerows writes all rows at once; the with-block closes the file
        writer.writerows(test_x_temp)
    test_xx = test_xx.reshape([1, 1, 10, 37])
    # intermediate layer outputs for the same sample (also used to check the C++ port)
    conv2d_1_model = Model(inputs=model.input, outputs=model.get_layer('conv2d_1').output)
    conv2d_1_res = conv2d_1_model.predict(test_xx)
    dense_1_model = Model(inputs=model.input, outputs=model.get_layer('dense_1').output)
    dense_1_res = dense_1_model.predict(test_xx)
    dense_2_model = Model(inputs=model.input, outputs=model.get_layer('dense_2').output)
    dense_2_res = dense_2_model.predict(test_xx)
    result = model.predict(test_xx)
    print(result)
    result = np.argmax(result, axis=1)
    print(result)
    # out_res is a project-specific reporting helper defined elsewhere in this project
    out_res(result, test_y_orig, label_interval)
    print(result)
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
    score = model.evaluate(train_x, train_y, verbose=1)
    result = model.predict(train_x)
    result = np.argmax(result, axis=1)
    out_res(result, train_y_orig, label_interval)
    print(result)
    print('Train score:', score[0])
    print('Train accuracy:', score[1])
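A minimal driver sketch is shown below. It assumes intialize_data_TS() returns the six data arrays plus label_interval in the order that xxCNN expects; check inputDataProc for the actual signature before relying on it.

# Hypothetical driver code -- assumes intialize_data_TS() yields the arrays in this
# order; the real return values of inputDataProc.intialize_data_TS may differ.
if __name__ == '__main__':
    (train_x, train_y, train_y_orig,
     test_x, test_y, test_y_orig, label_interval) = intialize_data_TS()
    xxCNN(train_x, train_y, train_y_orig, test_x, test_y, test_y_orig, label_interval)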