model.save()保存模型遭遇NotImplementedError报错

复现论文程序遇到的问题

复现论文:Deep Feature Fusion via Two-Stream Convolutional Neural Network for Hyperspectral Image Classification
作者:Xian Li , Student Member , IEEE, Mingli Ding, and Aleksandra Pižurica, Senior Member , IEEE

下面附上完整代码

import keras
import tensorflow as tf
from keras import regularizers
from tensorflow.keras.layers import Conv2D, Conv3D, Flatten, Dense, Reshape, BatchNormalization,GlobalAveragePooling2D,MaxPooling2D,Activation
from tensorflow.keras.layers import Dropout, Input,Lambda
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import ModelCheckpoint
from keras.utils import np_utils

from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix, accuracy_score, classification_report, cohen_kappa_score

from operator import truediv
from plotly.offline import init_notebook_mode

import numpy as np
import matplotlib.pyplot as plt
import scipy.io as sio
import os
import spectral
import glob

def getData():
    dataset=sio.loadmat(r'D:\Program Files (x86)\Anaconda\jupyter_file_path\data\Indian_pines_corrected.mat')['indian_pines_corrected']
    labels=sio.loadmat(r'D:\Program Files (x86)\Anaconda\jupyter_file_path\data\Indian_pines_gt.mat')['indian_pines_gt']
    return dataset,labels


def pca_change(X,num_components):
    newX=np.reshape(X,(-1,X.shape[2]))
    pca=PCA(n_components=num_components,whiten=True)
    newX=pca.fit_transform(newX)
    newX=np.reshape(newX,(X.shape[0],X.shape[1],num_components))
    return newX


def padwithzeros(X,margin=2):
    newX=np.zeros((X.shape[0]+2*margin,X.shape[1]+2*margin,X.shape[2]))
    x_offset=margin
    y_offset=margin
    newX[x_offset:X.shape[0]+x_offset,y_offset:X.shape[1]+y_offset,:]=X
    return newX


def creatCube(X, y, windowsize=25, removeZeroLabels=True):
    margin = int((windowsize - 1) / 2)  # margin=12
    zeroPaddedX = padwithzeros(X, margin=margin)

    patchesData = np.zeros((X.shape[0] * X.shape[1], windowsize, windowsize, X.shape[2]))
    patchesLabels = np.zeros(X.shape[0] * X.shape[1])
    patchIndex = 0
    for r in range(margin, zeroPaddedX.shape[0] - margin):
        for c in range(margin, zeroPaddedX.shape[1] - margin):
            patch = zeroPaddedX[r - margin:r + margin + 1, c - margin:c + margin + 1]
            patchesData[patchIndex, :, :, :] = patch
            patchesLabels[patchIndex] = y[r - margin, c - margin]
            patchIndex = patchIndex + 1
    if removeZeroLabels:
        patchesData = patchesData[patchesLabels > 0, :, :, :]
        patchesLabels = patchesLabels[patchesLabels > 0]
        patchesLabels -= 1
    return patchesData, patchesLabels

def splitTrainTest(X,Y,Ratio,randoms=2019):
    X_train,X_test,Y_train,Y_test=train_test_split(X,Y,test_size=Ratio,random_state=randoms,
                                                  stratify=Y)
    return X_train,X_test,Y_train,Y_test



X_Global,Y_Global=getData()
X_Local,Y_Local=getData()

X_Global=pca_change(X_Global,num_components=3)
X_Local=pca_change(X_Local,num_components=20)

X_Local,Y_Local=creatCube(X_Local,Y_Local,windowsize=7)
X_Global,Y_Global=creatCube(X_Global,Y_Global,windowsize=27)

X_Local_train,X_Local_test,Y_Local_train,Y_Local_test=splitTrainTest(X_Local,Y_Local,0.9)
X_Global_train,X_Global_test,Y_Global_train,Y_Global_test=splitTrainTest(X_Global,Y_Global,0.9)

Y_Local_test=np_utils.to_categorical(Y_Local_test)
Y_Local_train=np_utils.to_categorical(Y_Local_train)
Y_Global_train=np_utils.to_categorical(Y_Global_train)


class Squeeze_excitation_layer(tf.keras.Model):
    def __init__(self, filter_sq, filter_ex):
        super().__init__()
        self.filter_sq = filter_sq
        self.filter_ex = filter_ex
        self.avepool = GlobalAveragePooling2D()
        self.dense1 = Dense(filter_sq)
        self.relu = Activation('relu')
        self.dense2 = Dense(filter_ex)
        self.sigmoid = Activation('sigmoid')

    def call(self, inputs):
        squeeze = self.avepool(inputs)

        excitation = self.dense1(squeeze)
        excitation = self.relu(excitation)
        excitation = self.dense2(excitation)
        excitation = self.sigmoid(excitation)
        excitation = Reshape((1, 1, self.filter_ex))(excitation)

        scale = inputs * excitation
        return scale


def SE_Conv_moule_1(input_layer):
    Conv_layer1 = Conv2D(filters=192, kernel_size=(1, 1), padding='same', activation='relu')(input_layer)
    Conv_layer1 = BatchNormalization()(Conv_layer1)
    output_layer = Squeeze_excitation_layer(192, 192)(Conv_layer1)
    return output_layer


def SE_Conv_moule_2(input_layer):
    Conv_layer1=Conv2D(filters=192,kernel_size=(3,3),padding='same',activation='relu')(input_layer)
    Conv_layer1=BatchNormalization()(Conv_layer1)
    output_layer=Squeeze_excitation_layer(192,192)(Conv_layer1)
    return output_layer

def SE_Conv_moule_3(input_layer):
    Conv_layer1=Conv2D(filters=128,kernel_size=(3,3),padding='same',activation='relu')(input_layer)
    Conv_layer1=BatchNormalization()(Conv_layer1)
    output_layer=Squeeze_excitation_layer(128,128)(Conv_layer1)
    return output_layer


def SE_Res_module(input_layer):
    layer1=BatchNormalization()(input_layer)
    layer2=Conv2D(filters=128,kernel_size=(3,3),padding='same',activation='relu')(layer1)
    layer3=BatchNormalization()(layer2)
    layer4=Conv2D(filters=128,kernel_size=(3,3),padding='same')(layer3)
    layer5=BatchNormalization()(layer4)
    layer6=Squeeze_excitation_layer(128,128)(layer5)
    layer7=tf.add(layer6,layer1)
    layer8=Activation('relu')(layer7)
    output_layer=BatchNormalization()(layer8)
    return output_layer


def Local(input_layer):
    layer2=SE_Conv_moule_1(input_layer)
    layer3=SE_Conv_moule_2(layer2)
    layer4=SE_Conv_moule_2(layer3)
    layer5=SE_Conv_moule_3(layer4)
    output_layer=MaxPooling2D(padding='valid')(layer5)
    return output_layer


def Global(input_layer):
    layer2=SE_Conv_moule_3(input_layer)
    layer3=MaxPooling2D(padding='valid')(layer2)
    layer4=SE_Res_module(layer3)
    layer5=SE_Res_module(layer4)
    layer6=MaxPooling2D(padding='valid')(layer5)
    layer7=SE_Conv_moule_3(layer6)
    layer8=MaxPooling2D(padding='valid')(layer7)
    layer9=SE_Conv_moule_3(layer8)
    output_layer=MaxPooling2D(strides=1,padding='same')(layer9)
    return output_layer


#local input_shape (7,7,20)
#Global input_shape(27,27,3)
Local_W=7
Local_n_component=20
Global_W=27
Global_n_component=3


input_layer_local=Input((Local_W,Local_W,Local_n_component),name='input_layer_local') #input_layer_local=(7,7,20)
input_layer_Global=Input((Global_W,Global_W,Global_n_component),name='input_layer_Global')#input_layer_Glbal=(27,27,3)


output_layer_local=Local(input_layer_local)    #Local 输出 (none,3,3,64)
output_layer_Global=Global(input_layer_Global) #Global 输出 (none,3,3,64)
concat_layer=tf.concat([output_layer_local,output_layer_Global],axis=3)
flatten_layer=Flatten()(concat_layer)
Fully_connect_layer1=Dense(units=200,activation='sigmoid',kernel_regularizer=regularizers.l2(0.02))(flatten_layer)
Fully_conncet_layer2=Dense(units=100,activation='sigmoid')(Fully_connect_layer1)
output_layer_final=Dense(units=16,activation='softmax',name='output_layer_final')(Fully_conncet_layer2)

model=Model(inputs=[input_layer_local, input_layer_Global],outputs=output_layer_final)

#model.summary()

adam=Adam(lr=0.01)
model.compile(loss='categorical_crossentropy',optimizer=adam,metrics=['accuracy'])

filepath='Two_Stream.hdf5'
checkpoint=ModelCheckpoint(filepath,
                           monitor='loss',
                           verbose=1,
                           save_best_only=True,
                           mode='min')
callback_list=[checkpoint]

history=model.fit([X_Local_train,X_Global_train],
                  Y_Local_train,
                  epochs=400,
                  batch_size=40,
                  callbacks=callback_list)

问题:

26/26 [==============================] - ETA: 0s - loss: 2.9780 - accuracy: 0.5225
Epoch 00001: loss improved from inf to 2.97797, saving model to Two_Stream.hdf5
Traceback (most recent call last):
  File "D:/Program Files (x86)/pycharm/pythonProject/AI/papers/Deep Feature Fusion via Two-Stream.py", line 208, in <module>
    callbacks=callback_list)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\engine\training.py", line 66, in _method_wrapper
    return method(self, *args, **kwargs)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\engine\training.py", line 876, in fit
    callbacks.on_epoch_end(epoch, epoch_logs)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\callbacks.py", line 365, in on_epoch_end
    callback.on_epoch_end(epoch, logs)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\callbacks.py", line 1177, in on_epoch_end
    self._save_model(epoch=epoch, logs=logs)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\callbacks.py", line 1214, in _save_model
    self.model.save(filepath, overwrite=True)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\engine\network.py", line 1052, in save
    signatures, options)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\saving\save.py", line 135, in save_model
    model, filepath, overwrite, include_optimizer)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\saving\hdf5_format.py", line 109, in save_model_to_hdf5
    model_metadata = saving_utils.model_metadata(model, include_optimizer)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\saving\saving_utils.py", line 155, in model_metadata
    raise e
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\saving\saving_utils.py", line 152, in model_metadata
    model_config['config'] = model.get_config()
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\engine\network.py", line 968, in get_config
    return copy.deepcopy(get_network_config(self))
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\engine\network.py", line 2119, in get_network_config
    layer_config = serialize_layer_fn(layer)
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\utils\generic_utils.py", line 275, in serialize_keras_object
    raise e
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\utils\generic_utils.py", line 270, in serialize_keras_object
    config = instance.get_config()
  File "D:\Program Files (x86)\Anaconda\envs\TF2.1\lib\site-packages\tensorflow\python\keras\engine\network.py", line 967, in get_config
    raise NotImplementedError
NotImplementedError

Process finished with exit code 1

原因在于模型无法保存,model.save( *.h5)时就会爆出NotImplementedError
暂时还没有解决方法

只有放弃model.save()
转而使用model.save_weights()
退而求其次,save()就是一直失败,但是只保留权重数值weights也不妨碍在测试集上用模型进行测试

最新的说法

NotImplementedError: Saving the model to HDF5 format requires the model to be a Functional model or a Sequential model. It does not work for subclassed models, because such models are defined via the body of a Python method, which isn’t safely serializable. Consider saving to the Tensorflow SavedModel format (by setting save_format=“tf”) or using save_weights.

评论 4
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值