% Model definition: sequence input -> 1-D conv feature extractor -> transformer-style self-attention classifier
layers = [
%imageInputLayer([num_dim num_seq 1],Name="img_in")
sequenceInputLayer([num_dim num_seq 1],Name="input")
% --- (disabled) earlier 2-D CNN feature-extraction stack, kept for reference ---
%convolution2dLayer([3, 3], 32, "Padding", "same", "Name", "conv1") % first conv layer
%reluLayer("Name", "relu1")
%maxPooling2dLayer(2, "Stride", 2, "Name", "maxpool1") % max pooling
%convolution2dLayer([3, 3], 64, "Padding", "same", "Name", "conv2") % second conv layer
%reluLayer("Name", "relu2")
%maxPooling2dLayer(2, "Stride", 2, "Name", "maxpool2") % max pooling
% Feature-extraction module
convolution2dLayer([1 3], 2,"Padding", "same", "Name", "conv_1") % convolution, [1 3] kernel, 2 filters
reluLayer("Name", "relu_1") % ReLU activation
convolution2dLayer([1 3], 4, "Padding", "same","Name", "conv_2") % convolution, [1 3] kernel, 4 filters
reluLayer("Name", "relu_2")
% --- (disabled) squeeze / attention-gate experiments, kept for reference ---
%reshapeLayer([13,1,1], "Name", "a_reshape", OperationDimension="spatial-channel" ); % add S dimension
%globalAveragePooling2dLayer
%globalAveragePooling1dLayer
%functionLayer(squeezeDim, "Name", "a_squ")
%globalAveragePooling1dLayer
% fullyConnectedLayer(8,"Name","fc")
%fullyConnectedLayer(4,"Name","a_fc_1")
% reluLayer("Name","a_relu")
% fullyConnectedLayer(13,"Name","a_fc_2")
% sigmoidLayer("Name","a_sigmoid")
% reshapeLayer([13,1,1], "Name", "a_reshape", OperationDimension="spatial-channel" ); % add S dimension
% multiplicationLayer(2,"Name","a_mul");
%---------------
% Transformer-style head: flatten conv features into a channel vector per time step.
flattenLayer("Name", "flatten_in"),
%positionEmbeddingLayer(13,maxPosition,Name="pos-emb"),
% Learned position embedding; output size num_dim*num_seq*4 matches the flattened
% conv_2 feature size (4 filters) so it can be summed with the features below.
positionEmbeddingLayer(num_dim*num_seq*4,maxPosition,Name="pos-emb"),
% NOTE(review): additionLayer has TWO inputs; only the first is auto-connected
% when this array is converted to a layerGraph. The second input ("add/in2")
% must be wired explicitly with connectLayers — confirm this is done after
% layerGraph(layers) is called.
additionLayer(2, Name="add"),
% Two stacked self-attention layers; the first uses a causal mask (each
% position attends only to earlier positions), the second is unmasked.
selfAttentionLayer(numHeads,numKeyChannels,'AttentionMask','causal'),
selfAttentionLayer(numHeads,numKeyChannels),
% Keep only the last time step of the sequence for classification.
indexing1dLayer("last"),
fullyConnectedLayer(num_class), % fully connected layer, num_class outputs
softmaxLayer("Name", "softmax") % softmax output layer
% NOTE(review): classificationLayer is deprecated in recent MATLAB releases in
% favor of trainnet with a loss function — confirm against the target version.
classificationLayer("Name", "classification") % final classification layer
];
% Assemble the layer array into a layer graph.
lgraph = layerGraph(layers);
% BUG FIX: additionLayer("add") declares 2 inputs, but converting a plain layer
% array auto-connects only the first ("pos-emb" -> "add/in1"), leaving the graph
% incomplete. Wire the flattened features into the second input so the position
% embedding is ADDED to the token features, per the standard transformer pattern.
lgraph = connectLayers(lgraph, "flatten_in", "add/in2");