- The network configuration file used for testing differs from the one used for training, because that is how the author provides the files.
- Make a copy of MobileNetSSD_train.prototxt and, following MobileNetSSD_deploy.prototxt, change its input and output; the two edits are sketched right after this list, and the final file content follows.
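A minimal sketch of the two edits, assuming the usual MobileNet-SSD layer names (mbox_loc, mbox_conf, mbox_priorbox); the parameter values marked below are placeholders and should be copied from MobileNetSSD_deploy.prototxt rather than taken as-is:

# 1) Input: remove the training "data" layer (type AnnotatedData) and
#    declare a plain input blob instead, exactly as in the deploy file.
input: "data"
input_shape {
  dim: 1
  dim: 3
  dim: 300
  dim: 300
}

# 2) Output: remove the final "mbox_loss" layer (type MultiBoxLoss) and
#    append the test-time head from the deploy file: a softmax over the
#    class scores followed by a DetectionOutput layer.
layer {
  name: "mbox_conf_reshape"
  type: "Reshape"
  bottom: "mbox_conf"
  top: "mbox_conf_reshape"
  reshape_param {
    shape {
      dim: 0
      dim: -1
      dim: 21            # number of classes; copy from the deploy file
    }
  }
}
layer {
  name: "mbox_conf_softmax"
  type: "Softmax"
  bottom: "mbox_conf_reshape"
  top: "mbox_conf_softmax"
  softmax_param {
    axis: 2
  }
}
layer {
  name: "mbox_conf_flatten"
  type: "Flatten"
  bottom: "mbox_conf_softmax"
  top: "mbox_conf_flatten"
  flatten_param {
    axis: 1
  }
}
layer {
  name: "detection_out"
  type: "DetectionOutput"
  bottom: "mbox_loc"
  bottom: "mbox_conf_flatten"
  bottom: "mbox_priorbox"
  top: "detection_out"
  include {
    phase: TEST
  }
  detection_output_param {
    num_classes: 21              # copy from the deploy file
    share_location: true
    background_label_id: 0
    nms_param {
      nms_threshold: 0.45        # copy from the deploy file
      top_k: 100
    }
    code_type: CENTER_SIZE
    keep_top_k: 100
    confidence_threshold: 0.01   # copy from the deploy file
  }
}

With these two edits applied, the final content of the file is as follows: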
name: "MobileNet-SSD"
input: "data"
input_shape {
dim: 1
dim: 3
dim: 300
dim: 300
}
layer {
name: "conv0"
type: "Convolution"
bottom: "data"
top: "conv0"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 32
bias_term: false
pad: 1
kernel_size: 3
stride: 2
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv0/bn"
type: "BatchNorm"
bottom: "conv0"
top: "conv0"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv0/scale"
type: "Scale"
bottom: "conv0"
top: "conv0"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv0/relu"
type: "ReLU"
bottom: "conv0"
top: "conv0"
}
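# Each MobileNet block below is a depthwise-separable convolution:
# "convN/dw" (3x3, group == num_output) is the depthwise convolution and
# the following 1x1 "convN" is the pointwise convolution, each followed by
# BatchNorm + Scale + ReLU.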
layer {
name: "conv1/dw"
type: "Convolution"
bottom: "conv0"
top: "conv1/dw"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 32
bias_term: false
pad: 1
kernel_size: 3
group: 32
#engine: CAFFE
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv1/dw/bn"
type: "BatchNorm"
bottom: "conv1/dw"
top: "conv1/dw"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv1/dw/scale"
type: "Scale"
bottom: "conv1/dw"
top: "conv1/dw"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv1/dw/relu"
type: "ReLU"
bottom: "conv1/dw"
top: "conv1/dw"
}
layer {
name: "conv1"
type: "Convolution"
bottom: "conv1/dw"
top: "conv1"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: false
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv1/bn"
type: "BatchNorm"
bottom: "conv1"
top: "conv1"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv1/scale"
type: "Scale"
bottom: "conv1"
top: "conv1"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv1/relu"
type: "ReLU"
bottom: "conv1"
top: "conv1"
}
layer {
name: "conv2/dw"
type: "Convolution"
bottom: "conv1"
top: "conv2/dw"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 64
bias_term: false
pad: 1
kernel_size: 3
stride: 2
group: 64
#engine: CAFFE
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv2/dw/bn"
type: "BatchNorm"
bottom: "conv2/dw"
top: "conv2/dw"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv2/dw/scale"
type: "Scale"
bottom: "conv2/dw"
top: "conv2/dw"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv2/dw/relu"
type: "ReLU"
bottom: "conv2/dw"
top: "conv2/dw"
}
layer {
name: "conv2"
type: "Convolution"
bottom: "conv2/dw"
top: "conv2"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: false
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv2/bn"
type: "BatchNorm"
bottom: "conv2"
top: "conv2"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv2/scale"
type: "Scale"
bottom: "conv2"
top: "conv2"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv2/relu"
type: "ReLU"
bottom: "conv2"
top: "conv2"
}
layer {
name: "conv3/dw"
type: "Convolution"
bottom: "conv2"
top: "conv3/dw"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
group: 128
#engine: CAFFE
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv3/dw/bn"
type: "BatchNorm"
bottom: "conv3/dw"
top: "conv3/dw"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv3/dw/scale"
type: "Scale"
bottom: "conv3/dw"
top: "conv3/dw"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv3/dw/relu"
type: "ReLU"
bottom: "conv3/dw"
top: "conv3/dw"
}
layer {
name: "conv3"
type: "Convolution"
bottom: "conv3/dw"
top: "conv3"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: false
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv3/bn"
type: "BatchNorm"
bottom: "conv3"
top: "conv3"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv3/scale"
type: "Scale"
bottom: "conv3"
top: "conv3"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv3/relu"
type: "ReLU"
bottom: "conv3"
top: "conv3"
}
layer {
name: "conv4/dw"
type: "Convolution"
bottom: "conv3"
top: "conv4/dw"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 128
bias_term: false
pad: 1
kernel_size: 3
stride: 2
group: 128
#engine: CAFFE
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv4/dw/bn"
type: "BatchNorm"
bottom: "conv4/dw"
top: "conv4/dw"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv4/dw/scale"
type: "Scale"
bottom: "conv4/dw"
top: "conv4/dw"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv4/dw/relu"
type: "ReLU"
bottom: "conv4/dw"
top: "conv4/dw"
}
layer {
name: "conv4"
type: "Convolution"
bottom: "conv4/dw"
top: "conv4"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 256
bias_term: false
kernel_size: 1
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv4/bn"
type: "BatchNorm"
bottom: "conv4"
top: "conv4"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv4/scale"
type: "Scale"
bottom: "conv4"
top: "conv4"
param {
lr_mult: 0.1
decay_mult: 0.0
}
param {
lr_mult: 0.2
decay_mult: 0.0
}
scale_param {
filler {
value: 1
}
bias_term: true
bias_filler {
value: 0
}
}
}
layer {
name: "conv4/relu"
type: "ReLU"
bottom: "conv4"
top: "conv4"
}
layer {
name: "conv5/dw"
type: "Convolution"
bottom: "conv4"
top: "conv5/dw"
param {
lr_mult: 0.1
decay_mult: 0.1
}
convolution_param {
num_output: 256
bias_term: false
pad: 1
kernel_size: 3
group: 256
#engine: CAFFE
weight_filler {
type: "msra"
}
}
}
layer {
name: "conv5/dw/bn"
type: "BatchNorm"
bottom: "conv5/dw"
top: "conv5/dw"
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
param {
lr_mult: 0
decay_mult: 0
}
}
layer {
name: "conv5/dw