Writing Caffe Network Configuration Files in Python

The previous post covered handwritten-digit recognition (using MNIST as the example), but the MNIST network is very different from the AlexNet model, so I also wrote an AlexNet network in Python. The code is below:

# -*- coding: utf-8 -*-
"""
使用python写Caffe的网络,生成Caffe的网络配置文件
"""

import sys
caffe_root = "/home/pcb/caffe/"
sys.path.insert(0, caffe_root + "python")
from caffe import layers as L
from caffe import params as P
import caffe

def alexnet(lmdb, batch_size, mean_file, test):

    n = caffe.NetSpec()
    # Data layer: the TRAIN and TEST nets read from different LMDBs;
    # inputs are mean-subtracted, scaled, mirrored, and cropped to 227x227
    if not test:
        n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                                 include=dict(phase=caffe.TRAIN),
                                 transform_param=dict(scale=1./255, mirror=True,
                                                      crop_size=227, mean_file=mean_file),
                                 ntop=2)
    else:
        n.data, n.label = L.Data(batch_size=batch_size, backend=P.Data.LMDB, source=lmdb,
                                 include=dict(phase=caffe.TEST),
                                 transform_param=dict(scale=1./255, mirror=True,
                                                      crop_size=227, mean_file=mean_file),
                                 ntop=2)

    # Convolution layer conv1: 96 filters of 11x11, stride 4
    n.conv1 = L.Convolution(n.data, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                            kernel_size=11, stride=4, num_output=96,
                            weight_filler=dict(type="gaussian", std=0.01),
                            bias_filler=dict(type='constant', value=0))
    # ReLU layer relu1 (in-place, so conv1's blob can still feed norm1)
    n.relu1 = L.ReLU(n.conv1, in_place=True)

    # LRN (local response normalization) layer norm1
    n.norm1 = L.LRN(n.conv1, local_size=5, alpha=0.0001, beta=0.75)

    # Max-pooling layer pool1: 3x3 window, stride 2
    n.pool1 = L.Pooling(n.norm1, kernel_size=3, stride=2, pool=P.Pooling.MAX)

    # Convolution layer conv2: 256 filters of 5x5, pad 2, 2 groups
    n.conv2 = L.Convolution(n.pool1, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                            kernel_size=5, num_output=256, pad=2, group=2,
                            weight_filler=dict(type="gaussian", std=0.01),
                            bias_filler=dict(type='constant', value=0.1))

    # ReLU layer relu2
    n.relu2 = L.ReLU(n.conv2, in_place=True)

    # LRN layer norm2
    n.norm2 = L.LRN(n.conv2, local_size=5, alpha=0.0001, beta=0.75)

    # Max-pooling layer pool2
    n.pool2 = L.Pooling(n.norm2, kernel_size=3, stride=2, pool=P.Pooling.MAX)


    # Convolution layer conv3: 384 filters of 3x3, pad 1
    n.conv3 = L.Convolution(n.pool2, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                            kernel_size=3, num_output=384, pad=1, weight_filler=dict(type="gaussian", std=0.01),
                            bias_filler=dict(type='constant', value=0))
    # ReLU layer relu3
    n.relu3 = L.ReLU(n.conv3, in_place=True)


    # Convolution layer conv4: 384 filters of 3x3, pad 1, 2 groups
    n.conv4 = L.Convolution(n.conv3, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                            kernel_size=3, num_output=384, pad=1, group=2,
                            weight_filler=dict(type="gaussian", std=0.01),
                            bias_filler=dict(type='constant', value=0.1))
    # ReLU layer relu4
    n.relu4 = L.ReLU(n.conv4, in_place=True)


    # Convolution layer conv5: 256 filters of 3x3, pad 1, 2 groups
    n.conv5 = L.Convolution(n.conv4, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                            kernel_size=3, num_output=256, pad=1, group=2,
                            weight_filler=dict(type="gaussian", std=0.01),
                            bias_filler=dict(type='constant', value=0.1))
    # ReLU layer relu5
    n.relu5 = L.ReLU(n.conv5, in_place=True)

    # Max-pooling layer pool5
    n.pool5 = L.Pooling(n.conv5, kernel_size=3, stride=2, pool=P.Pooling.MAX)

    # Fully connected layer fc6: 4096 outputs
    n.fc6 = L.InnerProduct(n.pool5, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                           num_output=4096, weight_filler=dict(type="gaussian", std=0.005),
                           bias_filler=dict(type='constant', value=0.1))

    # ReLU layer relu6
    n.relu6 = L.ReLU(n.fc6, in_place=True)

    # Dropout layer drop6: drops each activation with probability 0.5
    n.drop6 = L.Dropout(n.fc6, dropout_ratio=0.5, in_place=True)

    # Fully connected layer fc7: 4096 outputs
    n.fc7 = L.InnerProduct(n.fc6, param=[dict(lr_mult=1,decay_mult=1), dict(lr_mult=2,decay_mult=0)],
                           num_output=4096, weight_filler=dict(type="gaussian", std=0.005),
                           bias_filler=dict(type='constant', value=0.1))

    # ReLU layer relu7
    n.relu7 = L.ReLU(n.fc7, in_place=True)

    # Dropout layer drop7: drops each activation with probability 0.5
    n.drop7 = L.Dropout(n.fc7, dropout_ratio=0.5, in_place=True)

    # Fully connected layer fc8: 1000-way class scores
    n.fc8 = L.InnerProduct(n.fc7, param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)],
                           num_output=1000, weight_filler=dict(type="gaussian", std=0.01),
                           bias_filler=dict(type='constant', value=0))


    # Softmax loss layer
    n.loss = L.SoftmaxWithLoss(n.fc8, n.label)

    return n.to_proto()
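# Note: the generated test net ends in a loss layer only. A hypothetical
# addition (not in the original post) to also report top-1 accuracy during
# testing would be one extra line inside alexnet(), just before to_proto():
#
#     if test:
#         n.accuracy = L.Accuracy(n.fc8, n.label, include=dict(phase=caffe.TEST))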


with open('/home/pcb/caffe/python/Project/AlexNet_train.prototxt', 'w') as f:
    f.write(str(alexnet('/home/pcb/caffe/examples/mnist/mnist_train_lmdb', 256,
                        '/home/pcb/caffe/examples/mnist/mean.binaryproto', False)))

with open('/home/pcb/caffe/python/Project/AlexNet_test.prototxt', 'w') as f:
    f.write(str(alexnet('/home/pcb/caffe/examples/mnist/mnist_test_lmdb', 100,
                        '/home/pcb/caffe/examples/mnist/mean.binaryproto', True)))

Running the script writes the two AlexNet network configuration files to the folder given above. One caveat: generating the prototxt files always succeeds, but this net cannot actually be trained on the MNIST LMDBs used here, because the 227x227 crop is larger than MNIST's 28x28 images and Caffe's data transformer will refuse it; for a real run, point source and mean_file at an ImageNet-scale LMDB and its matching mean file.
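As a quick sanity check (a minimal sketch, assuming pycaffe and the protobuf Python package are on the path), the generated file can be parsed back into a NetParameter without instantiating the net:

from caffe.proto import caffe_pb2
from google.protobuf import text_format

net = caffe_pb2.NetParameter()
with open('/home/pcb/caffe/python/Project/AlexNet_train.prototxt') as f:
    text_format.Merge(f.read(), net)
print(len(net.layer))          # expect 24: in-place ReLU/Dropout layers still count
print(net.layer[0].type)       # Data
print(net.layer[-1].type)      # SoftmaxWithLoss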
