caffe---之python接口写braintumor网络,里面包含accuracy,loss层

自己用python接口写了一个BrainTumorLGG的代码,python接口中相对于prototxt有一定的规律。如果查不到,可以参照prototxt中的一些代码来写python.

我写的代码如下:里面包含了一些accuracy和损失层的写法

# -*- coding: utf-8 -*-
import numpy as np
import caffe 
from caffe import layers as L,params as P


def Brain_tumor_LGG(lmdb, batch_size):
    """Build the BrainTumorLGG train/test network definition.

    Args:
        lmdb: path to the LMDB directory holding the image/label data.
        batch_size: number of samples per forward/backward pass.

    Returns:
        A caffe NetParameter protobuf message (from NetSpec.to_proto())
        suitable for serializing to a .prototxt file.
    """
    n = caffe.NetSpec()
    # Data layer emits two tops (data, label); restricted to the TRAIN phase.
    n.data, n.label = L.Data(batch_size=batch_size,
                             include={'phase': caffe.TRAIN},
                             backend=P.Data.LMDB, source=lmdb, ntop=2)

    # Block 1: two 3x3/64 convolutions with leaky ReLU (slope 0.333),
    # followed by 3x3 stride-2 max pooling.
    n.conv1 = L.Convolution(n.data, kernel_size=3, num_output=64, pad=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant', value=0.1))
    n.relu1 = L.ReLU(n.conv1, in_place=True,
                     relu_param={'negative_slope': 0.333})

    n.conv2 = L.Convolution(n.relu1, kernel_size=3, num_output=64, pad=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant', value=0.1))
    n.relu2 = L.ReLU(n.conv2, in_place=True,
                     relu_param={'negative_slope': 0.333})

    # NOTE: if a pycaffe keyword cannot be found, the parameters can be
    # written as a dict mirroring the prototxt (enums such as TRAIN must
    # come from the caffe module, e.g. caffe.TRAIN).
    # FIX: pool over relu2 — the original pooled relu1, which silently
    # bypassed conv2/relu2 in the generated net.
    n.pool1 = L.Pooling(n.relu2, pooling_param={'kernel_size': 3,
                                                'stride': 2,
                                                'pool': P.Pooling.MAX})

    # Block 2: two 3x3/128 convolutions with leaky ReLU, then max pooling.
    n.conv3 = L.Convolution(n.pool1, kernel_size=3, num_output=128, pad=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant', value=0.1))
    n.relu3 = L.ReLU(n.conv3, in_place=True,
                     relu_param={'negative_slope': 0.333})

    n.conv4 = L.Convolution(n.relu3, kernel_size=3, num_output=128, pad=1,
                            weight_filler=dict(type='xavier'),
                            bias_filler=dict(type='constant', value=0.1))
    n.relu4 = L.ReLU(n.conv4, in_place=True,
                     relu_param={'negative_slope': 0.333})

    n.pool2 = L.Pooling(n.relu4, kernel_size=3, stride=2, pool=P.Pooling.MAX)

    # Classifier head: fc(256) -> dropout -> fc(256) -> dropout -> fc(5).
    # FIX: the original assigned n.dropout1 three times; NetSpec keys layers
    # by attribute name, so only the last Dropout survived in the prototxt,
    # and none of the dropouts were wired into the following layers. Use
    # distinct names and feed each dropout output forward.
    n.fc1 = L.InnerProduct(n.pool2, num_output=256,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant', value=0.1))
    n.relu5 = L.ReLU(n.fc1, in_place=True,
                     relu_param={'negative_slope': 0.333})
    n.dropout1 = L.Dropout(n.relu5, dropout_ratio=0.1, in_place=True)

    n.fc2 = L.InnerProduct(n.dropout1, num_output=256,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant', value=0.1))
    n.relu6 = L.ReLU(n.fc2, in_place=True,
                     relu_param={'negative_slope': 0.333})
    n.dropout2 = L.Dropout(n.relu6, dropout_ratio=0.1, in_place=True)

    # 5-way output logits.
    n.fc3 = L.InnerProduct(n.dropout2, num_output=5,
                           weight_filler=dict(type='xavier'),
                           bias_filler=dict(type='constant', value=0.1))
    n.dropout3 = L.Dropout(n.fc3, dropout_ratio=0.1, in_place=True)

    # Loss on all phases; accuracy reported only in the TEST phase.
    n.loss = L.SoftmaxWithLoss(n.dropout3, n.label)
    n.acc = L.Accuracy(n.dropout3, n.label, include={'phase': caffe.TEST})

    return n.to_proto()

if __name__ == "__main__":
    # Generate the train-net prototxt.
    # FIX: corrected filename typo ('BrainTumoeLGG' -> 'BrainTumorLGG') and
    # replaced the Python-2-only print statement with the call form, which
    # behaves identically on Python 2 and 3 for a single argument.
    with open('BrainTumorLGG.prototxt', 'w') as f:
        f.write(str(Brain_tumor_LGG('mnist/mnist_train_lmdb', 128)))
    print('done')

如果是编写deploy.prototxt

则用下面的代码来代替上面代码的数据层

n.data = L.Input(input_param={'shape':{'dim':[1,3,224,224]}})
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值