Reproducing the FCN-8s network

FCN.py

import numpy as np
import tensorflow as tf
import scipy.io as scio
from scipy import misc
import sys
import logging
import datetime


FLAGS = tf.flags.FLAGS
tf.flags.DEFINE_integer('batchsize', 10, 'training batch size')
tf.flags.DEFINE_float('learning_rate', 1e-4, 'learning rate')
tf.flags.DEFINE_bool('reuse', False, 'reuse the pretrained model')
tf.flags.DEFINE_bool('train', True, 'train or test')
tf.flags.DEFINE_string('checkpoint', 'checkpoint', 'dir to save model')
tf.flags.DEFINE_string('log', 'log', 'dir to summary')


IMAGE_SIZE=224
NUM_OF_CLASSESS = 151
NUM_EPOCHES=100001


def initLogging(logFilename='record.log'):
    """Init for logging: DEBUG and above go to the file, INFO and above are echoed to the console."""
    logging.basicConfig(
        level=logging.DEBUG,
        format='%(asctime)s-%(levelname)s-%(message)s',
        datefmt='%y-%m-%d %H:%M',
        filename=logFilename,
        filemode='w')
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s-%(levelname)s-%(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)
  
  
initLogging()


    
    
def read_record(filename, h, w):
    # queue up the tfrecord file and read serialized examples from it
    filename_queue = tf.train.string_input_producer([filename], shuffle=False)

    train_reader = tf.TFRecordReader()
    _, serialized_example = train_reader.read(filename_queue)

    features = tf.parse_single_example(serialized_example, features={
            'imgs': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.string)})

    # image and label are stored as raw uint8 bytes of a fixed h x w size
    img = tf.decode_raw(features['imgs'], tf.uint8)
    img = tf.reshape(img, [h, w, 3])

    label = tf.decode_raw(features['label'], tf.uint8)
    label = tf.reshape(label, [h, w])

    return img, label



train_img,train_label=read_record('train.tfrecord',224,224)   
train_img_batch,train_label_batch=tf.train.batch([train_img,train_label],batch_size=10,capacity=200,num_threads=6)
train_label_batch=tf.expand_dims(train_label_batch,-1)#expand dim
    
val_img,val_label=read_record('val.tfrecord',224,224)   
val_img_batch,val_label_batch=tf.train.batch([val_img,val_label],batch_size=10,capacity=200,num_threads=6)
val_label_batch=tf.expand_dims(val_label_batch,-1)#expand dim
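# --- a companion writer sketch (an assumption, not part of the original post) ---
# read_record expects every example to hold the image and the label as raw uint8
# bytes of a fixed size. A matching writer could look like the helper below; it is
# only defined here, never called, and the argument names are illustrative.
def write_record(filename, images, labels):
    writer = tf.python_io.TFRecordWriter(filename)
    for img, label in zip(images, labels):  # uint8 arrays: (224, 224, 3) and (224, 224)
        example = tf.train.Example(features=tf.train.Features(feature={
            'imgs': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img.tobytes()])),
            'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label.tobytes()]))}))
        writer.write(example.SerializeToString())
    writer.close()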



########################## pretrained vgg19 #####################################
pre_train_model_data = scio.loadmat('imagenet-vgg-verydeep-19.mat')
weights = np.squeeze(pre_train_model_data['layers'])  # squeeze MATLAB's singleton dims


layers = (
        'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1',

        'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2',

        'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3',
        'relu3_3', 'conv3_4', 'relu3_4', 'pool3',

        'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3',
        'relu4_3', 'conv4_4', 'relu4_4', 'pool4',

        'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3',
        'relu5_3', 'conv5_4', 'relu5_4'
    )
weight_list=[]
bias_list=[]
for i, name in enumerate(layers):
    if name[:4] == 'conv':
        kernels, bias = weights[i][0][0][0][0]
        # MatConvNet stores kernels as [width, height, in_channels, out_channels];
        # transpose to [height, width, in_channels, out_channels] for TensorFlow
        weight_list.append(np.transpose(kernels, axes=(1, 0, 2, 3)))
        bias_list.append(bias.reshape(-1))
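# optional sanity check (not in the original post): after the transpose the first
# kernel should be 3x3 with 3 input channels and 64 filters, and there should be
# one (kernel, bias) pair per 'conv*' entry in `layers`
assert weight_list[0].shape == (3, 3, 3, 64)
assert len(weight_list) == len([n for n in layers if n[:4] == 'conv'])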


def conv_bias_relu(input_tensor, scope_name, ind):
    with tf.variable_scope(scope_name):
        init = tf.constant_initializer(value=weight_list[ind], dtype=tf.float32)
        # the rest of this function is an assumed completion (the original snippet
        # is truncated here): conv with the pretrained kernel, then bias add + ReLU
        kernel = tf.get_variable('weights', shape=weight_list[ind].shape, initializer=init)
        bias = tf.get_variable('bias', shape=bias_list[ind].shape,
                               initializer=tf.constant_initializer(value=bias_list[ind], dtype=tf.float32))
        conv = tf.nn.conv2d(input_tensor, kernel, strides=[1, 1, 1, 1], padding='SAME')
        return tf.nn.relu(tf.nn.bias_add(conv, bias))
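
One possible way to continue from conv_bias_relu is to walk the layers tuple and keep every intermediate tensor, so that pool3 and pool4 are still available for the FCN-8s skip connections. The sketch below is only an assumption, not the original author's code: the build_vgg name, the net dict and the use of max pooling are illustrative.

def build_vgg(image):
    # image: a float32 NHWC batch, e.g. tf.cast(train_img_batch, tf.float32)
    # after subtracting the VGG mean
    net = {}
    current = image
    conv_ind = 0
    for name in layers:
        if name[:4] == 'conv':
            current = conv_bias_relu(current, name, conv_ind)
            conv_ind += 1
        elif name[:4] == 'pool':
            current = tf.nn.max_pool(current, ksize=[1, 2, 2, 1],
                                     strides=[1, 2, 2, 1], padding='SAME')
        # 'relu*' entries are no-ops because conv_bias_relu already applies ReLU
        net[name] = current
    return net

From net['pool3'], net['pool4'] and the final feature map, the usual FCN-8s head adds 1x1 score layers and transposed convolutions that upsample and fuse the predictions back to the input resolution.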
   