Solving the LintCode Digit Recognition Problem

1. Problem Source

http://www.lintcode.com/ai/digit-recognition/overview

As a beginner in machine learning and Python, I'm sharing my solution here; I'll keep working through other problems in later posts.

2. Source Files

Common.py: one-hot encoding helper

Reader.py: reads the CSV file in batches

inference.py: three-layer neural network (forward pass)

train.py: training script

eval.py: cross-validation script

predict.py: prediction script that generates the submission file, laid out as sketched below
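
The paths used in the scripts below imply roughly the following layout (this is my reading of the code; adjust it to your own setup):

Race/
├── CSVReader/
│   ├── Common.py
│   └── Reader.py
├── inference.py
├── train.py
├── eval.py
├── predict.py
├── logs/                  # TensorBoard summaries written by train.py
├── model/                 # checkpoints (MODEL_SAVE_PATH points here)
└── Data/
    ├── train.csv          # label + 784 pixel columns per row
    ├── eval.csv           # validation split, same format as train.csv
    ├── test.csv           # 784 pixel columns per row, no label
    └── submission.csv     # generated by predict.py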

3. Source Code

3.1 Common.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#copyright 2018 tingwu. all rights reserved
#Date :  2016.03.02
#function: one-hot encoding helper
#Writer: Tingwu
#email: 18510665908@163.com
#Common.py
#=========================================================================

import numpy as np

def oneShot(label_batch, hot_num):
    # Convert an integer label batch of shape [batch, 1] (or [batch])
    # into a one-hot matrix of shape [batch, hot_num].
    num_labels = label_batch.shape[0]
    index_offset = np.arange(num_labels) * hot_num
    num_labels_hot = np.zeros((num_labels, hot_num))
    num_labels_hot.flat[index_offset + label_batch.ravel()] = 1.0
    return num_labels_hot
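
A quick sanity check of the encoding (the label values here are made up for illustration):

import numpy as np
import Common

labels = np.array([[3], [0], [9]])     # a batch of three labels, shape [3, 1]
print(Common.oneShot(labels, 10))
# each row is all zeros except a 1.0 at the label's index:
# row 0 -> column 3, row 1 -> column 0, row 2 -> column 9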

3.2 Reader.py

#!/usr/bin/env python
# -*- coding: utf-8 -*-
#=========================================================================
#copyright 2018 tingwu. all rights reserved
#Date :  2016.03.02
#function: CSV file reader
#Writer: Tingwu
#email: 18510665908@163.com
#Reader.py
#=========================================================================


import os
import tensorflow as tf
import numpy as np


import Common

DEFAULT_LABEL = [0]
DEFAULT_FEATURE = [0.0]



class CSVReader(object):

    def __init__(self, filenamequeue, node_count, skiplinenum = 0, labelpos = 0):
        self.__filename = filenamequeue
        self.__nodecount = node_count
        self.__skiplinenum = skiplinenum
        self.__labelpos = labelpos


    # Reshape a flat 784-element row into a 28x28 image
    # (debug helper, not used by the pipeline).
    def data_transform(self, a):
        print(a)
        b = np.zeros([28, 28])
        for i in range(28):
            for j in range(28):
                b[i][j] = a[28 * i + j]
        return b

    def read_data(self, file_queue):
        # skip the first __skiplinenum lines (the CSV header)
        reader = tf.TextLineReader(skip_header_lines=self.__skiplinenum)
        key, value = reader.read(file_queue)

        defaults = [DEFAULT_FEATURE for i in range(0, self.__nodecount)]
        defaults.insert(self.__labelpos, DEFAULT_LABEL)

        train_item = tf.decode_csv(value, defaults)

        # normalize pixel values to [0, 1]; the slicing below assumes labelpos == 0
        feature = tf.multiply(1.0 / 255.0, train_item[1:])
        label = train_item[0:1]

        return feature, label

    def create_pipeline(self, batch_size=1, num_epochs=None):
        file_queue = tf.train.string_input_producer(self.__filename, shuffle=True, num_epochs=num_epochs)
        example, label = self.read_data(file_queue)
        # keep at least min_after_dequeue examples buffered so shuffling is effective
        min_after_dequeue = 1000
        capacity = min_after_dequeue + 3 * batch_size

        example_batch, label_batch = tf.train.shuffle_batch(
            [example, label], batch_size=batch_size, capacity=capacity,
            min_after_dequeue=min_after_dequeue, num_threads=1
        )
        return example_batch, label_batch





def Test(filename=[None], feature_num=0, batch_size=100):

    reader = CSVReader(filename, feature_num, skiplinenum=1, labelpos=0)
    xs, ys = reader.create_pipeline(batch_size=batch_size, num_epochs=1000)
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    coord = tf.train.Coordinator()
    with tf.Session() as sess:
        sess.run(init)
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:
            count = 0
            while not coord.should_stop():
                xs_batch, ys_batch = sess.run([xs, ys])
                print(xs_batch, Common.oneShot(ys_batch, 10))
                count = count + 1

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')

        finally:
            # When done, ask the threads to stop.
            coord.request_stop()

    coord.join(threads)

if __name__ == '__main__':
    BATCH_SIZE = 100
    FEATURE_NUM = 784
    FILENAME = "../Data/train.csv"
    Test(filename=[FILENAME], feature_num=FEATURE_NUM, batch_size=BATCH_SIZE)
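
Side note: on TF 1.4 and later the same input pipeline can be written with the tf.data API instead of queue runners. A minimal sketch under the same assumptions (one header row, label in column 0, 784 pixel columns):

import tensorflow as tf

def parse_line(line):
    defaults = [[0]] + [[0.0]] * 784               # int label, then 784 float pixels
    cols = tf.decode_csv(line, record_defaults=defaults)
    label = cols[0]
    feature = tf.stack(cols[1:]) * (1.0 / 255.0)   # normalize to [0, 1]
    return feature, label

dataset = (tf.data.TextLineDataset("../Data/train.csv")
           .skip(1)          # header row
           .map(parse_line)
           .shuffle(1000)
           .batch(100)
           .repeat())
features, labels = dataset.make_one_shot_iterator().get_next()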

3.3 inference.py

# -*- coding: utf-8 -*-
#/*************************************************************************
#    > File Name: inference.py
#    > Author: zhangtx
#    > Mail: 18510665908@163.com
#    > Created Time: 2018-03-07 Wednesday 10:46:35
# ************************************************************************/
import tensorflow as tf

INPUT_NODE = 784     # 28x28 input pixels
OUTPUT_NODE = 10     # digit classes 0-9
LAYER1_NODE = 1000   # hidden layer width


def get_weight_variable(shape, regularizer):
    weights = tf.get_variable("weights", shape, initializer=tf.truncated_normal_initializer(stddev=0.1))
    if regularizer is not None:
        tf.add_to_collection('losses', regularizer(weights))
    return weights


def inference(input_tensor, regularizer):
    # hidden layer: 784 -> 1000 with ReLU
    with tf.variable_scope('layer1'):
        weights = get_weight_variable([INPUT_NODE, LAYER1_NODE], regularizer)
        biases = tf.get_variable("biases", [LAYER1_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights) + biases)

    # output layer: 1000 -> 10 logits (softmax is applied inside the loss)
    with tf.variable_scope('layer2'):
        weights = get_weight_variable([LAYER1_NODE, OUTPUT_NODE], regularizer)
        biases = tf.get_variable("biases", [OUTPUT_NODE],
                                 initializer=tf.constant_initializer(0.0))
        layer2 = tf.matmul(layer1, weights) + biases

    return layer2
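
This gives a 784-1000-10 network with 784*1000 + 1000 + 1000*10 + 10 = 795,010 trainable parameters. A throwaway shape check of the forward pass:

import numpy as np
import tensorflow as tf
import inference

x = tf.placeholder(tf.float32, [None, inference.INPUT_NODE])
logits = inference.inference(x, None)   # no regularizer needed for a smoke test

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    out = sess.run(logits, feed_dict={x: np.random.rand(5, 784)})
    print(out.shape)   # expected: (5, 10)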

3.4 train.py

# -*- coding: utf-8 -*-
#/*************************************************************************
#    > File Name: train.py
#    > Author: zhangtx
#    > Mail: 18510665908@163.com
#    > Created Time: 2018-03-07 Wednesday 10:46:35
# ************************************************************************/
import os
import tensorflow as tf
import inference
import CSVReader.Reader as Reader
import CSVReader.Common as common


BATCH_SIZE = 100
LEARNING_RATE_BASE = 0.8
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 40000
MOVING_AVERAGE_DECAY = 0.99




MODEL_SAVE_PATH = "/home/zhangtx/ml/Race/model"
MODEL_NAME = "model.ckpt"


DEFAULT_LABEL = [0]
DEFAULT_FEATURE = [0.0]
RECORD_NUM = 42000
FEATURE_NUM = 784


FILENAME = "./Data/train.csv"
TESTFILENAME = "./Data/test.csv"


def train(filename=[None], feature_num=0, batch_size=100):
    with tf.name_scope("input"):
        x = tf.placeholder(tf.float32, [None, inference.INPUT_NODE], name="x-input")
        y_ = tf.placeholder(tf.float32, [None, inference.OUTPUT_NODE], name="y-input")


    regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)
    y = inference.inference(x, regularizer)
    global_step = tf.Variable(0, trainable=False)


    with tf.name_scope("moving_average"):
        variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
        variable_averages_op = variable_averages.apply(tf.trainable_variables())




    with tf.name_scope("loss_function"):
        cross_entroy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=y, labels=tf.argmax(y_, 1))
        cross_entroy_mean = tf.reduce_mean(cross_entroy)
        loss = cross_entroy_mean + tf.add_n(tf.get_collection('losses'))
        tf.summary.scalar("loss",loss)


    with tf.name_scope("train_step"):
        learning_rate = tf.train.exponential_decay(
            LEARNING_RATE_BASE,
            global_step,
            RECORD_NUM/BATCH_SIZE,
            LEARNING_RATE_DECAY)
        train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)
    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')


    saver = tf.train.Saver()
    writer = tf.summary.FileWriter("./logs/", tf.get_default_graph())






    reader = Reader.CSVReader(filename, feature_num, skiplinenum=1, labelpos=0)
    xs, ys = reader.create_pipeline(batch_size=batch_size, num_epochs=1000)
    init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    coord = tf.train.Coordinator()


    with tf.Session() as sess:
        sess.run(init)
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        merged_summary = tf.summary.merge_all()
        try:
            count = 0
            while not coord.should_stop():
                xs_batch, ys_batch = sess.run([xs, ys])
                if count % 100 == 0:
                    # every 100 batches: trace the run, log the loss and save a checkpoint
                    run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
                    run_metadata = tf.RunMetadata()
                    _, loss_value, step, summary = sess.run([train_op, loss, global_step, merged_summary],
                                                            feed_dict={x: xs_batch, y_: common.oneShot(ys_batch, 10)},
                                                            options=run_options,
                                                            run_metadata=run_metadata)

                    #writer.add_run_metadata(run_metadata, 'step%03d' % count)
                    print("After %d training steps, loss on training batch is %g." % (step, loss_value))
                    saver.save(sess, os.path.join(MODEL_SAVE_PATH, MODEL_NAME), global_step=global_step)

                else:
                    _, loss_value, step, summary = sess.run([train_op, loss, global_step, merged_summary],
                                               feed_dict={x: xs_batch, y_: common.oneShot(ys_batch, 10)})
                writer.add_summary(summary, step)

                count = count + 1


        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')


        finally:
            # When done, ask the threads to stop.
            coord.request_stop()


    coord.join(threads)
    writer.close()

def main(argv=None):
    train(filename=[FILENAME], feature_num=FEATURE_NUM, batch_size=BATCH_SIZE)
if __name__ == '__main__':
    tf.app.run()
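
For reference, tf.train.exponential_decay with staircase left at its default computes the learning rate as LEARNING_RATE_BASE * LEARNING_RATE_DECAY ** (global_step / decay_steps), and decay_steps = RECORD_NUM / BATCH_SIZE = 420 here, i.e. roughly one pass over the 42000 training rows. A tiny sketch of the schedule:

# decayed learning rate after `step` batches
def decayed_lr(step):
    return 0.8 * 0.99 ** (step / 420.0)

print(decayed_lr(0))        # 0.8
print(decayed_lr(42000))    # 0.8 * 0.99**100, about 0.29

Training progress can be watched with tensorboard --logdir=./logs/.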



3.5 eval.py

# -*- coding: utf-8 -*-
#/*************************************************************************
#    > File Name: eval.py
#    > Author: zhangtx
#    > Mail: 18510665908@163.com
#    > Created Time: 2018-03-07 Wednesday 10:46:35
# ************************************************************************/
import time
import tensorflow as tf
import inference
import train
import numpy as np




BATCH_SIZE = 600
LEARNING_RATE_BASE = 0.5
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99




MODEL_SAVE_PATH = "/home/zhangtx/ml/Race/model"
MODEL_NAME = "model.ckpt"


DEFAULT_LABEL = 0
DEFAULT_FEATURE = 0.0


FILENAME = "./Data/eval.csv"


# duplicated from Common.oneShot with hot_num fixed at 10
def oneShot(curr_y_train_batch):
    num_labels = curr_y_train_batch.shape[0]
    index_offset = np.arange(num_labels) * 10
    num_labels_hot = np.zeros((num_labels, 10))
    num_labels_hot.flat[index_offset + curr_y_train_batch.ravel()] = 1.0
    return num_labels_hot


def evaluate():
    with tf.Graph().as_default() as g:

        x = tf.placeholder(tf.float32, [None, inference.INPUT_NODE], name='x-input')
        y_ = tf.placeholder(tf.float32, [None, inference.OUTPUT_NODE], name='y-input')

        # build the forward pass first so its variables exist before initialization
        y = inference.inference(x, None)

        sess = tf.Session()
        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init)

        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        # restore the exponential-moving-average shadow values of the weights
        variable_average = tf.train.ExponentialMovingAverage(train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_average.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        test_data = np.loadtxt(open(FILENAME), delimiter=",", skiprows=1)

        tmpXs = test_data[:, 1:] * 1.0 / 255.0

        ys = np.matrix(test_data[:, 0:1])
        xs = np.matrix(tmpXs)

        curr_x_train_batch = xs.astype(float)
        curr_y_train_batch = ys.astype(int)
        train_batch_labels = oneShot(curr_y_train_batch)

        validate_feed = {x: curr_x_train_batch,
                         y_: train_batch_labels}
        # re-check the latest checkpoint every 10 seconds
        while True:
            ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                accuracy_score = sess.run(accuracy, feed_dict=validate_feed)
                print("After %s training steps, validation accuracy = %g" % (global_step, accuracy_score))
            else:
                print('No checkpoint file found')
                return
            time.sleep(10)








def main(argv=None):
    evaluate()

if __name__ == '__main__':
    tf.app.run()
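
Note that eval.py restores the exponential-moving-average shadow values instead of the raw weights; variables_to_restore() builds the mapping from checkpoint names to graph variables. A standalone sketch of what that mapping looks like (the variable name here is just for illustration):

import tensorflow as tf

v = tf.Variable(0.0, name="weights")
ema = tf.train.ExponentialMovingAverage(0.99)
for ckpt_name in sorted(ema.variables_to_restore()):
    print(ckpt_name)   # prints weights/ExponentialMovingAverage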


3.6 predict.py

# -*- coding: utf-8 -*-
#/*************************************************************************
#    > File Name: predict.py
#    > Author: zhangtx
#    > Mail: 18510665908@163.com
#    > Created Time: 2018-03-07 Wednesday 10:46:35
# ************************************************************************/
import time
import tensorflow as tf
import inference
import train
import numpy as np


BATCH_SIZE = 600
LEARNING_RATE_BASE = 0.5
LEARNING_RATE_DECAY = 0.99
REGULARIZATION_RATE = 0.0001
TRAINING_STEPS = 30000
MOVING_AVERAGE_DECAY = 0.99


MODEL_SAVE_PATH = "/home/zhangtx/ml/Race/model"
MODEL_NAME = "model.ckpt"

DEFAULT_LABEL = 0
DEFAULT_FEATURE = 0.0

FILENAME = "./Data/test.csv"
SUBMISSION = "./Data/submission.csv"
import csv


# duplicated from Common.oneShot with hot_num fixed at 10 (not used in this script)
def oneShot(curr_y_train_batch):
    num_labels = curr_y_train_batch.shape[0]
    index_offset = np.arange(num_labels) * 10
    num_labels_hot = np.zeros((num_labels, 10))
    num_labels_hot.flat[index_offset + curr_y_train_batch.ravel()] = 1.0
    return num_labels_hot

def predict():
    with tf.Graph().as_default() as g:

        x = tf.placeholder(tf.float32, [None, inference.INPUT_NODE], name='x-input')

        # build the forward pass first so its variables exist before initialization
        y = inference.inference(x, None)
        prediction = tf.argmax(y, 1)

        sess = tf.Session()
        init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
        sess.run(init)

        # restore the exponential-moving-average shadow values of the weights
        variable_average = tf.train.ExponentialMovingAverage(train.MOVING_AVERAGE_DECAY)
        variables_to_restore = variable_average.variables_to_restore()
        saver = tf.train.Saver(variables_to_restore)

        # test.csv has no label column: every column is a pixel value
        test_data = np.loadtxt(open(FILENAME), delimiter=",", skiprows=1)

        tmpXs = test_data * 1.0 / 255.0
        xs = np.matrix(tmpXs)
        curr_x_train_batch = xs.astype(float)

        predict_feed = {x: curr_x_train_batch}

        ckpt = tf.train.get_checkpoint_state(train.MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            predictvalue = sess.run(prediction, feed_dict=predict_feed)
            with open(SUBMISSION, "w") as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(["ImageId", "Label"])
                for i in range(len(predictvalue)):
                    writer.writerow([i + 1, predictvalue[i]])

            print(predictvalue.shape)
        sess.close()




def main(argv=None):
    predict()

if __name__ == '__main__':
    tf.app.run()
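
The generated submission.csv starts like this (the label values below are made up for illustration):

ImageId,Label
1,2
2,0
3,9
...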

Tags: machine learning, LintCode, digit recognition