Paddle Dynamic Graph Programming

This article shows how to train an MNIST handwritten-digit recognition model with the PaddlePaddle deep learning framework's dynamic graph (dygraph) API, covering data preprocessing, model construction, the training loop, and evaluation.

1. PaddlePaddle: https://www.paddlepaddle.org.cn/ (the code below uses the Paddle 1.x fluid.dygraph API)

2. The data is MNIST; convert the MNIST dataset into the "img \t label" text format, one sample per line (comma-separated pixel values, a tab, then the label). A conversion sketch follows this list.
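Below is a minimal sketch of this conversion, assuming the legacy paddle.dataset.mnist readers from the same Paddle 1.x (fluid) release are available; the file names train.txt and test.txt are arbitrary choices. Each image becomes 784 comma-separated pixel values (already scaled to [-1, 1] by the reader), then a tab and the label, which is the format that reader_creator in the training script expects.

#coding:utf-8
import os
import paddle

def dump_to_text(reader, out_dir, file_name):
    # one sample per line: "<784 comma-separated pixel values>\t<label>"
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    with open(os.path.join(out_dir, file_name), "w") as f:
        for img, label in reader():
            f.write(",".join(str(v) for v in img) + "\t" + str(label) + "\n")

if __name__ == '__main__':
    dump_to_text(paddle.dataset.mnist.train(), "./train_data", "train.txt")
    dump_to_text(paddle.dataset.mnist.test(), "./test_data", "test.txt")

The full training script is below.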

#coding:utf-8
from __future__ import print_function
import numpy as np
import paddle.fluid as fluid
import paddle
import os
from PIL import Image

cluster_train_dir = "./train_data"
cluster_test_dir = "./test_data"

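# A reusable building block: one Conv2D layer followed by one Pool2D layer.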
class SimpleImgConvPool(fluid.dygraph.Layer):
    def __init__(self,
                 name_scope,
                 num_filters,
                 filter_size,
                 pool_size,
                 pool_stride,
                 pool_padding=0,
                 pool_type='max',
                 global_pooling=False,
                 conv_stride=1,
                 conv_padding=0,
                 conv_dilation=1,
                 conv_groups=1,
                 act=None,
                 use_cudnn=False,
                 param_attr=None,
                 bias_attr=None):
        super(SimpleImgConvPool, self).__init__(name_scope)

        self._conv2d = fluid.dygraph.Conv2D(
            self.full_name(),
            num_filters=num_filters,
            filter_size=filter_size,
            stride=conv_stride,
            padding=conv_padding,
            dilation=conv_dilation,
            groups=conv_groups,
            param_attr=param_attr,
            bias_attr=bias_attr,
            act=act,
            use_cudnn=use_cudnn)

        self._pool2d = fluid.dygraph.Pool2D(
            self.full_name(),
            pool_size=pool_size,
            pool_type=pool_type,
            pool_stride=pool_stride,
            pool_padding=pool_padding,
            global_pooling=global_pooling,
            use_cudnn=use_cudnn)

    def forward(self, inputs):
        x = self._conv2d(inputs)
        x = self._pool2d(x)
        return x

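# LeNet-style classifier: two conv-pool blocks followed by a softmax FC layer.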
class MNIST(fluid.dygraph.Layer):
    def __init__(self, name_scope):
        super(MNIST, self).__init__(name_scope)

        self._simple_img_conv_pool_1 = SimpleImgConvPool(
            self.full_name(), 20, 5, 2, 2, act="relu")

        self._simple_img_conv_pool_2 = SimpleImgConvPool(
            self.full_name(), 50, 5, 2, 2, act="relu")

        pool_2_shape = 50 * 4 * 4
        SIZE = 10
        scale = (2.0 / (pool_2_shape**2 * SIZE))**0.5
        self._fc = fluid.dygraph.FC(
            self.full_name(),
            10,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(loc=0.0, scale=scale)),
            act="softmax")
    
    def forward(self, inputs, label=None):
        x = self._simple_img_conv_pool_1(inputs)
        x = self._simple_img_conv_pool_2(x)
        x = self._fc(x)
        if label is not None:
            acc = fluid.layers.accuracy(input=x, label=label)
            return x, acc
        else:
            return x

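# Run the model over the whole test reader and return the mean loss and accuracy.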
def test_mnist(reader, model, batch_size):
    acc_set = []
    avg_loss_set = []
    for batch_id, data in enumerate(reader()):
        dy_x_data = np.array([x[0].reshape(1, 28, 28)
                              for x in data]).astype('float32')
        y_data = np.array(
            [x[1] for x in data]).astype('int64').reshape(batch_size, 1)

        img = fluid.dygraph.to_variable(dy_x_data)
        label = fluid.dygraph.to_variable(y_data)
        label.stop_gradient = True
        prediction, acc = model(img, label)
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        avg_loss = fluid.layers.mean(loss)
        acc_set.append(float(acc.numpy()))
        avg_loss_set.append(float(avg_loss.numpy()))

    # average the per-batch accuracy and loss over the whole test set
    acc_val_mean = np.array(acc_set).mean()
    avg_loss_val_mean = np.array(avg_loss_set).mean()

    return avg_loss_val_mean, acc_val_mean


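# Load the saved parameters and predict the digit in a single image file.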
def inference_mnist():
    with fluid.dygraph.guard():
        mnist_infer = MNIST("mnist")
        # load checkpoint
        model_dict, _ = fluid.dygraph.load_persistables("./output/model/")
        mnist_infer.load_dict(model_dict)
        print("checkpoint loaded")

        # start evaluate mode
        mnist_infer.eval()

        def load_image(file):
            im = Image.open(file).convert('L')
            im = im.resize((28, 28), Image.ANTIALIAS)
            im = np.array(im).reshape(1, 1, 28, 28).astype(np.float32)
            im = im / 255.0 * 2.0 - 1.0
            return im

        cur_dir = os.path.dirname(os.path.realpath(__file__))
        tensor_img = load_image(cur_dir + '/image/2.png')

        results = mnist_infer(fluid.dygraph.to_variable(tensor_img))
        lab = np.argsort(results.numpy())
        print("Inference result of image/infer_3.png is: %d" % lab[0][-1])

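# Yield (image_array, label) samples from the "img \t label" text files in file_dir.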
def reader_creator(file_dir):
    files = os.listdir(file_dir)
    def reader():
        for fi in files:
            with open(file_dir + '/' + fi) as f:
                for line in f:
                    line_list = line.strip().split("\t")
                    if len(line_list) < 2:
                        continue
                    img, label = line_list
                    img_data = img.split(",")
                    img_data = map(float, img_data)
                    img_arr = np.array(img_data)
                    label = int(label)
                    yield img_arr, label

    return reader



if __name__ == '__main__':
    
    train_data = reader_creator(cluster_train_dir)
    test_data = reader_creator(cluster_test_dir)
    save_dirname = "./output/model/"
    with fluid.dygraph.guard():
        epoch_num = 1000
        BATCH_SIZE = 64
        mnist = MNIST("mnist")
        adam = fluid.optimizer.AdamOptimizer(learning_rate=0.001)

        train_reader = paddle.batch(train_data, batch_size=BATCH_SIZE, drop_last=True)
        test_reader = paddle.batch(test_data, batch_size=BATCH_SIZE, drop_last=True)

        for epoch in range(epoch_num):
            for batch_id, data in enumerate(train_reader()):
                dy_x_data = np.array([x[0].reshape(1, 28, 28) for x in data]).astype('float32')
                y_data = np.array([x[1] for x in data]).astype('int64').reshape(-1, 1)

                img = fluid.dygraph.to_variable(dy_x_data)
                label = fluid.dygraph.to_variable(y_data)
                label.stop_gradient = True

                cost, acc = mnist(img, label)

                loss = fluid.layers.cross_entropy(cost, label)
                avg_loss = fluid.layers.mean(loss)

                avg_loss.backward()
                adam.minimize(avg_loss)
                mnist.clear_gradients()

                if batch_id % 100 == 0:
                    print("Loss at epoch {} step {}: {}".format(epoch, batch_id, avg_loss.numpy()))

            # evaluate on the test set after every epoch
            mnist.eval()
            test_cost, test_acc = test_mnist(test_reader, mnist, BATCH_SIZE)
            mnist.train()
            print("Loss at epoch {}, Test avg_loss is: {}, acc is: {}".format(epoch, test_cost, test_acc))

        # save checkpoint
        fluid.dygraph.save_persistables(mnist.state_dict(), save_dirname)
        print("checkpoint saved")

        #inference_mnist()

 
