基于mxnet的BP神经网络识别26个英文大写字母

本文通过使用MXNet深度学习框架,从数据下载到模型训练,再到预测和评估,全面展示了如何构建并训练一个用于字母识别的多层感知器模型。通过实际操作,深入理解深度学习的工作流程。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

import logging
logging.getLogger().setLevel(logging.INFO)
import mxnet as mx
import numpy as np

# Download the UCI "Letter Recognition" dataset: 20000 rows, where the first
# column is the capital letter A-Z and the remaining 16 columns are integer
# features.
fname = mx.test_utils.download('http://archive.ics.uci.edu/ml/machine-learning-databases/letter-recognition/letter-recognition.data')
# Features: drop the first (letter) column — genfromtxt turns the non-numeric
# letter into NaN anyway.
data = np.genfromtxt(fname, delimiter=',')[:, 1:]
# Labels: map 'A'..'Z' -> 0..25. Use a context manager so the file handle is
# closed deterministically (the original relied on the GC to close it).
with open(fname, 'r') as f:
    label = np.array([ord(line.split(',')[0]) - ord('A') for line in f])

print("data.shape:", data.shape)
print("label.shape:", label.shape)

# Training hyper-parameters.
epoch = 10
batch_size = 32

# 80/20 train/validation split on row count.
ntrain = int(data.shape[0] * 0.8)
train_x, train_y = data[:ntrain, :], label[:ntrain]
val_x, val_y = data[ntrain:, :], label[ntrain:]
train_iter = mx.io.NDArrayIter(train_x, train_y, batch_size, shuffle=True)
val_iter = mx.io.NDArrayIter(val_x, val_y, batch_size)

# MLP symbol: 16 raw features -> 64 ReLU hidden units -> 26-way softmax
# (one output class per letter).
data_sym = mx.sym.Variable('data')
fc1 = mx.sym.FullyConnected(data_sym, name='fc1', num_hidden=64)
act1 = mx.sym.Activation(fc1, name='relu1', act_type="relu")
fc2 = mx.sym.FullyConnected(act1, name='fc2', num_hidden=26)
net = mx.sym.SoftmaxOutput(fc2, name='softmax')
mx.viz.plot_network(net)

# Construct a callback that saves a checkpoint ('mx_mlp-NNNN.params') after
# every epoch.
model_prefix = 'mx_mlp'
checkpoint = mx.callback.do_checkpoint(model_prefix)

# Fall back to CPU when no GPU is available so the script still runs on
# CPU-only machines (the original hard-coded mx.gpu(0), which raises there).
ctx = mx.gpu(0) if mx.context.num_gpus() > 0 else mx.cpu()
mod = mx.mod.Module(symbol=net,
                    context=ctx,
                    data_names=['data'],
                    label_names=['softmax_label'])

# Select how to drive training:
#   0 - low-level API: explicit bind / init / per-batch forward-backward loop
#   1 - high-level Module.fit with per-epoch checkpointing
#   2 - resume training from a checkpoint saved by a previous level-1 run
train_api_method_level = 2

if train_api_method_level == 0:
    # allocate memory given the input data and label shapes
    mod.bind(data_shapes=train_iter.provide_data,
             label_shapes=train_iter.provide_label)
    # initialize parameters by uniform random numbers
    mod.init_params(initializer=mx.init.Uniform(scale=.1))
    # use SGD with learning rate 0.1 to train
    mod.init_optimizer(optimizer='sgd',
                       optimizer_params=(('learning_rate', 0.1),))
    # use accuracy as the metric
    metric = mx.metric.create('acc')

    # NOTE: the original looped `for epoch in range(epoch)`, clobbering the
    # `epoch` hyper-parameter; use a separate loop variable instead.
    for cur_epoch in range(epoch):
        train_iter.reset()
        metric.reset()
        for batch in train_iter:
            mod.forward(batch, is_train=True)       # compute predictions
            mod.update_metric(metric, batch.label)  # accumulate accuracy
            mod.backward()                          # compute gradients
            mod.update()                            # update parameters
        print('Epoch %d, Training %s' % (cur_epoch, metric.get()))

elif train_api_method_level == 1:
    # High-level training loop; saves a checkpoint after every epoch.
    mod.fit(train_iter,
            eval_data=val_iter,
            optimizer='sgd',
            optimizer_params={'learning_rate': 0.1},
            eval_metric='acc',
            num_epoch=epoch,
            epoch_end_callback=checkpoint)
else:
    # Resume from the checkpoint written after `begin_epoch` epochs.
    # Requires 'mx_mlp-0001.params' from a previous run with
    # train_api_method_level == 1.
    begin_epoch = 1  # epoch index is 0-based
    sym, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, begin_epoch)
    # Sanity check: the saved symbol must match the one defined above.
    assert sym.tojson() == net.tojson()

    # fit() assigns the loaded parameters itself via arg_params/aux_params,
    # so an explicit mod.set_params() call is unnecessary here.
    mod.fit(train_iter,
            num_epoch=epoch,
            arg_params=arg_params,
            aux_params=aux_params,
            begin_epoch=begin_epoch)
    
# Prediction and evaluation on the held-out validation set.
y = mod.predict(val_iter)
# One 26-way probability row per validation sample; derive the expected row
# count from the split instead of hard-coding 4000.
nval = data.shape[0] - ntrain
assert y.shape == (nval, 26)
print(y.shape)

score = mod.score(val_iter, ['acc'])
print("Accuracy score is %f" % (score[0][1]))

 

评论 2
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值