MXNet动手学深度学习笔记:多层感知机

本文通过使用多层感知机(MLP)对Fashion MNIST数据集进行分类任务,详细介绍了从数据预处理到模型搭建、训练及评估的全过程。通过Python编程语言和MXNet框架实现了一个简单的神经网络模型。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

#coding:utf-8
'''
多层感知机
'''
from mxnet import gluon
from mxnet import ndarray
from mxnet import autograd
import numpy as np
import matplotlib.pyplot as plt
from mxnet import nd

def transform(data, label):
    """Scale image pixels from [0, 255] to [0, 1] float32; cast label to float32."""
    scaled = data.astype('float32') / 255
    return scaled, label.astype('float32')

# ---------------------------------------------------------------------------
# Data loading and parameter initialisation (script-level setup).
# ---------------------------------------------------------------------------

# Load Fashion-MNIST; `transform` normalises pixels to [0, 1] float32.
mnist_train = gluon.data.vision.FashionMNIST(train=True, transform=transform)
mnist_test = gluon.data.vision.FashionMNIST(train=False, transform=transform)
batch_size = 256
train_data = gluon.data.DataLoader(mnist_train, batch_size, shuffle=True)
test_data = gluon.data.DataLoader(mnist_test, batch_size, shuffle=False)

num_inputs = 28 * 28   # each image is flattened to a 784-dim vector
num_outputs = 10       # ten Fashion-MNIST classes

# Hidden-layer width of the MLP.
num_hidden = 256
weight_scale = 0.01

# Network parameters.
# Bug fix: `weight_scale` was declared but never used -- nd.random_normal
# defaults to scale=1, which gives far-too-large initial weights.  Pass it
# explicitly so the weights start small, as the author intended.
W1 = nd.random_normal(shape=(num_inputs, num_hidden), scale=weight_scale)
b1 = nd.zeros(num_hidden)

W2 = nd.random_normal(shape=(num_hidden, num_outputs), scale=weight_scale)
b2 = nd.zeros(num_outputs)

params = [W1, b1, W2, b2]

# Allocate gradient buffers so autograd can populate param.grad on backward.
for param in params:
    param.attach_grad()

# Activation function.
def relu(X):
    """Rectified linear unit: clamp every negative entry of ``X`` to zero."""
    clipped = nd.maximum(X, 0)
    return clipped

# Model definition: chain the fully-connected layers and the activation.
def net(X):
    """Two-layer MLP forward pass: flatten -> hidden (ReLU) -> output logits."""
    flat = X.reshape((-1, num_inputs))
    hidden = relu(nd.dot(flat, W1) + b1)
    logits = nd.dot(hidden, W2) + b2
    return logits

# Cross-entropy loss with the softmax fused in (numerically stabler than
# applying softmax in `net` and a plain cross-entropy separately).
softmax_cross_entropy = gluon.loss.SoftmaxCrossEntropyLoss()

# Optimiser.
def SGD(params, lr):
    """Vanilla SGD: update each parameter in place by -lr * its gradient."""
    for p in params:
        p[:] = p - lr * p.grad

# Accuracy metric.
def accuracy(output, label):
    """Fraction of rows whose arg-max class equals ``label``, as a Python float."""
    predicted = output.argmax(axis=1)
    return nd.mean(predicted == label).asscalar()

# Estimate model accuracy over an entire data iterator.
def evaluate_accuracy(data_iterator, net):
    """Return the mean per-batch accuracy of ``net`` over ``data_iterator``.

    Bug fix: the original ``return`` was indented inside the ``for`` loop, so
    the function exited after the first batch and reported first-batch
    accuracy divided by the total number of batches.  The return now happens
    after the loop, and we count batches ourselves so the function also works
    for iterators without ``__len__`` (and for an empty iterator).
    """
    acc = 0.0
    batches = 0
    for data, label in data_iterator:
        output = net(data)
        acc += accuracy(output, label)
        batches += 1
    return acc / batches if batches else 0.0

# Training loop: plain mini-batch SGD on softmax cross-entropy.
learning_rate = 0.5

epochs = 5
for epoch in range(epochs):
    train_loss = 0.0
    train_acc = 0.0

    for data,label in train_data:
        # Record the forward pass so autograd can build the gradient graph.
        with autograd.record():
            output =  net(data)
            loss = softmax_cross_entropy(output,label)

        loss.backward()
        # loss.backward() sums gradients over the batch, so divide the
        # learning rate by batch_size to take an average-gradient step.
        SGD(params,learning_rate/batch_size)

        # Accumulate per-batch loss and accuracy for epoch-level reporting.
        train_loss += nd.mean(loss).asscalar()
        train_acc += accuracy(output,label)

    test_acc = evaluate_accuracy(test_data,net)
    print('Epoch: %d, Loss %f, Train_Acc:%f, Test_Acc:%f .' %(epoch,train_loss/len(train_data),
            train_acc / len(train_data),test_acc))

def get_text_labels(label):
    """Translate numeric Fashion-MNIST class indices into class-name strings.

    ``label`` may be any iterable of values convertible to ``int``; returns a
    list of the corresponding text labels.
    """
    class_names = (
        't-shirt', 'trouser', 'pullover', 'dress', 'coat',
        'sandal', 'shirt', 'sneaker', 'bag', 'ankle boot',
    )
    return [class_names[int(idx)] for idx in label]

# Prediction demo: print true vs. predicted labels for the first 9 test images.
data, label = mnist_test[0:9]
# show_images(data)
print('true labels')
print(get_text_labels(label))
# argmax over the logit axis gives the predicted class index per image.
predicted_labels = net(data).argmax(axis=1)
print('predicted labels')
print(get_text_labels(predicted_labels.asnumpy()))

 

转载于:https://my.oschina.net/wujux/blog/1809144

评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值