import torchvision.transforms
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torchvision import datasets
import numpy as np
import matplotlib.pyplot as plt
# 1. Load the MNIST dataset
train_dataset = datasets.MNIST(root="./data", train=True, transform=torchvision.transforms.ToTensor(), download=True)
test_dataset = datasets.MNIST(root="./data", train=False, transform=torchvision.transforms.ToTensor(), download=True)
train_data_size = len(train_dataset)
test_data_size = len(test_dataset)
# 2. Wrap the datasets in DataLoaders
train_loader = DataLoader(dataset=train_dataset, batch_size=64, shuffle=True)
test_loader = DataLoader(dataset=test_dataset, batch_size=64, shuffle=True)
# Hyperparameters
learning_rate = 0.001
epochs = 5
batch_size = 1024  # note: the DataLoaders above were created with batch_size=64
# # Inspect the shape of one batch from the test set
# for data in test_loader:
#     imgs, labels = data
#     print(imgs)
#     print(imgs.shape)
#     print(labels)
#     print(labels.shape)
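# # Optional quick look (assumed helper, not part of the original flow): use the
# # matplotlib import above to preview one sample and confirm the data really is
# # 1x28x28 grayscale digits. Uncomment to run.
# sample_img, sample_label = train_dataset[0]
# plt.imshow(sample_img.squeeze().numpy(), cmap="gray")
# plt.title(f"label: {sample_label}")
# plt.show()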
# Standardize x: zero mean and unit standard deviation
# (this is standardization, not min-max scaling to [0, 1])
def Normaliza(x):
    # mean of the input
    x_mean = np.mean(x)
    # standard deviation of the input
    x_std = np.std(x)
    # subtract the mean and divide by the standard deviation
    X = (x - x_mean) / x_std
    return X
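# # Quick check (assumed usage, not in the original script): the output of
# # Normaliza should have mean ~0 and std ~1. Uncomment to run.
# _demo = Normaliza(np.random.rand(4, 784))
# print(_demo.mean(), _demo.std())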
# # Sigmoid activation (diff == True would mean "return the derivative")
# def Sigmoid(x):
#     # sigmoid function
#     return 1.0 / (1.0 + np.exp(-x))
# Softmax to compute y_hat (predicted class probabilities)
def softmax(x, w, b):
    # linear scores
    y_hat = np.dot(x, w) + b
    # subtract the row-wise max before exponentiating for numerical stability
    y_exp = np.exp(y_hat - np.max(y_hat, axis=1, keepdims=True))
    # normalize each row so the probabilities sum to 1
    y = y_exp / np.sum(y_exp, axis=1, keepdims=True)
    return y
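# # Quick check (assumed shapes, not in the original script): with x (batch, 784),
# # w (784, 10) and b (10,), every row of softmax(x, w, b) should sum to 1.
# # Uncomment to run.
# _x = np.random.rand(2, 784)
# _w = np.zeros((784, 10))
# _b = np.zeros(10)
# print(softmax(_x, _w, _b).sum(axis=1))  # expect [1. 1.]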