Deep Learning Lab 1


Part 1: PyTorch Basic Operations

1. Using Tensor, initialize a 1×3 matrix M and a 2×1 matrix N, and subtract the two matrices (implement three different forms). Report the results and analyze how the three forms differ (if an error occurs, analyze its cause), and explain what happens during the computation.
2. ① Using Tensor, create two random matrices P and Q of sizes 3×2 and 4×2 respectively, with entries drawn from a normal distribution with mean 0 and standard deviation 0.01; ② reshape Q to obtain its transpose Q^T; ③ compute the matrix product of P and Q^T.
3. Given y_3 = y_1 + y_2 = x^2 + x^3 with x = 1, use what you have learned about Tensors to compute the gradient of y_3 with respect to x, i.e. dy_3/dx.
During the computation, break gradient tracking while computing x^3, observe the result, and analyze the reason.
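As a sanity check on what to expect: with full gradient tracking, dy_3/dx = 2x + 3x^2, which is 5 at x = 1; if x^3 is computed with tracking disabled, y_2 enters the sum as a constant, so the computed gradient is only dy_1/dx = 2x = 2.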
Hint: you can use with torch.no_grad(). For example:
with torch.no_grad():
    y2 = x ** 3

Part 2: Implementing Logistic Regression

1. Implement logistic regression from scratch (using only Tensor- and NumPy-related libraries), train and test it on a synthetic dataset, and analyze the results from several angles, such as the loss and the accuracy on the training and test sets.
(You may use nn.BCELoss or nn.BCEWithLogitsLoss as the loss function; implementing binary cross-entropy from scratch is optional.)
2. Implement logistic regression with torch.nn, train and test it on a synthetic dataset, and analyze the results from several angles, such as the loss and the accuracy on the training and test sets.

Part 3: Implementing Softmax Regression

1. Implement softmax regression from scratch (using only Tensor- and NumPy-related libraries), train and test it on the Fashion-MNIST dataset, and analyze the results from several angles, such as the loss and the accuracy on the training and test sets.
(The cross-entropy loss function must be implemented from scratch.)
2. Implement softmax regression with torch.nn, train and test it on the Fashion-MNIST dataset, and analyze the results from several angles, such as the loss and the accuracy on the training and test sets.

The problems are stated above; the experiment code follows.

 T1

import torch

# Basic operation 1
M = torch.tensor([[20, 23, 10]])  # shape (1, 3)
print(M)

N = torch.tensor([[2],
                  [23]])  # shape (2, 1)
print(N)

# Three forms of subtraction; each broadcasts (1, 3) and (2, 1) to (2, 3)
print(M - N)
print(torch.sub(M, N))
print(M.sub(N))
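
# For the error-analysis part of question 1, a minimal sketch of a fourth,
# in-place form that fails: M.sub_(N) must write the result back into M's own
# (1, 3) storage, but broadcasting would produce a (2, 3) result, so PyTorch
# raises a RuntimeError.
try:
    M.sub_(N)  # would need to store a (2, 3) result in a (1, 3) tensor
except RuntimeError as e:
    print(e)   # e.g. "output with shape [1, 3] doesn't match the broadcast shape [2, 3]"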

# Basic operation 2
P = torch.normal(0, 0.01, (3, 2), dtype=torch.float32)  # 3x2, mean 0, std 0.01
Q = torch.normal(0, 0.01, (4, 2), dtype=torch.float32)  # 4x2, mean 0, std 0.01
print(P)
print(Q)

Q_T = torch.t(Q)  # transpose: 4x2 -> 2x4
print(Q_T)

out = torch.mm(P, Q_T)  # matrix product: (3, 2) @ (2, 4) -> (3, 4)
print(out)
# Basic operation 3

# Method 1: without torch.no_grad()
x = torch.ones(1, 1, requires_grad=True)
y1 = x ** 2
y2 = x ** 3
print(y1)
print(y2)

y3 = y1 + y2
y3.backward()
print(x.grad)  # dy3/dx = 2x + 3x^2 = 5 at x = 1

# Method 2: with torch.no_grad()
x = torch.ones(1, 1, requires_grad=True)
y1 = x ** 2
y2 = x ** 3
print(y1)
print(y2)
with torch.no_grad():
    y2 = x ** 3  # recomputed with gradient tracking disabled

y3 = y1 + y2
y3.backward()
print(x.grad)  # only dy1/dx = 2x = 2 is accumulated
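
In method 1, x.grad is 5: both branches are tracked, so dy3/dx = 2x + 3x^2 = 5 at x = 1. In method 2, y2 is recomputed inside torch.no_grad(), so it carries requires_grad=False and enters y3 as a constant; only the x^2 branch contributes to the backward pass, and x.grad is 2.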

T2

# Question 1: logistic regression from scratch
import torch
from matplotlib import pyplot as plt
import numpy as np



# Synthetic dataset: two Gaussian clusters of 50 points each
n_data = torch.ones(50, 2)
x1 = torch.normal(2 * n_data, 1)   # cluster centered at (2, 2), label 0
x2 = torch.normal(-2 * n_data, 1)  # cluster centered at (-2, -2), label 1
y1 = torch.zeros(50, 1)
y2 = torch.ones(50, 1)
x = torch.cat((x1, x2), 0).type(torch.FloatTensor)
y = torch.cat((y1, y2), 0).type(torch.FloatTensor)
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=np.squeeze(y.data.numpy()), s=100, lw=0, cmap='RdYlGn')
plt.show()

# Initialize parameters
w = torch.tensor(np.random.normal(0, 0.01, (2, 1)), dtype=torch.float32)
b = torch.zeros(1, dtype=torch.float32)
w.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)
epochs = 300
L = []  # loss history
lr = 0.01

# Training loop
for epoch in range(epochs):
    A = 1 / (1 + torch.exp(-(x.mm(w) + b)))  # sigmoid (logistic function)
    J = -torch.mean(y * torch.log(A) + (1 - y) * torch.log(1 - A))  # binary cross-entropy
    L.append(J.data.numpy().item())
    J.backward()
    w.data = w.data - lr * w.grad.data  # manual gradient-descent step
    w.grad.data.zero_()
    b.data = b.data - lr * b.grad.data
    b.grad.data.zero_()

plt.plot(L)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()
print(w)
print(b)

# Decision boundary: w[0]*x1 + w[1]*x2 + b = 0  =>  x2 = -(b + w[0]*x1) / w[1]
xa = list(range(-5, 5))
xb = []
for item in xa:
    xb.append(-(b.data + item * w[0]).detach().numpy() / w[1].detach().numpy())

fig, ax = plt.subplots()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=np.squeeze(y.data.numpy()), s=100, lw=0, cmap='RdYlGn')
plt.plot(xa, xb)
plt.show()
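
# The assignment also asks for accuracy; a minimal sketch, assuming predictions
# are thresholded at 0.5 (a held-out test set could be generated the same way
# as x and y above):
with torch.no_grad():
    probs = 1 / (1 + torch.exp(-(x.mm(w) + b)))
    preds = (probs >= 0.5).float()
    print('train accuracy:', (preds == y).float().mean().item())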

# Question 2: logistic regression with torch.nn
import torch
import numpy as np
import matplotlib.pyplot as plt
from torch import nn

n_data = torch.ones(50, 2)
x1 = torch.normal(2 * n_data, 1)
x2 = torch.normal(-2 * n_data, 1)
y1 = torch.zeros(50, 1)
y2 = torch.ones(50, 1)

x = torch.cat((x1, x2), 0).type(torch.FloatTensor)
y = torch.cat((y1, y2), 0).type(torch.FloatTensor)
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=np.squeeze(y.data.numpy()), s=100, lw=0, cmap='RdYlGn')
plt.show()

class Logistic(nn.Module):
    def __init__(self):
        super(Logistic, self).__init__()
        self.linear = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        y_pred1 = self.linear(x)
        y_pred = self.sigmoid(y_pred1)
        return y_pred
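
# The problem statement also allows nn.BCEWithLogitsLoss. A more numerically
# stable variant (a sketch, not the model trained below) would drop the Sigmoid
# from forward and apply the loss to the raw logits:
#   logit_model = nn.Linear(2, 1)             # no Sigmoid layer
#   logit_criterion = nn.BCEWithLogitsLoss()  # fuses sigmoid + BCE internally
#   loss = logit_criterion(logit_model(x), y)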

model = Logistic()

criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
EPOCHS = 2000
costs = []
for epoch in range(EPOCHS):
    out = model(x)
    loss = criterion(out, y)
    costs.append(loss.data.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
plt.plot(costs)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.show()

w1, w2 = model.linear.weight[0].detach().numpy()
b = model.linear.bias.detach().numpy()
plot_x = range(-5, 6, 1)
plot_y = [-(w1 * item + b) / w2 for item in plot_x]
print(w1, '\n', w2)
print(b)

fig, ax = plt.subplots()
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=np.squeeze(y.data.numpy()), s=100, lw=0, cmap='RdYlGn')
ax.plot(plot_x, plot_y)
plt.show()
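
As with the from-scratch version, the training-set accuracy of the torch.nn model can be checked with a short sketch that thresholds the predicted probability at 0.5:

with torch.no_grad():
    preds = (model(x) >= 0.5).float()
    print('train accuracy:', (preds == y).float().mean().item())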

T3

# Question 1: softmax regression from scratch
import torch
from tqdm import tqdm
import torchvision
import torchvision.transforms as transforms

# Load the Fashion-MNIST dataset
mnist_train = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST',
    train=True,
    download=True,
    transform=transforms.ToTensor()
)
mnist_test = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST',
    train=False,
    download=True,
    transform=transforms.ToTensor()
)

# Build the data loaders
batch_size = 256
train_iter = torch.utils.data.DataLoader(
    mnist_train,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0
)
test_iter = torch.utils.data.DataLoader(
    mnist_test,
    batch_size=batch_size,
    shuffle=False,
    num_workers=0
)


num_inputs = 784
num_outputs = 10
W = torch.normal(0, 0.1, (num_inputs, num_outputs), dtype=torch.float32)  # weights, 784x10
b = torch.normal(0, 0.01, (1, num_outputs), dtype=torch.float32)  # bias, 1x10
W.requires_grad_(requires_grad=True)
b.requires_grad_(requires_grad=True)

# Softmax regression model (returns raw scores; softmax is applied inside the loss)
def net(X):
    X = torch.mm(X.view((-1, num_inputs)), W) + b
    return X
# Cross-entropy loss, implemented from scratch
def loss(y_hat, y):
    y_hat = torch.nn.functional.softmax(y_hat, dim=1)
    return -torch.log(y_hat.gather(1, y.view(-1, 1)))  # pick the probability of the true class
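
# Note: log(softmax(.)) can underflow for strongly negative scores. A
# numerically safer, mathematically equivalent variant (an alternative sketch,
# not used in the training below):
#   def loss(y_hat, y):
#       return -torch.nn.functional.log_softmax(y_hat, dim=1).gather(1, y.view(-1, 1))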
# Optimizer: mini-batch SGD (the summed gradient is averaged via batch_size)
def sgd(params, lr, batch_size):
    for param in params:
        param.data -= lr * param.grad / batch_size

def evaluate_accuracy(data_iter, net):
    right_count, all_num = 0.0, 0
    with torch.no_grad():  # no gradients needed for evaluation
        for x, y in data_iter:
            y_hat = net(x)
            right_count += (y_hat.argmax(dim=1) == y).float().sum().item()
            all_num += y.shape[0]
    return right_count / all_num
# Train the model
lr = 0.1
num_epochs = 5
for epoch in range(num_epochs):
    train_right_sum, train_all_sum, train_loss_sum = 0.0, 0, 0.0
    for X, y in tqdm(train_iter):
        y_hat = net(X)
        l = loss(y_hat, y).sum()
        l.backward()
        sgd([W, b], lr, batch_size)
        W.grad.data.zero_()
        b.grad.data.zero_()
        train_loss_sum += l.item()
        train_right_sum += (y_hat.argmax(dim=1) == y).sum().item()
        train_all_sum += y.shape[0]
    test_acc = evaluate_accuracy(test_iter, net)
    print('epoch %d, loss %.4f' % (epoch + 1, train_loss_sum / train_all_sum))
    print('train accuracy %.3f, test accuracy %.3f' % (train_right_sum / train_all_sum, test_acc))
# Question 2: softmax regression with torch.nn
import torch
from tqdm import tqdm
import torchvision
import torchvision.transforms as transforms

# Load the dataset
mnist_train = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST',
    train=True,
    download=True,
    transform=transforms.ToTensor()
)
mnist_test = torchvision.datasets.FashionMNIST(
    root='~/Datasets/FashionMNIST',
    train=False,
    download=True,
    transform=transforms.ToTensor()
)

batch_size = 256
train_iter = torch.utils.data.DataLoader(
    mnist_train,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0
)
test_iter = torch.utils.data.DataLoader(
    mnist_test,
    batch_size=batch_size,
    shuffle=False,
    num_workers=0
)

num_inputs = 784
num_outputs = 10

# Build the model
class SoftmaxNet(torch.nn.Module):
    def __init__(self, n_features, n_labels):
        super(SoftmaxNet, self).__init__()
        self.linear = torch.nn.Linear(n_features, n_labels)

    def forward(self, x):
        x_ = x.view((-1, num_inputs))
        # Return raw logits: nn.CrossEntropyLoss applies log-softmax internally,
        # so an explicit softmax here would apply it twice
        return self.linear(x_)

# Create the model, loss, and optimizer
net = SoftmaxNet(num_inputs, num_outputs)
lr = 0.1
loss = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(net.parameters(), lr=lr)
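
# For reference: cross-entropy on logits decomposes as log-softmax followed by
# negative log-likelihood, i.e. (a sketch):
#   import torch.nn.functional as F
#   F.cross_entropy(logits, y) == F.nll_loss(F.log_softmax(logits, dim=1), y)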

# Compute accuracy and average loss on the test set
def get_test_info(data_iter, net):
    right_count, all_count = 0.0, 0
    total_loss = 0.0
    with torch.no_grad():  # no gradients needed for evaluation
        for x, y in data_iter:
            y_ = net(x)
            total_loss += loss(y_, y).item()
            right_count += (y_.argmax(dim=1) == y).sum().item()
            all_count += y.shape[0]
    return right_count / all_count, total_loss / len(data_iter)

# Train and test the model
num_epochs = 5
for epoch in range(num_epochs):
    train_right_count, train_all_count = 0.0, 0
    train_loss_sum = 0.0
    for X, y in tqdm(train_iter):
        y_ = net(X)
        l = loss(y_, y)
        l.backward()
        optimizer.step()
        optimizer.zero_grad()
        train_loss_sum += l.item()
        train_right_count += (y_.argmax(dim=1) == y).sum().item()
        train_all_count += y.shape[0]

    test_acc, test_avg_loss = get_test_info(test_iter, net)
    print(f'epoch {epoch + 1}, train loss {train_loss_sum / len(train_iter):.4f}, train accuracy {train_right_count / train_all_count:.3f}')
    print(f'test loss {test_avg_loss:.4f}, test accuracy {test_acc:.3f}')
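
For a quick qualitative check after training, a small sketch that runs the trained net on one test batch and compares predicted and true labels for the first few images:

X, y = next(iter(test_iter))      # one batch of test images
with torch.no_grad():
    preds = net(X).argmax(dim=1)  # predicted class indices
print('predicted:', preds[:10].tolist())
print('actual:   ', y[:10].tolist())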
