Running a Neural Network on the GPU

Step 1: pick a device, using the GPU when one is available:

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
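As an optional sanity check (not part of the original steps), you can confirm which device was selected and, when CUDA is available, which GPU PyTorch sees:

print(device)  # prints "cuda" or "cpu"
if torch.cuda.is_available():
    print(torch.cuda.get_device_name(0))  # name of the first visible GPU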

Step 2: move everything onto that device.

Send the training set, the validation/test set, the net, the loss, and so on to the device with

.to(device)

For example:

X_train = X_train.to(device)
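Note that for tensors .to(device) is not in-place, so the result must be assigned back as above, whereas for nn.Module objects the call moves the parameters in place. A minimal self-contained sketch of the pattern (the shapes and layer sizes here are made up purely for illustration):

import torch
from torch import nn

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Tensors: .to(device) returns a new tensor, so reassign the result.
X = torch.randn(64, 10).to(device)
y = torch.randint(0, 2, (64,)).to(device)

# Modules: .to(device) moves parameters and buffers in place.
net = nn.Linear(10, 2).to(device)
loss_fn = nn.CrossEntropyLoss()

l = loss_fn(net(X), y)  # runs on the GPU when one is available
print(l.item())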

The complete code is below (it relies on several of my own helper functions, so treat it as a reference only):

from torch import nn
import torch

from easier_nn.classic_dataset import fashion_mnist
from easier_nn.train_net import train_net_with_evaluation

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

fm = fashion_mnist()
fm.load_fashion_mnist(flatten=False)
# Move the datasets onto the device (GPU if available)
fm.X_train = fm.X_train.to(device)
fm.y_train = fm.y_train.to(device)
fm.X_test = fm.X_test.to(device)
fm.y_test = fm.y_test.to(device)

lr = 0.001  # learning rate
num_epochs = 30  # number of epochs
batch_size = 64  # number of samples per minibatch
train_iter, test_iter = fm.load_dataiter(batch_size=batch_size)
# Pre-move every minibatch onto the device (materializes the iterators as lists)
train_iter = [(X.to(device), y.to(device)) for X, y in train_iter]
test_iter = [(X.to(device), y.to(device)) for X, y in test_iter]
# LeNet-style CNN for 1x28x28 Fashion-MNIST images
net = nn.Sequential(
    nn.Conv2d(1, 6, kernel_size=5, padding=2), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Conv2d(6, 16, kernel_size=5), nn.Sigmoid(),
    nn.AvgPool2d(kernel_size=2, stride=2),
    nn.Flatten(),
    nn.Linear(16 * 5 * 5, 120), nn.Sigmoid(),
    nn.Linear(120, 84), nn.Sigmoid(),
    nn.Linear(84, 10))
net.to(device)  # move the model's parameters to the device
loss = nn.CrossEntropyLoss()
loss.to(device)  # harmless here: CrossEntropyLoss has no parameters to move
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

net, _, _, _ = train_net_with_evaluation(fm.X_train, fm.y_train, fm.X_test, fm.y_test, data_iter=train_iter,
                                         test_iter=test_iter, net=net, loss=loss, optimizer=optimizer,
                                         num_epochs=num_epochs, show_interval=2, draw='acc')
fm.predict(net, test_iter, n=18, num_rows=3, num_cols=6)
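Since the code above leans on my own easier_nn helpers, here is a rough, self-contained sketch of the same device-placement idea using only torchvision and a plain training loop; the small network and hyperparameters are illustrative, not the ones used above. Moving each minibatch to the device inside the loop (instead of pre-converting the iterators) also keeps GPU memory usage low:

import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import datasets, transforms

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

train_ds = datasets.FashionMNIST(root="data", train=True, download=True,
                                 transform=transforms.ToTensor())
train_iter = DataLoader(train_ds, batch_size=64, shuffle=True)

net = nn.Sequential(nn.Flatten(), nn.Linear(28 * 28, 256), nn.ReLU(),
                    nn.Linear(256, 10)).to(device)          # model on the device
loss_fn = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

for epoch in range(3):
    for X, y in train_iter:
        X, y = X.to(device), y.to(device)   # move each minibatch to the device
        optimizer.zero_grad()
        l = loss_fn(net(X), y)
        l.backward()
        optimizer.step()
    print(f"epoch {epoch + 1}: last batch loss {l.item():.4f}")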

Results: CPU vs. GPU (screenshots of the training runs).
