代码如下:
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.utils.data as Data
import numpy as np
from sklearn.datasets import load_wine
# Select the GPU only when one is actually present.
# Fix: torch.cuda.is_available must be CALLED — the original referenced the
# function object itself, which is always truthy, so device was 'cuda' even
# on CPU-only machines and every .to(device) call would fail there.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

data = load_wine()  # sklearn wine dataset: 13 features, 3 classes
X = data.data
Y = data.target
x_train = torch.FloatTensor(X)  # features as float32, shape (n_samples, 13)
y_train = torch.LongTensor(Y)   # integer labels, as CrossEntropyLoss expects
class MyDataset(Data.Dataset):
    """Pair a feature tensor with a label tensor for DataLoader consumption.

    Fix: ``__getitem__`` and ``__len__`` previously read the *global*
    ``x_train``/``y_train``, silently ignoring whatever tensors were passed
    to ``__init__``; they now use the stored instance attributes.
    """

    def __init__(self, x_train, y_train):
        self.x_train = x_train  # features, one row per sample
        self.y_train = y_train  # labels aligned with x_train rows

    def __getitem__(self, idx):
        # Return the (features, label) pair for sample ``idx``.
        return self.x_train[idx], self.y_train[idx]

    def __len__(self):
        return len(self.x_train)
# Wrap the tensors in a Dataset, then batch and shuffle via a DataLoader.
train_dataset = MyDataset(x_train, y_train)
train_dataloader = Data.DataLoader(
    train_dataset,
    batch_size=16,
    shuffle=True,
)
class MyModel(nn.Module):
    """Small MLP for the wine dataset: 13 -> 64 -> 8 -> 3.

    Outputs raw logits; CrossEntropyLoss applies log-softmax itself.
    (Attribute names are kept as-is so state_dict keys stay compatible.)
    """

    def __init__(self):
        super().__init__()
        self.liner1 = nn.Linear(13, 64)
        self.activate1 = nn.ReLU()
        self.liner2 = nn.Linear(64, 8)
        self.activate2 = nn.ReLU()
        self.liner3 = nn.Linear(8, 3)

    def forward(self, x):
        # Two hidden layers with ReLU, then a linear read-out of 3 logits.
        hidden = self.activate1(self.liner1(x))
        hidden = self.activate2(self.liner2(hidden))
        return self.liner3(hidden)
# --- model, optimiser, loss ------------------------------------------------
model = MyModel().to(device)  # move parameters onto the chosen device
optimizer = optim.Adam(model.parameters(), lr=0.001)
loss_fn = nn.CrossEntropyLoss()  # expects raw logits + integer class labels

Epoch = 10000  # far more epochs than this tiny dataset needs
idx = 0        # global step counter, used only to throttle logging

# --- training loop ---------------------------------------------------------
for epoch in range(Epoch):
    for batch_x, batch_y in train_dataloader:
        batch_x, batch_y = batch_x.to(device), batch_y.to(device)
        # Standard step: clear grads, forward, loss, backward, update.
        optimizer.zero_grad()
        pred = model(batch_x)
        loss = loss_fn(pred, batch_y)
        idx += 1
        if idx % 1000 == 0:
            print(loss)
        loss.backward()
        optimizer.step()
import random #测试模型
# --- evaluation ------------------------------------------------------------
# Fix: the original drew 1000 samples *with replacement* at random, so some
# samples were counted several times and others never — the score was both
# noisy and non-deterministic. Evaluate every sample exactly once instead,
# in a single vectorized forward pass.
model.eval()  # inference mode (no-op for this pure Linear/ReLU net, but good hygiene)
cnt = len(x_train)
with torch.no_grad():  # no autograd bookkeeping needed for evaluation
    logits = model(x_train.to(device))          # (n_samples, 3) logits
    predictions = logits.argmax(dim=1).cpu()    # predicted class per sample
    success = int((predictions == y_train).sum())
print(success/cnt*100, '%')
# NOTE(review): this still measures TRAINING accuracy — the samples are the
# very data the model was fit on, so the score is optimistically biased.
# A held-out train/test split would give an honest generalization estimate.
测试结果:
测试结果是100%。这其实并不意外:准确率是在训练集本身上测得的,模型只要记住训练样本就能得到满分,这说明的是记忆能力而非泛化能力;要判断模型是否真的过拟合,应先划分出独立的测试集,再在测试集上评估准确率。