import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from torch.utils.data import Dataset, random_split, DataLoader
class MyData(Dataset):
    """Dataset backed by an Excel sheet.

    Columns 1..8 of the sheet are used as features and the last column as
    the label.  The whole table is loaded into one tensor up front and
    moved to `device`.
    """

    def __init__(self, filepath, device='cuda'):
        """Load `filepath` with pandas and split it into X / Y tensors.

        Args:
            filepath: path to the .xlsx file.
            device: torch device the tensors are placed on.  Defaults to
                'cuda' (the original hard-coded behaviour); pass 'cpu'
                when no GPU is available.
        """
        df = pd.read_excel(filepath)
        arr = df.values.astype(np.float32)  # uniform float32 for torch
        ts = torch.tensor(arr).to(device)
        # Features: columns 1..8 (column 0 is presumably an index/id
        # column — TODO confirm against the spreadsheet layout).
        self.X = ts[:, 1:9]
        # Label: last column, kept as (N, 1) to match the model output.
        self.Y = ts[:, -1].reshape(-1, 1)
        self.len = ts.shape[0]

    def __getitem__(self, index):
        """Return the (features, label) pair at `index`."""
        return self.X[index], self.Y[index]

    def __len__(self):
        """Number of rows loaded from the sheet."""
        return self.len
# Raw string: the Windows path contains backslashes, and '\P' / '\D'
# would otherwise be (invalid) escape sequences — '\P' is already a
# DeprecationWarning and a future SyntaxError.
Data = MyData(r'D:\PyCharm\Data.xlsx')

# 70/30 train/test split of the full dataset.
train_size = int(len(Data) * 0.7)
test_size = len(Data) - train_size
train_data, test_data = random_split(Data, [train_size, test_size])

train_loader = DataLoader(dataset=train_data, batch_size=128, shuffle=True)
test_loader = DataLoader(dataset=test_data, batch_size=64, shuffle=False)
class DNN(nn.Module):
def __init__(self):
super(DNN, self).__init__()
self.net = nn.Sequential(
nn.Linear(8, 32),
nn.Sigmoid(),
nn.Linear(32, 8),
nn.Sigmoid(),
nn.Linear(8, 4),
nn.Sigmoid(),
nn.Linear(4, 1),
nn.Sigmoid()
)
def forward(self, x):
y = self.net(x)
return y
# Put the model on the same device as the dataset tensors ('cuda' and
# 'cuda:0' name the same default GPU; unified for consistency).
model = DNN().to('cuda')
print(model)

# Binary cross-entropy: the network's final Sigmoid emits probabilities.
loss_fn = nn.BCELoss(reduction='mean')
learning_rate = 0.005
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

epochs = 500
losses = []  # one entry per mini-batch, for the training curve below
model.train()
for epoch in range(epochs):
    for x, y in train_loader:
        Pred = model(x)
        loss = loss_fn(Pred, y)
        losses.append(loss.item())
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

# Training curve: per-batch loss over the whole run.  (The unused
# `Fig` binding was dropped.)
plt.figure()
plt.plot(range(len(losses)), losses)
plt.show()
# Evaluate on the held-out split; no gradients needed.
model.eval()
correct = 0
total = 0
with torch.no_grad():
    for x, y in test_loader:
        Pred = model(x)
        # Threshold the probabilities at 0.5 to get hard 0/1 predictions
        # (equivalent to the previous two in-place masked assignments).
        Pred = (Pred >= 0.5).float()
        # .all(dim=1) collapses the (batch, 1) comparison to one bool per
        # row; .item() keeps `correct` a plain int so the printout below
        # shows a number rather than a tensor repr.
        correct += torch.sum((Pred == y).all(dim=1)).item()
        total += y.size(0)
print(f"测试集精准度{100 * correct / total}%")