import numpy as np
import pandas as pd
import torch
import torch.nn as nn
from matplotlib import pyplot as plt
# --- Data loading and train/test split ----------------------------------
# Load the dataset from Excel; every column but the last is a feature,
# the last column is the binary label (assumed 0/1 — TODO confirm in file).
Data = pd.read_excel('D:/PyCharm/Data.xlsx', index_col=0)
arr = Data.values.astype(np.float32)  # float32 matches torch's default dtype

# Fall back to CPU when no CUDA device is present instead of crashing.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
ts_data = torch.tensor(arr, device=device)
print(ts_data.shape)

# Shuffle the rows, then take the first 70% for training, the rest for test.
train_size = int(len(ts_data) * 0.7)
ts_data = ts_data[torch.randperm(ts_data.size(0)), :]
train_data = ts_data[:train_size, :]
test_data = ts_data[train_size:, :]
print(train_data.shape)
print(test_data.shape)
class DNN(nn.Module):
    """Fully-connected binary classifier: 8 features -> 1 probability."""

    def __init__(self):
        super().__init__()
        # Sigmoid-activated MLP; the final Sigmoid pairs with BCELoss.
        self.net = nn.Sequential(
            nn.Linear(8, 32), nn.Sigmoid(),
            nn.Linear(32, 8), nn.Sigmoid(),
            nn.Linear(8, 4),  nn.Sigmoid(),
            nn.Linear(4, 1),  nn.Sigmoid(),
        )

    def forward(self, x):
        """Map a (batch, 8) tensor to (batch, 1) probabilities in (0, 1)."""
        return self.net(x)
# --- Model, loss, and optimizer -----------------------------------------
# Fall back to CPU when CUDA is unavailable instead of crashing.
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
model = DNN().to(device)
print(model)

# BCELoss expects probabilities, which the model's final Sigmoid provides.
loss_fn = nn.BCELoss(reduction='mean')
learning_rate = 0.005
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
epoches = 5000
losses = []  # per-epoch training loss, recorded for the curve plot

# Features are every column but the last; the last column is the label.
X = train_data[:, :-1]
Y = train_data[:, -1].reshape(-1, 1)
# --- Full-batch training loop -------------------------------------------
for epoch in range(epoches):
    optimizer.zero_grad()      # clear gradients left over from the last step
    prediction = model(X)      # forward pass over the whole training set
    loss = loss_fn(prediction, Y)
    loss.backward()
    optimizer.step()
    losses.append(loss.item())  # keep the scalar loss for the curve plot
# --- Plot the training loss curve ---------------------------------------
plt.figure()  # the Figure handle was never used, so don't bind it
plt.plot(range(epoches), losses)
plt.xlabel('epoch')
plt.ylabel('loss')
plt.show()
# --- Evaluation on the held-out test set --------------------------------
X = test_data[:, :-1]
Y = test_data[:, -1].reshape(-1, 1)
with torch.no_grad():  # inference only — no gradients needed
    Pred = model(X)
    # Threshold probabilities at 0.5 into hard 0/1 predictions in one
    # non-mutating step (replaces the two in-place mask assignments).
    Pred = (Pred >= 0.5).float()
    # Count rows where the prediction matches the label; int() unwraps the
    # 0-dim tensor so the f-string prints a plain number, not a tensor repr.
    correct = int((Pred == Y).all(dim=1).sum())
    total = Y.size(0)
    print(f"预测精度:{100 * correct / total}")