# PyTorch: a simple binary classification example
import torch
import torch.nn.functional as F
from torch.autograd import Variable   # deprecated since PyTorch 0.4; kept for compatibility with the original code
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
# Alternative data source (commented out): load x/y from a CSV regression dataset instead
# of generating synthetic points.
# data = np.array(pd.read_csv(r"D:\RegressionData\sin.csv"))
# x = data[:, 0]
# y = data[:, -1]
# print(x.shape)
# print(y.shape)
# # x = torch.unsqueeze(torch.from_numpy(x), dim=1)
# x = torch.from_numpy(x)
# y = torch.from_numpy(y)
# Two Gaussian clusters: class 0 centred at (2, 2), class 1 centred at (-2, -2)
n_data = torch.ones(100, 2)
x0 = torch.normal(2 * n_data, 1)    # 100 points around (2, 2)
y0 = torch.zeros(100)               # label 0
x1 = torch.normal(-2 * n_data, 1)   # 100 points around (-2, -2)
y1 = torch.ones(100)                # label 1
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)   # (200, 2) features
y = torch.cat((y0, y1), 0).type(torch.LongTensor)    # (200,) integer labels for CrossEntropyLoss
x, y = Variable(x), Variable(y)     # Variable() is a no-op wrapper on modern PyTorch
print(x.shape)
print(y.shape)
plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy())
plt.show()
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)    # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)    # output layer (raw logits)

    def forward(self, x):
        x = F.relu(self.hidden(x))   # hidden layer + ReLU activation
        x = self.predict(x)          # logits; CrossEntropyLoss applies softmax internally
        return x
net = Net(n_feature=2, n_hidden=20, n_output=2)   # 2 input features, 2 output classes
print(net)
optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()   # expects raw logits and integer class labels
plt.ion()    # interactive mode so the figure updates during training
plt.show()
for t in range(100):
    print("epoch: {}".format(t))
    out = net(x)               # forward pass: raw logits, shape (200, 2)
    loss = loss_func(out, y)   # cross-entropy between logits and integer labels
    optimizer.zero_grad()      # clear gradients from the previous step
    loss.backward()            # back-propagate
    optimizer.step()           # update the weights

    if t % 2 == 0:
        # plot and show the learning process
        plt.cla()
        prediction = torch.max(out, 1)[1]   # predicted class = index of the larger logit
        pred_y = prediction.data.numpy()
        true_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = float((pred_y == true_y).astype(int).sum()) / float(true_y.size)
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)
plt.ioff()   # turn interactive mode off so the final figure stays open
plt.show()
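
# A quick inference sketch: classify a couple of made-up test points with the trained net.
# torch.no_grad() disables gradient tracking and F.softmax turns the raw logits into
# per-class probabilities. The points below are hypothetical, one near each cluster centre.
with torch.no_grad():
    new_points = torch.tensor([[2.0, 2.0], [-2.0, -2.0]])   # hypothetical test points
    logits = net(new_points)
    probs = F.softmax(logits, dim=1)    # per-class probabilities
    pred = torch.max(logits, 1)[1]      # predicted class indices
    print(probs)
    print(pred)   # roughly tensor([0, 1]) once the network has learned to separate the clusters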