import torch

def forward(x):
    return x * w

def loss(x, y):
    y_hat = forward(x)
    return (y - y_hat) ** 2

if __name__ == "__main__":
    x_data = [1., 2., 3.]
    y_data = [2., 4., 6.]
    w = torch.tensor([1.0])
    w.requires_grad = True
    for epoch in range(1000):
        for x, y in zip(x_data, y_data):
            losses = loss(x, y)
            losses.backward()
            # w and w.grad are both tensors; update through w.data so the update
            # itself is not recorded in the computation graph
            w.data = w.data - w.grad.data * 0.001
            # after the update it is essential to reset the gradient to zero
            w.grad.data.zero_()
        print(f"epoch:{epoch}\nloss:{losses.item()}")
    print("forward(5):", forward(5))
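As the comments above note, updating through w.data keeps the weight update out of the computation graph. An equivalent and arguably clearer pattern (a sketch, not part of the original) wraps the update in torch.no_grad():

import torch

w = torch.tensor([1.0], requires_grad=True)
loss = (2.0 - w * 1.0) ** 2        # toy one-element loss, just to produce a gradient
loss.backward()
with torch.no_grad():              # nothing inside this block is traced by autograd
    w -= 0.001 * w.grad            # in-place update on the leaf tensor
w.grad.zero_()                     # reset the gradient before the next backward()

The next example rebuilds the same idea with torch.nn modules: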
import torch

class MyModel(torch.nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        # define the model: a single linear layer
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        y_hat = self.linear(x)
        return y_hat

if __name__ == "__main__":
    # every sample must be wrapped in its own brackets; otherwise x_data would be
    # one 4-dim vector instead of four 1-dim samples
    x_data = torch.tensor([[1.], [2.], [3.], [4.]])
    y_data = torch.tensor([[3.], [5.], [7.], [9.]])
    model = MyModel()
    criterion = torch.nn.MSELoss(reduction='sum')  # size_average=False is deprecated
    # optimizer = torch.optim.SGD(params=model.parameters(), lr=0.001)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    for ep in range(10000):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        print(ep, loss.item())
        optimizer.zero_grad()  # don't forget to reset the gradients
        loss.backward()
        optimizer.step()
    print(f'w={model.linear.weight.data}')
    print(f'b={model.linear.bias.data}')  # the learned weight w and bias b
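To make the comment about wrapping every sample in its own brackets concrete, here is a quick sketch of the two shapes:

import torch

flat = torch.tensor([1., 2., 3., 4.])            # shape (4,): one 4-dim vector
column = torch.tensor([[1.], [2.], [3.], [4.]])  # shape (4, 1): four 1-dim samples
print(flat.shape, column.shape)                  # torch.Size([4]) torch.Size([4, 1])
# nn.Linear(1, 1) expects the (4, 1) layout: the batch dimension comes first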
Note that the input training data must be tensors.
In PyTorch the optimizer is decoupled from the model. This is unlike TensorFlow/Keras, where the optimizer and loss are attached when the model is compiled:
model.compile(optimizer=..., loss=...)
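For contrast, the PyTorch equivalent of that single compile-and-fit call is an explicit loop: the optimizer is constructed separately and holds only references to the model's parameters. A minimal sketch (the model and data here are illustrative):

import torch

model = torch.nn.Linear(1, 1)                             # any nn.Module works
criterion = torch.nn.MSELoss()                            # loss chosen separately
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # optimizer sees only the parameters

x, y = torch.tensor([[1.0]]), torch.tensor([[2.0]])
for _ in range(100):               # this loop plays the role of model.fit(...)
    loss = criterion(model(x), y)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()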
With just a small modification, this becomes a logistic regression classification model:
# -*- coding: utf-8 -*-
import torch

class MyModel(torch.nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        # define the model: a single linear layer
        self.linear = torch.nn.Linear(1, 1)

    def forward(self, x):
        # add the sigmoid (F.sigmoid is deprecated; use torch.sigmoid)
        y_hat = torch.sigmoid(self.linear(x))
        return y_hat

if __name__ == '__main__':
    x_data = torch.tensor([[1.], [2.], [3.], [4.]])
    y_data = torch.tensor([[0.], [0.], [1.], [1.]])
    model = MyModel()
    criterion = torch.nn.BCELoss(reduction='sum')  # change the loss function
    optimizer = torch.optim.SGD(params=model.parameters(), lr=.001)
    for ep in range(1000):
        y_pred = model(x_data)
        loss = criterion(y_pred, y_data)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(f"epoch:{ep}\nloss:{loss.item()}")
    print(model(torch.tensor([2.4])))
In the same way, we can build a multilayer perceptron model:
# -*- coding: utf-8 -*-
import torch
import numpy as np
from sklearn import datasets

class MyModel(torch.nn.Module):
    def __init__(self):
        # don't worry about the bias: it is handled automatically and
        # does not add a new input dimension
        super(MyModel, self).__init__()
        self.linear1 = torch.nn.Linear(9, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # we can change the activation, e.g. to ReLU
        self.activate = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.activate(self.linear1(x))
        x = self.activate(self.linear2(x))
        x = self.activate(self.linear3(x))
        return x

if __name__ == "__main__":
    data1 = datasets.load_diabetes()
    x = torch.from_numpy(np.array(data1.data[:, :-1], dtype=np.float32))
    # we must use data[:, [-1]] to build an (N, 1) matrix;
    # data[:, -1] would build a 1-dim tensor of shape (N,)
    y = torch.from_numpy(np.array(data1.data[:, [-1]], dtype=np.float32))
    # load_diabetes is a regression dataset, so binarize this column to get
    # valid 0/1 targets for BCELoss
    y = (y > 0).float()
    model = MyModel()
    criterion = torch.nn.BCELoss(reduction='sum')
    optimizer = torch.optim.SGD(params=model.parameters(), lr=0.001)
    for ep in range(1000):
        y_pred = model(x)
        loss = criterion(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        print(f"epoch:{ep}\nloss:{loss.item()}")
Setting up a DataLoader
# -*- coding: utf-8 -*-
import numpy as np
import torch
from sklearn import datasets
# Dataset is an abstract class: it cannot be instantiated directly and is
# meant to be inherited from
from torch.utils.data import Dataset
from torch.utils.data import DataLoader

class DiabetesDataset(Dataset):
    def __init__(self, path=None):
        da = datasets.load_diabetes()
        xy = np.array(da.data, dtype=np.float32)
        self.len = xy.shape[0]
        self.x_data = torch.from_numpy(xy[:, :-1])
        # binarize the last column so the targets are valid 0/1 labels for BCELoss
        self.y_data = (torch.from_numpy(xy[:, [-1]]) > 0).float()

    # define this function to support indexing with []
    def __getitem__(self, item):
        return self.x_data[item], self.y_data[item]

    # support len(dataset)
    def __len__(self):
        return self.len

class MyModel(torch.nn.Module):
    def __init__(self):
        # don't worry about the bias: it is handled automatically and
        # does not add a new input dimension
        super(MyModel, self).__init__()
        self.linear1 = torch.nn.Linear(9, 6)
        self.linear2 = torch.nn.Linear(6, 4)
        self.linear3 = torch.nn.Linear(4, 1)
        # we can change the activation, e.g. to ReLU
        self.activate = torch.nn.Sigmoid()

    def forward(self, x):
        x = self.activate(self.linear1(x))
        x = self.activate(self.linear2(x))
        x = self.activate(self.linear3(x))
        return x

if __name__ == "__main__":
    data = DiabetesDataset()
    train_loader = DataLoader(dataset=data, batch_size=320, shuffle=True, num_workers=8)
    model = MyModel()
    criterion = torch.nn.BCELoss(reduction='sum')
    optimizer = torch.optim.SGD(params=model.parameters(), lr=0.001)
    for ep in range(1000):
        for i, (x, y) in enumerate(train_loader):
            y_pred = model(x)
            loss = criterion(y_pred, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print(f"epoch:{ep}\nloss:{loss.item()}")