Basic PyTorch implementation
import numpy as np
import torch
from torch.autograd import Variable

torch.manual_seed(2)
x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0], [4.0]]))
y_data = Variable(torch.Tensor([[0.0], [0.0], [1.0], [1.0]]))

# initialize the parameters (I changed the initial w from -1 to 1)
w = Variable(torch.Tensor([1]), requires_grad=True)
b = Variable(torch.Tensor([0]), requires_grad=True)
epochs = 100
costs = []
lr = 0.1

print('before training, the prediction for x=1.5 is')
print('y_pred = ', float(w.data * 1.5 + b.data > 0))

# train the model
for epoch in range(epochs):
    # forward pass: sigmoid activation
    A = 1 / (1 + torch.exp(-(w * x_data + b)))
    # binary cross-entropy loss
    J = -torch.mean(y_data * torch.log(A) + (1 - y_data) * torch.log(1 - A))
    costs.append(J.data.numpy())
    # backward pass, then manual gradient-descent update
    J.backward()
    w.data = w.data - lr * w.grad.data
    w.grad.data.zero_()
    b.data = b.data - lr * b.grad.data
    b.grad.data.zero_()

print('after training, the prediction for x=1.5 is:')
print('y_pred = ', float(w.data * 1.5 + b.data > 0))
print(w.data, b.data)
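The loop above stores each epoch's loss in costs but never uses it; as a small optional sketch (assuming matplotlib is installed, which the original code does not mention), the training curve could be plotted like this:

import matplotlib.pyplot as plt

# plot the loss recorded at each epoch to check that training converges
plt.plot(range(len(costs)), costs)
plt.xlabel('epoch')
plt.ylabel('loss J')
plt.show()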
There is one bit of float() syntax I did not understand at first, so I'll share it for readers who are in the same boat:
float(-2 < 0)
# outputs 1.0
float(1.5 > 0)
# outputs 1.0
# The expression inside the parentheses is a boolean test: it gives 1 if the condition holds and 0 otherwise, and float() converts that to 1.0 or 0.0.
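One subtlety worth noting (my own addition, not from the original): in the prediction line above, the value inside float() is not a plain Python bool but a one-element tensor, because w.data*1.5+b.data>0 is a tensor comparison; float() accepts a single-element tensor as well, as this small sketch shows:

import torch

w = torch.tensor([1.0])
b = torch.tensor([0.0])

mask = w * 1.5 + b > 0   # a one-element boolean tensor: tensor([True])
print(float(mask))       # 1.0 -- float() unwraps a single-element tensor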
Let me also explain the loss J defined here:
torch.manual_seed(2)
x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0], [4.0]]))
y_data = Variable(torch.Tensor([[0.0], [0.0], [1.0], [1.0]]))
A = 1 / (1 + torch.exp(-(w * x_data + b)))
J = -torch.mean(y_data * torch.log(A) + (1 - y_data) * torch.log(1 - A))
print(J)
print(J.data.numpy())
J is really just a tensor holding a single number; after calling .numpy() it becomes a plain scalar.
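As a side note that goes beyond the original text: for a scalar tensor like J, calling .item() extracts the value directly as a Python float, which is the more common idiom nowadays. A minimal sketch with a made-up loss value:

import torch

J = torch.tensor(0.6931)   # hypothetical scalar loss value, just for illustration
print(J.data.numpy())      # numpy scalar, e.g. 0.6931
print(J.item())            # plain Python float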
Implementing logistic regression with a PyTorch class: use torch.nn.Module to define the network structure
import torch
from torch.autograd import Variable

torch.manual_seed(2)
x_data = Variable(torch.Tensor([[1.0], [2.0], [3.0], [4.0]]))
y_data = Variable(torch.Tensor([[0.0], [0.0], [1.0], [1.0]]))

# define the model
# use class to build the network: class Model wraps the whole neural network
class Model(torch.nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.linear = torch.nn.Linear(1, 1)  # one input feature, one output score
    def forward(self, x):
        y_pred = self.linear(x)  # raw score (logit); the sigmoid is applied inside the loss
        return y_pred

model = Model()
print(model)
criterion = torch.nn.BCEWithLogitsLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)

# before training
hour_var = Variable(torch.Tensor([[4.0]]))
y_pred = model(hour_var)
print('before training, given', 4, 'the prediction is',
      float(torch.sigmoid(model(hour_var)).data[0][0] > 0.5))

epochs = 40
for epoch in range(epochs):
    y_pred = model(x_data)
    loss = criterion(y_pred, y_data)
    print(loss)
    # print('epoch = ', epoch + 1, loss.item())
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

hour_var = Variable(torch.Tensor([[4.0]]))
y_pred = model(hour_var)
print('after training, given', 4, 'the prediction is',
      float(torch.sigmoid(model(hour_var)).data[0][0] > 0.5))
Note that torch's logistic cross-entropy loss, BCEWithLogitsLoss, expects the raw score (logit) from the model and applies the sigmoid internally, so to turn the output into a 0/1 prediction you pass it through sigmoid and use 0.5 as the decision boundary. In the earlier version written with basic PyTorch syntax we instead compared the raw linear score w*x+b against 0; the two rules are equivalent, because sigmoid(0) = 0.5.
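A quick check of why the two decision rules agree (my own sketch, not from the original):

import torch

z = torch.tensor([-2.0, 0.0, 3.0])   # raw linear scores (logits)
p = torch.sigmoid(z)                  # probabilities; sigmoid(0) = 0.5

# thresholding the logit at 0 gives the same labels as thresholding the probability at 0.5
print((z > 0).float())    # tensor([0., 0., 1.])
print((p > 0.5).float())  # tensor([0., 0., 1.])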
If you print y_pred you will see it is a 1x1 tensor, so to pull out the scalar value you index it with .data[0][0] (calling .item() works as well).
Finally, I'm attaching the original diagram I studied from; it has detailed annotations that make it convenient for review.