#L1loss:依次相减再除以个数
#Inputs(N,*) N:batch_size多少个数据
#Targets(N,*)
# MSELOSS 均方差
#交叉熵:分类问题时
#Inputs(N , C) C:分类数
#Targets(N)
import torch
from torch.nn import L1Loss
from torch.nn import MSELoss
from torch.nn import CrossEntropyLoss
#from torch import nn --->使用nn.MSELoss()
# --- L1 loss and MSE loss demo ---
# Loss functions require floating-point tensors; integer dtypes raise an error.
inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)
# Reshape to (batch=1, channel=1, 1 row, 3 cols); elementwise losses accept any shape
# as long as inputs and targets match.
inputs = torch.reshape(inputs, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))

# L1Loss = mean absolute error; pass reduction='sum' to sum instead of averaging.
loss = L1Loss()
result = loss(inputs, targets)

# MSELoss = mean squared error.
loss_mse = MSELoss()
result_mse = loss_mse(inputs, targets)

print(result)      # tensor(0.6667) -> (|1-1| + |2-2| + |5-3|) / 3 = 2/3
print(result_mse)  # tensor(1.3333) -> ((1-1)**2 + (2-2)**2 + (5-3)**2) / 3 = 4/3

# --- Cross-entropy demo (classification) ---
x = torch.tensor([0.1, 0.2, 0.3])  # raw logits for 3 classes
y = torch.tensor([1])              # target class index for the single sample
x = torch.reshape(x, (1, 3))       # CrossEntropyLoss expects logits of shape (N, C)
loss_cross = CrossEntropyLoss()
return_cross = loss_cross(x, y)
print(return_cross)  # tensor(1.1019)

# Expected output:
# tensor(0.6667) tensor(1.3333) tensor(1.1019)