import torch
import numpy as np
import torch.nn.functional as F
import math
from functools import reduce
exp = math.exp
log = math.log
# Set up the experiment data: a 3x3 float tensor of logits.
# NOTE(review): the name `input` shadows the Python builtin; kept as-is
# because the later blocks in this script reference it by this name.
input = torch.tensor([[2.5, -0.5, 0.1],
[-1.1, 2.5, 0.0],
[1.2, 2.2, 3.1]], dtype=torch.float)
# Pure-Python implementation (row-wise log-softmax).
def log_softmax(p):
    """Compute log-softmax independently over each row of a 2-D tensor.

    Args:
        p: 2-D torch tensor of shape (m, n).

    Returns:
        np.ndarray of shape (m, n) where row i equals log(softmax(p[i])).
    """
    m, n = p.size()
    sm_arr = []
    for i in range(m):
        row = [float(x) for x in p[i]]
        # Subtract the row maximum before exponentiating (log-sum-exp trick).
        # This is mathematically a no-op for the result, but it prevents
        # math.exp from raising OverflowError for large logits (x > ~709).
        mx = max(row)
        shifted = [x - mx for x in row]
        total = sum(math.exp(x) for x in shifted)
        # log(exp(x) / total) == x - log(total)
        sm_arr.append(np.array([x - math.log(total) for x in shifted]))
    return np.array(sm_arr)
# Run the pure-Python implementation on the experiment tensor and print it.
ety = log_softmax(input)
print(ety)
# Output:
"""
[[-0.13147117 -3.13147117 -2.53147116]
[-3.70382721 -0.10382719 -2.60382719]
[-2.3422072 -1.3422072 -0.44220734]]
"""
# NumPy implementation of the same computation.
a = input.numpy()
# Per-row sums of exponentials: axis=1 reduces across the columns of each
# row, so despite the name, `colsum` holds one scalar per row.
colsum = np.sum(np.exp(a),axis=1)
ety = []
for i in range(a.shape[0]):
tmp = np.log(np.exp(a[i]) /
# Article title: "Exploring the computation details of log_softmax"
# (page-scrape artifact: "latest recommended article published 2025-07-06 21:11:57")