import math
import numpy as np
import torch

# Define the optimizer
self.nf_optimizer = torch.optim.Adam(params, lr=self.nf_conf.lr)

# Cosine learning-rate decay function
def adjust_learning_rate(c, optimizer, epoch):
    lr = c.lr
    if c.lr_cosine:
        # Lower bound for the decayed learning rate
        eta_min = lr * (c.lr_decay_rate ** 3)
        lr = eta_min + (lr - eta_min) * (
            1 + math.cos(math.pi * epoch / c.meta_epochs)) / 2
    else:
        # Step decay: count how many decay milestones this epoch has passed
        steps = np.sum(epoch >= np.asarray(c.lr_decay_epochs))
        if steps > 0:
            lr = lr * (c.lr_decay_rate ** steps)
    # Write the new learning rate into every parameter group
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr

for epoch in range(1, total_epochs + 1):
    # Update the learning rate at the start of each epoch
    adjust_learning_rate(self.nf_conf, self.nf_optimizer, epoch)
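In the cosine branch, the learning rate at epoch t follows the standard cosine-annealing schedule, decaying from the initial value c.lr down to the floor eta_min over c.meta_epochs epochs:

$$
\eta_t = \eta_{\min} + (\eta_{\max} - \eta_{\min}) \cdot \frac{1 + \cos\!\left(\pi t / T\right)}{2},
\qquad \eta_{\min} = \eta_{\max} \cdot \text{lr\_decay\_rate}^{3}
$$

where $\eta_{\max}$ is c.lr, $T$ is c.meta_epochs, and $t$ is the current epoch. At $t = 0$ the rate equals $\eta_{\max}$; at $t = T$ it reaches $\eta_{\min}$.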
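For reference, PyTorch's built-in torch.optim.lr_scheduler.CosineAnnealingLR produces the same cosine shape. Below is a minimal sketch; the model, base_lr, decay_rate, and total_epochs values are placeholders for illustration, not taken from the original snippet.

import torch
from torch.optim.lr_scheduler import CosineAnnealingLR

model = torch.nn.Linear(10, 2)      # placeholder model
base_lr = 1e-3                      # placeholder initial learning rate
decay_rate = 0.1                    # placeholder for c.lr_decay_rate
total_epochs = 100                  # placeholder for c.meta_epochs

optimizer = torch.optim.Adam(model.parameters(), lr=base_lr)
# T_max is the period of the cosine; eta_min is the decay floor
scheduler = CosineAnnealingLR(optimizer, T_max=total_epochs,
                              eta_min=base_lr * decay_rate ** 3)

for epoch in range(total_epochs):
    # ... forward / backward / training step ...
    optimizer.step()
    scheduler.step()   # advance the schedule once per epoch

The main difference is bookkeeping: the scheduler tracks the epoch index internally, whereas the hand-written adjust_learning_rate recomputes the rate from the epoch passed in on every call.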