50. Pow(x, n) @Python @Go

Problem

https://leetcode.cn/problems/powx-n/description/

Approach

Recursion (exponentiation by squaring). To compute x^n, split on the parity of n: if n is even, x^n = (x^(n/2))^2, so one recursive call on the half exponent suffices; if n is odd, x^n = x * x^(n-1), which reduces to the even case. A negative exponent reduces to a positive one via x^n = 1 / x^(-n). For example, x^10 = (x^5)^2, x^5 = x * x^4, and x^4 = (x^2)^2, so four multiplications replace the nine of the naive loop.

Complexity

Time: O(log n) — the exponent is at least halved every one or two recursive calls.
Space: O(log n) for the recursion stack; an iterative version brings this down to O(1) (see the sketch after the Go code).

Python Code

class Solution:
    def myPow(self, x: float, n: int) -> float:
        if n == 0:
            return 1  # base case: x^0 = 1
        elif n == 1:
            return x  # base case: x^1 = x
        elif n < 0:
            return 1 / self.myPow(x, -n)  # negative exponent: x^n = 1 / x^(-n)
        elif n % 2 != 0:
            return self.myPow(x, n - 1) * x  # odd exponent: peel off one factor of x
        else:
            root = self.myPow(x, n // 2)  # even exponent: compute the half power once
            return root * root  # and square it
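
A quick sanity check using the problem's sample inputs:

s = Solution()
print(s.myPow(2.0, 10))  # 1024.0
print(s.myPow(2.1, 3))   # approximately 9.261
print(s.myPow(2.0, -2))  # 0.25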

Go Code

func myPow(x float64, n int) float64 {
	if n == 0 {
		return 1 // base case: x^0 = 1
	} else if n == 1 {
		return x // base case: x^1 = x
	} else if n < 0 {
		return 1 / myPow(x, -n) // negative exponent: x^n = 1 / x^(-n)
	} else if n%2 != 0 {
		return myPow(x, n-1) * x // odd exponent: peel off one factor of x
	} else {
		root := myPow(x, n/2) // even exponent: compute the half power once
		return root * root // and square it
	}
}
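
As noted under Complexity, the recursion stack can be avoided. Below is a minimal iterative sketch of the same exponentiation-by-squaring idea, walking the bits of the exponent in O(log n) time with O(1) extra space; the name my_pow_iter is an illustrative helper, not part of the LeetCode interface.

def my_pow_iter(x: float, n: int) -> float:
    # Iterative exponentiation by squaring: O(log n) time, O(1) extra space.
    if n < 0:
        x, n = 1 / x, -n  # reduce to a non-negative exponent
    result = 1.0
    while n:
        if n & 1:  # lowest bit of n is set: fold the current power into the result
            result *= x
        x *= x  # square the base for the next bit
        n >>= 1
    return result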