The Meaning of inverse="true"

This article explains what setting the inverse attribute to true means in a many-to-many association, and how to ensure that both tables involved are updated, using a bidirectional Teacher/Student association as the example.


In a many-to-many association, setting inverse="true" means that this side does not maintain the association; maintenance of the association is left entirely to the other side.

For example, in a bidirectional association between Teacher and Student, suppose Teacher.hbm.xml contains the following:

<set name="students" table="teacher_student" inverse="true">
    <key column="teacher_id"/>
    <many-to-many class="Student" column="student_id"/>
</set>

Then even if you write:

Set<Student> students = new HashSet<Student>();
students.add(s1);
students.add(s2);

teacher.setStudents(students);
session.save(teacher);

Hibernate will still only insert a row into the Teacher table and will not insert anything into the teacher_student join table: because this side declares inverse="true", Hibernate expects the Student side to manage the join-table rows.

If you also want rows inserted into teacher_student when saving from this side, inverse must be set to false here.
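For reference, here is a minimal sketch of the other side of this bidirectional mapping in Student.hbm.xml (the set name teachers and the mirrored column names are assumptions for illustration, not taken from the original article). Because inverse defaults to false, this Student side owns the association and is the one that writes to the teacher_student table:

<set name="teachers" table="teacher_student">
    <key column="student_id"/>
    <many-to-many class="Teacher" column="teacher_id"/>
</set>

With such a mapping in place, saving from the Student side does populate the join table (a sketch; it assumes the Teacher instance is already persistent in the same session):

Set<Teacher> teachers = new HashSet<Teacher>();
teachers.add(teacher);
student.setTeachers(teachers);
session.save(student);  // inserts into Student and into teacher_student at flush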
