keras使用Lambda和Reshape自定义层、改动output、修改loss值

博客介绍了新版Keras中使用Lambda和Reshape自定义层、改动output及修改loss值的方法。Lambda可将任意表达式封装为Layer对象,因普通切片会破坏Layer,不能直接对layer数据切片,文中还给出了Lambda使用的例子,帮助理解自定义层。

摘要生成于 C知道 ,由 DeepSeek-R1 满血版支持, 前往体验 >

keras使用Lambda和Reshape自定义层、改动output、修改loss值

新版keras有一个Lambda工具可以帮助自定义层,同时可能会用到Reshape函数。
关于Lambda,它的作用就是将任意表达式封装为 Layer 对象。因为对层输出直接做普通切片得到的是普通张量、而不是 Keras 层的输出,会破坏模型的层结构,所以不能直接对一个 layer 的输出切片。
那怎么切片才能得到还是层的结果呢?
下面是一个Lambda使用例子说明:

...
out = TimeDistributed(Dense(label_size, activation='softmax'))(lstm_layer)


def slice(x, b):
    """Return time step `b` of a (batch, time, features) tensor.

    Plain tensor slicing on a layer output yields a raw tensor, not a
    Keras Layer output, so this function must be wrapped in a Lambda
    layer to keep the result usable inside a functional-API model.
    NOTE: the name shadows the builtin `slice`; kept as-is because the
    surrounding article refers to it by this name.
    """
    return x[:, b, :]


outs = []
for i in range(100 // 2):
    # Pick two adjacent time steps from `out` (shape (None, 100, label_size))
    # and add them element-wise.
    c1 = Lambda(slice, output_shape=(1, label_size), arguments={'b': 2 * i})(out)
    c2 = Lambda(slice, output_shape=(1, label_size), arguments={'b': 2 * i + 1})(out)
    c = Add()([c1, c2])  # sum each pair of adjacent time steps
    outs.append(c)  # outs grows to 50 tensors, each of shape (None, label_size)

# Concatenate the 50 tensors into one (None, 50 * label_size) tensor;
# Concatenate takes the list directly — no copy needed.
outs = Concatenate(axis=1)(outs)
# Reshape back to (None, 50, label_size): 50 groups of label_size scores.
out_ = Reshape((50, label_size))(outs)

# The legacy `input=`/`output=` kwargs were removed from Keras;
# modern Keras requires `inputs=`/`outputs=`.
model = Model(inputs=input_layer, outputs=out_)
model.compile(optimizer='adam', loss='categorical_crossentropy')
 ...

slice函数实现对层out的切片,这里out的shape是(None,100,32),label_size=32。其中None是batch维度(批大小,建图时无需指定);对单个样本,可以把out想成是[[32个label],[32个label],[32个label],…],里面共100个[32个label]。

看懂了以后,可以自己随便处理out,自定义层。
其他例子1
其他例子2
感谢你的浏览~

帮我整段修改一下,改成TensorFlow2.11版本可用的:“ def TGCN(_X, _weights, _biases): ### cell_1 = tgcnCell(cell=custom_cell,gru_units=gru_units, adj=adj, num_nodes=num_nodes) cell = tf.nn.rnn_cell.MultiRNNCell([cell_1], state_is_tuple=True) _X = tf.unstack(_X, axis=1) outputs, states = tf.nn.static_rnn(cell, _X, dtype=tf.float32) m = [] for i in outputs: o = tf.reshape(i,shape=[-1,num_nodes,gru_units]) o = tf.reshape(o,shape=[-1,gru_units]) m.append(o) last_output = m[-1] output = tf.matmul(last_output, _weights['out']) + _biases['out'] output = tf.reshape(output,shape=[-1,num_nodes,pre_len]) output = tf.transpose(output, perm=[0,2,1]) output = tf.reshape(output, shape=[-1,num_nodes]) return output, m, states ###### placeholders ###### # 使用 tf.placeholder inputs = tf.compat.v1.placeholder(tf.float32, shape=[None, seq_len, num_nodes]) labels = tf.compat.v1.placeholder(tf.float32, shape=[None, pre_len, num_nodes]) # Graph weights weights = { 'out': tf.Variable(tf.random.normal([gru_units, pre_len], mean=1.0), name='weight_o')} biases = { 'out': tf.random.normal(shape=[pre_len],name='bias_o')} if model_name == 'tgcn': pred,ttts,ttto = TGCN(inputs, weights, biases) y_pred = pred ###### optimizer ###### lambda_loss = 0.0015 Lreg = lambda_loss * sum(tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()) label = tf.reshape(labels, [-1,num_nodes]) ##loss loss = tf.reduce_mean(tf.nn.l2_loss(y_pred-label) + Lreg) ##rmse error = tf.sqrt(tf.reduce_mean(tf.square(y_pred-label))) optimizer = tf.train.AdamOptimizer(lr).minimize(loss) ###### Initialize session ###### variables = tf.global_variables() saver = tf.train.Saver(tf.global_variables()) #sess = tf.Session() gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) sess.run(tf.global_variables_initializer())”
04-05
import tkinter as tk from tkinter import ttk, filedialog, messagebox import pandas as pd import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg import tensorflow as tf from tensorflow.keras.models import Model from tensorflow.keras.layers import Input, Dense, Lambda from tensorflow.keras.optimizers import Adam from sklearn.preprocessing import MinMaxScaler import os import time import warnings import matplotlib.dates as mdates warnings.filterwarnings('ignore', category=UserWarning, module='tensorflow') mpl.rcParams['font.sans-serif'] = ['SimHei', 'Microsoft YaHei', 'Arial Unicode MS'] mpl.rcParams['axes.unicode_minus'] = False # 关键修复:使用 ASCII 减号 # 设置中文字体支持 plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False class PINNModel(tf.keras.Model): def __init__(self, num_layers=4, hidden_units=32, dropout_rate=0.1, **kwargs): super(PINNModel, self).__init__(**kwargs) self.dense_layers = [] self.dropout_layers = [] # 创建隐藏对应的Dropout for _ in range(num_layers): self.dense_layers.append(Dense(hidden_units, activation='tanh')) self.dropout_layers.append(tf.keras.layers.Dropout(dropout_rate)) self.final_layer = Dense(1, activation='linear') # 添加更多带约束的物理参数 # 基本衰减系数 self.k1_raw = tf.Variable(0.1, trainable=True, dtype=tf.float32, name='k1_raw') self.k1 = tf.math.sigmoid(self.k1_raw) * 0.5 # 约束在0-0.5之间 # 水位依赖的衰减系数 self.k2_raw = tf.Variable(0.01, trainable=True, dtype=tf.float32, name='k2_raw') self.k2 = tf.math.sigmoid(self.k2_raw) * 0.1 # 约束在0-0.1之间 # 非线性项系数 self.alpha_raw = tf.Variable(0.1, trainable=True, dtype=tf.float32, name='alpha_raw') self.alpha = tf.math.sigmoid(self.alpha_raw) * 1.0 # 约束在0-1.0之间 # 外部影响系数(如降雨、温度等) self.beta_raw = tf.Variable(0.05, trainable=True, dtype=tf.float32, name='beta_raw') self.beta = tf.math.sigmoid(self.beta_raw) * 0.2 # 约束在0-0.2之间 def call(self, inputs, training=False): # 输入特征重构 year, month_sin, month_cos, day_sin, day_cos, h, 
dt_norm, log_dt_norm = inputs # 时间特征组合 time_features = tf.concat([ year, month_sin, month_cos, day_sin, day_cos, dt_norm, log_dt_norm, h * dt_norm, h * log_dt_norm ], axis=1) # 将时间、水位时间步长作为输入特征 x = tf.concat([t, h, dt, interaction], axis=1) # 依次通过所有隐藏Dropout for dense_layer, dropout_layer in zip(self.dense_layers, self.dropout_layers): x = dense_layer(x) x = dropout_layer(x, training=training) # 仅在训练时应用Dropout return self.final_layer(x) def physics_loss(self, h_current_raw, dt_raw, training=False): """在原始空间计算物理损失""" # 获取物理参数 k1 = tf.math.sigmoid(self.k1_raw) * 0.5 k2 = tf.math.sigmoid(self.k2_raw) * 0.1 alpha = tf.math.sigmoid(self.alpha_raw) * 1.0 beta = tf.math.sigmoid(self.beta_raw) * 0.2 # 物理方程计算 exponent = - (k1 + k2 * h_current_raw) * dt_raw exponent = tf.clip_by_value(exponent, -50.0, 50.0) decay_term = h_current_raw * tf.exp(exponent) beta_exp = -beta * dt_raw beta_exp = tf.clip_by_value(beta_exp, -50.0, 50.0) external_term = alpha * (1 - tf.exp(beta_exp)) # 预测(反归一化) h_next_pred = self.scaler_h.inverse_transform( self.model_output ) residual = h_next_pred - (decay_term + external_term) return tf.reduce_mean(tf.square(residual)) class DamSeepageModel: def __init__(self, root): self.root = root self.root.title("大坝渗流预测模型(PINNs)") self.root.geometry("1200x800") # 初始化数据 self.train_df = None # 训练集 self.test_df = None # 测试集 self.model = None self.scaler_t = MinMaxScaler(feature_range=(0, 1)) self.scaler_h = MinMaxScaler(feature_range=(0, 1)) self.scaler_dt = MinMaxScaler(feature_range=(0, 1)) # 新增归一化器 self.scaler_year = MinMaxScaler(feature_range=(0, 1)) self.scaler_month = MinMaxScaler(feature_range=(0, 1)) self.scaler_day = MinMaxScaler(feature_range=(0, 1)) self.scaler_dt = MinMaxScaler(feature_range=(0, 1)) self.scaler_log_dt = MinMaxScaler(feature_range=(0, 1)) self.evaluation_metrics = {} # 创建主界面 self.create_widgets() def create_widgets(self): # 创建主框架 main_frame = ttk.Frame(self.root, padding=10) main_frame.pack(fill=tk.BOTH, expand=True) # 左侧控制面板 
control_frame = ttk.LabelFrame(main_frame, text="模型控制", padding=10) control_frame.pack(side=tk.LEFT, fill=tk.Y, padx=5, pady=5) # 文件选择部分 file_frame = ttk.LabelFrame(control_frame, text="数据文件", padding=10) file_frame.pack(fill=tk.X, pady=5) # 训练集选择 ttk.Label(file_frame, text="训练集:").grid(row=0, column=0, sticky=tk.W, pady=5) self.train_file_var = tk.StringVar() ttk.Entry(file_frame, textvariable=self.train_file_var, width=30, state='readonly').grid( row=0, column=1, padx=5) ttk.Button(file_frame, text="选择文件", command=lambda: self.select_file("train")).grid(row=0, column=2) # 测试集选择 ttk.Label(file_frame, text="测试集:").grid(row=1, column=0, sticky=tk.W, pady=5) self.test_file_var = tk.StringVar() ttk.Entry(file_frame, textvariable=self.test_file_var, width=30, state='readonly').grid(row=1, column=1, padx=5) ttk.Button(file_frame, text="选择文件", command=lambda: self.select_file("test")).grid(row=1, column=2) # PINNs参数设置 param_frame = ttk.LabelFrame(control_frame, text="PINNs参数", padding=10) param_frame.pack(fill=tk.X, pady=10) # 验证集切分比例 ttk.Label(param_frame, text="验证集比例:").grid(row=0, column=0, sticky=tk.W, pady=5) self.split_ratio_var = tk.DoubleVar(value=0.2) ttk.Spinbox(param_frame, from_=0, to=1, increment=0.05, textvariable=self.split_ratio_var, width=10).grid(row=0, column=1, padx=5) # 隐藏数量 ttk.Label(param_frame, text="网络数:").grid(row=1, column=0, sticky=tk.W, pady=5) self.num_layers_var = tk.IntVar(value=4) ttk.Spinbox(param_frame, from_=2, to=8, increment=1, textvariable=self.num_layers_var, width=10).grid(row=1, column=1, padx=5) # 每神经元数量 ttk.Label(param_frame, text="神经元数/:").grid(row=2, column=0, sticky=tk.W, pady=5) self.hidden_units_var = tk.IntVar(value=32) ttk.Spinbox(param_frame, from_=16, to=128, increment=4, textvariable=self.hidden_units_var, width=10).grid(row=2, column=1, padx=5) # 训练轮次 ttk.Label(param_frame, text="训练轮次:").grid(row=3, column=0, sticky=tk.W, pady=5) self.epochs_var = tk.IntVar(value=500) ttk.Spinbox(param_frame, from_=100, to=2000, 
increment=100, textvariable=self.epochs_var, width=10).grid(row=3, column=1, padx=5) # 物理损失权重 ttk.Label(param_frame, text="物理损失权重:").grid(row=4, column=0, sticky=tk.W, pady=5) self.physics_weight_var = tk.DoubleVar(value=0.5) ttk.Spinbox(param_frame, from_=0.1, to=1.0, increment=0.1, textvariable=self.physics_weight_var, width=10).grid(row=4, column=1, padx=5) # 控制按钮 btn_frame = ttk.Frame(control_frame) btn_frame.pack(fill=tk.X, pady=10) ttk.Button(btn_frame, text="训练模型", command=self.train_model).pack(side=tk.LEFT, padx=5) ttk.Button(btn_frame, text="预测结果", command=self.predict).pack(side=tk.LEFT, padx=5) ttk.Button(btn_frame, text="保存结果", command=self.save_results).pack(side=tk.LEFT, padx=5) ttk.Button(btn_frame, text="重置", command=self.reset).pack(side=tk.RIGHT, padx=5) # 状态栏 self.status_var = tk.StringVar(value="就绪") status_bar = ttk.Label(control_frame, textvariable=self.status_var, relief=tk.SUNKEN, anchor=tk.W) status_bar.pack(fill=tk.X, side=tk.BOTTOM) # 右侧结果显示区域 result_frame = ttk.Frame(main_frame) result_frame.pack(side=tk.RIGHT, fill=tk.BOTH, expand=True, padx=5, pady=5) # 创建标签页 self.notebook = ttk.Notebook(result_frame) self.notebook.pack(fill=tk.BOTH, expand=True) # 损失曲线标签页 self.loss_frame = ttk.Frame(self.notebook) self.notebook.add(self.loss_frame, text="训练损失") # 在预测结果标签页 self.prediction_frame = ttk.Frame(self.notebook) self.notebook.add(self.prediction_frame, text="预测结果") # 指标显示 self.metrics_var = tk.StringVar() metrics_label = ttk.Label( self.prediction_frame, textvariable=self.metrics_var, font=('TkDefaultFont', 10, 'bold'), relief='ridge', padding=5 ) metrics_label.pack(fill=tk.X, padx=5, pady=5) # 创建图表容器Frame chart_frame = ttk.Frame(self.prediction_frame) chart_frame.pack(fill=tk.BOTH, expand=True, padx=5, pady=5) # 初始化绘图区域 self.fig, self.ax = plt.subplots(figsize=(10, 6)) self.canvas = FigureCanvasTkAgg(self.fig, master=chart_frame) self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True) # 添加Matplotlib工具栏(缩放、平移等) from 
matplotlib.backends.backend_tkagg import NavigationToolbar2Tk self.toolbar = NavigationToolbar2Tk(self.canvas, chart_frame) self.toolbar.update() self.canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True) # 损失曲线画布(同样添加工具栏) loss_chart_frame = ttk.Frame(self.loss_frame) loss_chart_frame.pack(fill=tk.BOTH, expand=True, padx=5, pady=5) self.loss_fig, self.loss_ax = plt.subplots(figsize=(10, 4)) self.loss_canvas = FigureCanvasTkAgg(self.loss_fig, master=loss_chart_frame) self.loss_canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True) self.loss_toolbar = NavigationToolbar2Tk(self.loss_canvas, loss_chart_frame) self.loss_toolbar.update() self.loss_canvas.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH, expand=True) def preprocess_data(self, df, is_training=False): """增强的时间特征处理""" # 创建时间戳 if 'datetime' not in df.columns: time_cols = ['year', 'month', 'day'] # 填充缺失的时间单位 for col in ['hour', 'minute', 'second']: if col not in df.columns: df[col] = 0 df['datetime'] = pd.to_datetime(df[time_cols]) df = df.set_index('datetime') # 计算时间步长(单位:天) if 'dt' not in df.columns: df['dt'] = df.index.to_series().diff().dt.total_seconds() / 86400 df['dt'] = df['dt'].fillna(df['dt'].mean()) # 处理异常时间步长 dt_mean = df['dt'].mean() df.loc[df['dt'] <= 0, 'dt'] = dt_mean df.loc[df['dt'] > 30, 'dt'] = dt_mean # 处理过大的时间间隔 # 添加对数变换的时间步长特征 df['log_dt'] = np.log1p(df['dt']) # 周期性时间特征 df['year_norm'] = df['year'] df['month_sin'] = np.sin(2 * np.pi * df['month'] / 12) df['month_cos'] = np.cos(2 * np.pi * df['month'] / 12) df['day_sin'] = np.sin(2 * np.pi * df['day'] / 31) df['day_cos'] = np.cos(2 * np.pi * df['day'] / 31) # 归一化处理 if is_training: self.scaler_year.fit(df[['year_norm']]) self.scaler_month.fit(df[['month_sin', 'month_cos']]) self.scaler_day.fit(df[['day_sin', 'day_cos']]) self.scaler_dt.fit(df[['dt']]) self.scaler_log_dt.fit(df[['log_dt']]) self.scaler_h.fit(df[['水位']]) # 水位归一化器 # 应用归一化 df[['year_norm']] = self.scaler_year.transform(df[['year_norm']]) 
df[['month_sin', 'month_cos']] = self.scaler_month.transform(df[['month_sin', 'month_cos']]) df[['day_sin', 'day_cos']] = self.scaler_day.transform(df[['day_sin', 'day_cos']]) df[['dt_norm']] = self.scaler_dt.transform(df[['dt']]) df[['log_dt_norm']] = self.scaler_log_dt.transform(df[['log_dt']]) df[['水位_norm']] = self.scaler_h.transform(df[['水位']]) # 归一化水位 return df def select_file(self, file_type): """选择Excel文件并应用预处理""" try: file_path = filedialog.askopenfilename(...) if not file_path: return df = pd.read_excel(file_path) # 验证必需列 required_cols = ['year', 'month', 'day', '水位'] missing_cols = [col for col in required_cols if col not in df.columns] if missing_cols: messagebox.showerror("列名错误", f"缺少必需列: {', '.join(missing_cols)}") return # 应用预处理 is_training = (file_type == "train") df = self.preprocess_data(df, is_training=is_training) # 保存数据 if file_type == "train": self.train_df = df self.train_file_var.set(os.path.basename(file_path)) self.status_var.set(f"已加载训练集: {len(self.train_df)}条数据") else: self.test_df = df self.test_file_var.set(os.path.basename(file_path)) self.status_var.set(f"已加载测试集: {len(self.test_df)}条数据") except Exception as e: error_msg = f"文件读取失败: {str(e)}\n\n请确保:\n1. 文件不是打开状态\n2. 文件格式正确\n3. 
包含必需的时间水位列" messagebox.showerror("文件错误", error_msg) def calculate_metrics(self, y_true, y_pred): """计算评估指标""" from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score mse = mean_squared_error(y_true, y_pred) rmse = np.sqrt(mse) mae = mean_absolute_error(y_true, y_pred) non_zero_idx = np.where(y_true != 0)[0] if len(non_zero_idx) > 0: mape = np.mean(np.abs((y_true[non_zero_idx] - y_pred[non_zero_idx]) / y_true[non_zero_idx])) * 100 else: mape = float('nan') r2 = r2_score(y_true, y_pred) return { 'MSE': mse, 'RMSE': rmse, 'MAE': mae, 'MAPE': mape, # 修正键名 'R2': r2 } def train_model(self): """训练PINNs模型(带早停机制+训练指标监控)""" if self.train_df is None: messagebox.showwarning("警告", "请先选择训练集文件") return try: self.status_var.set("正在预处理数据...") self.root.update() # 从训练集中切分训练子集验证子集(时间顺序切分) split_ratio = 1 - self.split_ratio_var.get() split_idx = int(len(self.train_df) * split_ratio) train_subset = self.train_df.iloc[:split_idx] valid_subset = self.train_df.iloc[split_idx:] # 检查数据量是否足够 if len(train_subset) < 2 or len(valid_subset) < 2: messagebox.showerror("数据错误", "训练集数据量不足(至少需要2个时间步)") return # 数据预处理 - 分别归一化不同特征 # 归一化时间特征 t_train = train_subset['days'].values[1:].reshape(-1, 1) self.scaler_t.fit(t_train) t_train_scaled = self.scaler_t.transform(t_train).astype(np.float32) # 归一化水位特征 h_train = train_subset['水位'].values[:-1].reshape(-1, 1) self.scaler_h.fit(h_train) h_train_scaled = self.scaler_h.transform(h_train).astype(np.float32) # 归一化时间步长特征 dt_train = train_subset['dt'].values[1:].reshape(-1, 1) self.scaler_dt.fit(dt_train) dt_train_scaled = self.scaler_dt.transform(dt_train).astype(np.float32) # 归一化标签(下一时刻水位) h_next_train = train_subset['水位'].values[1:].reshape(-1, 1) h_next_train_scaled = self.scaler_h.transform(h_next_train).astype(np.float32) # 准备验证数据(同样进行归一化) t_valid = valid_subset['days'].values[1:].reshape(-1, 1) t_valid_scaled = self.scaler_t.transform(t_valid).astype(np.float32) h_valid = valid_subset['水位'].values[:-1].reshape(-1, 1) h_valid_scaled = 
self.scaler_h.transform(h_valid).astype(np.float32) dt_valid = valid_subset['dt'].values[1:].reshape(-1, 1) dt_valid_scaled = self.scaler_dt.transform(dt_valid).astype(np.float32) h_next_valid_scaled = self.scaler_h.transform( valid_subset['水位'].values[1:].reshape(-1, 1) ).astype(np.float32) # 原始用于指标计算 h_next_train_true = h_next_train h_next_valid_true = valid_subset['水位'].values[1:].reshape(-1, 1) # 创建模型优化器 self.model = PINNModel( num_layers=self.num_layers_var.get(), hidden_units=self.hidden_units_var.get() ) # 创建动态学习率调度器 initial_lr = 0.001 lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay( initial_learning_rate=initial_lr, decay_steps=100, # 每100步衰减一次 decay_rate=0.95, # 衰减率 staircase=True # 阶梯式衰减 ) optimizer = Adam(learning_rate=lr_schedule) # 在训练循环中,使用归一化后的数据 train_dataset = tf.data.Dataset.from_tensor_slices( ((t_train_scaled, h_train_scaled, dt_train_scaled), h_next_train_scaled) ) train_dataset = train_dataset.shuffle(buffer_size=1024).batch(32) valid_dataset = tf.data.Dataset.from_tensor_slices( ((t_valid_scaled, h_valid_scaled, dt_valid_scaled), h_next_valid_scaled) ) valid_dataset = valid_dataset.batch(32) # 初始化训练历史记录列表 train_data_loss_history = [] physics_loss_history = [] valid_data_loss_history = [] train_metrics_history = [] valid_metrics_history = [] # 早停机制参数 patience = int(self.epochs_var.get() / 3) min_delta = 1e-4 best_valid_loss = float('inf') wait = 0 best_epoch = 0 best_weights = None start_time = time.time() # 自定义训练循环 for epoch in range(self.epochs_var.get()): # 获取当前学习率 - 修复这里 current_lr = optimizer.learning_rate.numpy() # 直接访问属性而不是调用 # 训练阶段 epoch_train_data_loss = [] epoch_physics_loss = [] # 收集训练预测(归一化后) train_pred_scaled = [] for step, ((t_batch, h_batch, dt_batch), h_next_batch) in enumerate(train_dataset): with tf.GradientTape() as tape: # 预测下一时刻水位 h_pred = self.model([t_batch, h_batch, dt_batch], training=True) data_loss = tf.reduce_mean(tf.square(h_next_batch - h_pred)) # 动态调整物理损失权重 physics_weight = 
self.physics_weight_var.get() * (1 - self.epoch_var.get()* 0.5) # 计算物理损失(传入时间步长dt) physics_loss = self.model.physics_loss(h_current_raw, dt_raw, training=True) loss = data_loss + physics_weight * physics_loss grads = tape.gradient(loss, self.model.trainable_variables) optimizer.apply_gradients(zip(grads, self.model.trainable_variables)) epoch_train_data_loss.append(data_loss.numpy()) epoch_physics_loss.append(physics_loss.numpy()) train_pred_scaled.append(h_pred.numpy()) # 保存训练预测(归一化) # 合并训练预测(归一化后) train_pred_scaled = np.concatenate(train_pred_scaled, axis=0) # 反归一化得到原始预测 train_pred_true = self.scaler_h.inverse_transform(train_pred_scaled) # 计算训练集指标(使用原始真实预测) train_metrics = self.calculate_metrics( y_true=h_next_train_true.flatten(), y_pred=train_pred_true.flatten() ) train_metrics_history.append(train_metrics) # 验证阶段 epoch_valid_data_loss = [] valid_pred_scaled = [] for ((t_v_batch, h_v_batch, dt_v_batch), h_v_next_batch) in valid_dataset: h_v_pred = self.model([t_v_batch, h_v_batch, dt_v_batch], training=False) # 验证时不启用Dropout valid_data_loss = tf.reduce_mean(tf.square(h_v_next_batch - h_v_pred)) epoch_valid_data_loss.append(valid_data_loss.numpy()) valid_pred_scaled.append(h_v_pred.numpy()) # 保存验证预测(归一化) # 合并验证预测(归一化后) valid_pred_scaled = np.concatenate(valid_pred_scaled, axis=0) # 反归一化得到原始预测 valid_pred_true = self.scaler_h.inverse_transform(valid_pred_scaled) # 计算验证集指标(使用原始真实预测) valid_metrics = self.calculate_metrics( y_true=h_next_valid_true.flatten(), y_pred=valid_pred_true.flatten() ) valid_metrics_history.append(valid_metrics) # 计算平均损失 avg_train_data_loss = np.mean(epoch_train_data_loss) avg_physics_loss = np.mean(epoch_physics_loss) avg_valid_data_loss = np.mean(epoch_valid_data_loss) # 记录损失 train_data_loss_history.append(avg_train_data_loss) physics_loss_history.append(avg_physics_loss) valid_data_loss_history.append(avg_valid_data_loss) # 早停机制逻辑 current_valid_loss = avg_valid_data_loss # 早停机制逻辑 current_valid_loss = avg_valid_data_loss if 
current_valid_loss < best_valid_loss - min_delta: best_valid_loss = current_valid_loss best_epoch = epoch + 1 wait = 0 best_weights = self.model.get_weights() else: wait += 1 if wait >= patience: self.status_var.set(f"触发早停!最佳轮次: {best_epoch},最佳验证损失: {best_valid_loss:.4f}") if best_weights is not None: self.model.set_weights(best_weights) break # 确保在此处退出循环 # 更新状态(添加当前学习率显示) if epoch % 1 == 0: # 提取当前训练/验证的关键指标 train_rmse = train_metrics['RMSE'] valid_rmse = valid_metrics['RMSE'] train_r2 = train_metrics['R2'] valid_r2 = valid_metrics['R2'] elapsed = time.time() - start_time self.status_var.set( f"训练中 | 轮次: {epoch + 1}/{self.epochs_var.get()} | " f"学习率: {current_lr:.6f} | " f"训练RMSE: {train_rmse:.4f} | 验证RMSE: {valid_rmse:.4f} | " f"训练R²: {train_r2:.4f} | 验证R²: {valid_r2:.4f} | " f"k1: {self.model.k1.numpy():.6f}, k2: {self.model.k2.numpy():.6f} | 时间: {elapsed:.1f}秒 | 早停等待: {wait}/{patience}" ) self.root.update() # 绘制损失曲线 self.loss_ax.clear() epochs_range = range(1, len(train_data_loss_history) + 1) self.loss_ax.plot(epochs_range, train_data_loss_history, 'b-', label='训练数据损失') self.loss_ax.plot(epochs_range, physics_loss_history, 'r--', label='物理损失') self.loss_ax.plot(epochs_range, valid_data_loss_history, 'g-.', label='验证数据损失') self.loss_ax.set_title('PINNs训练与验证损失') self.loss_ax.set_xlabel('轮次') self.loss_ax.set_ylabel('损失', rotation=0) self.loss_ax.legend() self.loss_ax.grid(True, alpha=0.3) self.loss_ax.set_yscale('log') self.loss_canvas.draw() # 训练完成提示 elapsed = time.time() - start_time if wait >= patience: completion_msg = ( f"早停触发 | 最佳轮次: {best_epoch} | 最佳验证损失: {best_valid_loss:.4f} | " f"最佳验证RMSE: {valid_metrics_history[best_epoch - 1]['RMSE']:.4f} | " f"总时间: {elapsed:.1f}秒" ) else: completion_msg = ( f"训练完成 | 总轮次: {self.epochs_var.get()} | " f"最终训练RMSE: {train_metrics_history[-1]['RMSE']:.4f} | " f"最终验证RMSE: {valid_metrics_history[-1]['RMSE']:.4f} | " f"最终训练R²: {train_metrics_history[-1]['R2']:.4f} | " f"最终验证R²: {valid_metrics_history[-1]['R2']:.4f} | " f"总时间: 
{elapsed:.1f}秒" ) # 保存训练历史 self.train_history = { 'train_data_loss': train_data_loss_history, 'physics_loss': physics_loss_history, 'valid_data_loss': valid_data_loss_history, 'train_metrics': train_metrics_history, 'valid_metrics': valid_metrics_history } # 保存学习到的物理参数 self.learned_params = { "k1": self.model.k1.numpy(), "k2": self.model.k2.numpy(), "alpha": self.model.alpha.numpy(), "beta": self.model.beta.numpy() } self.status_var.set(completion_msg) messagebox.showinfo("训练完成", f"PINNs模型训练成功完成!\n{completion_msg}") except Exception as e: messagebox.showerror("训练错误", f"模型训练失败:\n{str(e)}") self.status_var.set("训练失败") def predict(self): """使用PINNs模型进行递归预测(带Teacher Forcing蒙特卡洛Dropout)""" if self.model is None: messagebox.showwarning("警告", "请先训练模型") return if self.test_df is None: messagebox.showwarning("警告", "请先选择测试集文件") return try: self.status_var.set("正在生成预测(使用Teacher ForcingMC Dropout)...") self.root.update() # 预处理测试数据 - 归一化 t_test = self.test_df['days'].values.reshape(-1, 1) t_test_scaled = self.scaler_t.transform(t_test).astype(np.float32) dt_test = self.test_df['dt'].values.reshape(-1, 1) dt_test_scaled = self.scaler_dt.transform(dt_test).astype(np.float32) h_test = self.test_df['水位'].values.reshape(-1, 1) h_test_scaled = self.scaler_h.transform(h_test).astype(np.float32) # 改进的递归预测参数 n = len(t_test) mc_iterations = 100 adaptive_forcing = True # 启用自适应教师强制 # 存储蒙特卡洛采样结果 mc_predictions_scaled = np.zeros((mc_iterations, n, 1), dtype=np.float32) # 进行多次蒙特卡洛采样 for mc_iter in range(mc_iterations): predicted_scaled = np.zeros((n, 1), dtype=np.float32) predicted_scaled[0] = h_test_scaled[0] # 第一个点使用真实 # 递归预测(带自适应教师强制) for i in range(1, n): # 自适应教师强制:后期阶段增加真实使用频率 if adaptive_forcing: # 前期70%概率使用真实,后期提高到90% teacher_forcing_prob = 0.7 + 0.2 * min(1.0, i / (0.7 * n)) else: teacher_forcing_prob = 0.7 # 决定使用真实还是预测 use_actual = np.random.rand() < teacher_forcing_prob if use_actual and i < n - 1: # 不能使用未来 h_prev = h_test_scaled[i - 1:i] else: h_prev = predicted_scaled[i - 1:i] 
t_prev = t_test_scaled[i - 1:i] dt_i = dt_test_scaled[i:i + 1] # 物理约束增强:添加物理模型预测作为参考 h_pred = self.model([t_prev, h_prev, dt_i], training=True) # 物理模型预测(用于约束) k1 = self.learned_params['k1'] k2 = self.learned_params['k2'] alpha = self.learned_params['alpha'] beta = self.learned_params['beta'] # 物理方程预测 exponent = - (k1 + k2 * h_prev) * dt_i decay_term = h_prev * np.exp(exponent) external_term = alpha * (1 - np.exp(-beta * dt_i)) physics_pred = decay_term + external_term # 混合预测:神经网络预测与物理模型预测加权平均 physics_weight = 0.3 # 物理模型权重 final_pred = physics_weight * physics_pred + (1 - physics_weight) * h_pred.numpy() predicted_scaled[i] = final_pred[0][0] mc_predictions_scaled[mc_iter] = predicted_scaled # 计算预测统计量 mean_pred_scaled = np.mean(mc_predictions_scaled, axis=0) std_pred_scaled = np.std(mc_predictions_scaled, axis=0) # 反归一化结果 predictions = self.scaler_h.inverse_transform(mean_pred_scaled) uncertainty = self.scaler_h.inverse_transform(std_pred_scaled) * 1.96 # 95%置信区间 actual_values = h_test test_time = self.test_df.index # 清除现有图表 self.ax.clear() # 计算合理的y轴范围 - 基于数据集中区域 # 获取实际预测的中位数 median_val = np.median(actual_values) # 计算数据的波动范围(标准差) data_range = np.std(actual_values) * 4 # 4倍标准差覆盖大部分数据 # 设置y轴范围为中心±数据波动范围 y_center = median_val y_half_range = max(data_range, 10) # 确保最小范围为20个单位 y_min_adjusted = y_center - y_half_range y_max_adjusted = y_center + y_half_range # 确保范围不为零 if y_max_adjusted - y_min_adjusted < 1: y_min_adjusted -= 5 y_max_adjusted += 5 # 绘制结果(带置信区间) self.ax.plot(test_time, actual_values, 'b-', label='真实', linewidth=2) self.ax.plot(test_time, predictions, 'r--', label='预测均', linewidth=2) self.ax.fill_between( test_time, (predictions - uncertainty).flatten(), (predictions + uncertainty).flatten(), color='orange', alpha=0.3, label='95%置信区间' ) # 设置自动调整的y轴范围 self.ax.set_ylim(y_min_adjusted, y_max_adjusted) self.ax.set_title('大坝渗流水位预测(PINNs with MC Dropout)') self.ax.set_xlabel('时间') self.ax.set_ylabel('测压管水位', rotation=0) self.ax.legend(loc='best') # 自动选择最佳位置 # 
优化时间轴刻度 self.ax.xaxis.set_major_locator(mdates.YearLocator()) self.ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y')) self.ax.xaxis.set_minor_locator(mdates.MonthLocator(interval=2)) self.ax.grid(which='minor', axis='x', linestyle=':', color='gray', alpha=0.3) self.ax.grid(which='major', axis='y', linestyle='-', color='lightgray', alpha=0.5) self.ax.tick_params(axis='x', which='major', rotation=0, labelsize=9) self.ax.tick_params(axis='x', which='minor', length=2) # 计算评估指标(排除第一个点) eval_actual = actual_values[1:].flatten() eval_pred = predictions[1:].flatten() self.evaluation_metrics = self.calculate_metrics(eval_actual, eval_pred) # 添加不确定性指标 avg_uncertainty = np.mean(uncertainty) max_uncertainty = np.max(uncertainty) self.evaluation_metrics['Avg Uncertainty'] = avg_uncertainty self.evaluation_metrics['Max Uncertainty'] = max_uncertainty metrics_text = ( f"MSE: {self.evaluation_metrics['MSE']:.4f} | " f"RMSE: {self.evaluation_metrics['RMSE']:.4f} | " f"MAE: {self.evaluation_metrics['MAE']:.4f} | " f"MAPE: {self.evaluation_metrics['MAPE']:.2f}% | " f"R²: {self.evaluation_metrics['R2']:.4f}\n" f"平均不确定性: {avg_uncertainty:.4f} | 最大不确定性: {max_uncertainty:.4f}" ) self.metrics_var.set(metrics_text) # 在图表上添加指标 self.ax.text( 0.5, 1.05, metrics_text, transform=self.ax.transAxes, ha='center', fontsize=8, bbox=dict(facecolor='white', alpha=0.8) ) params_text = ( f"物理参数: k1={self.learned_params['k1']:.4f}, " f"k2={self.learned_params['k2']:.4f}, " f"alpha={self.learned_params['alpha']:.4f}, " f"beta={self.learned_params['beta']:.4f} | " f"Teacher Forcing概率: {teacher_forcing_prob}" ) self.ax.text( 0.5, 1.12, params_text, transform=self.ax.transAxes, ha='center', fontsize=8, bbox=dict(facecolor='white', alpha=0.8) ) # 调整布局 plt.tight_layout(pad=2.0) # 更新画布 self.canvas.draw() # 保存预测结果 self.predictions = predictions self.uncertainty = uncertainty self.actual_values = actual_values self.test_time = test_time self.mc_predictions = mc_predictions_scaled 
self.status_var.set(f"预测完成(MC Dropout采样{mc_iterations}次)") except Exception as e: messagebox.showerror("预测错误", f"预测失败:\n{str(e)}") self.status_var.set("预测失败") import traceback traceback.print_exc() def save_results(self): """保存预测结果训练历史数据""" if not hasattr(self, 'predictions') or not hasattr(self, 'train_history'): messagebox.showwarning("警告", "请先生成预测结果并完成训练") return # 选择保存路径 save_path = filedialog.asksaveasfilename( defaultextension=".xlsx", filetypes=[("Excel文件", "*.xlsx"), ("所有文件", "*.*")], title="保存结果" ) if not save_path: return try: # 1. 创建预测结果DataFrame result_df = pd.DataFrame({ '时间': self.test_time, '实际水位': self.actual_values.flatten(), '预测水位': self.predictions.flatten() }) # 2. 创建评估指标DataFrame metrics_df = pd.DataFrame([self.evaluation_metrics]) # 3. 创建训练历史DataFrame history_data = { '轮次': list(range(1, len(self.train_history['train_data_loss']) + 1)), '训练数据损失': self.train_history['train_data_loss'], '物理损失': self.train_history['physics_loss'], '验证数据损失': self.train_history['valid_data_loss'] } # 添加训练集指标 for metric in ['MSE', 'RMSE', 'MAE', 'MAPE', 'R2']: history_data[f'训练集_{metric}'] = [item[metric] for item in self.train_history['train_metrics']] # 添加验证集指标 for metric in ['MSE', 'RMSE', 'MAE', 'MAPE', 'R2']: history_data[f'验证集_{metric}'] = [item[metric] for item in self.train_history['valid_metrics']] history_df = pd.DataFrame(history_data) # 保存到Excel with pd.ExcelWriter(save_path) as writer: result_df.to_excel(writer, sheet_name='预测结果', index=False) metrics_df.to_excel(writer, sheet_name='评估指标', index=False) history_df.to_excel(writer, sheet_name='训练历史', index=False) # 保存图表 chart_path = os.path.splitext(save_path)[0] + "_chart.png" self.fig.savefig(chart_path, dpi=300) # 保存损失曲线图 loss_path = os.path.splitext(save_path)[0] + "_loss.png" self.loss_fig.savefig(loss_path, dpi=300) self.status_var.set(f"结果已保存至: {os.path.basename(save_path)}") messagebox.showinfo("保存成功", f"预测结果图表已保存至:\n" f"主文件: {save_path}\n" f"预测图表: {chart_path}\n" f"损失曲线: {loss_path}") except 
Exception as e: messagebox.showerror("保存错误", f"保存结果失败:\n{str(e)}") def reset(self): # 重置归一化器 self.scaler_t = MinMaxScaler(feature_range=(0, 1)) self.scaler_h = MinMaxScaler(feature_range=(0, 1)) self.scaler_dt = MinMaxScaler(feature_range=(0, 1)) """重置程序状态""" self.train_df = None self.test_df = None self.model = None self.train_file_var.set("") self.test_file_var.set("") # 清除训练历史 if hasattr(self, 'train_history'): del self.train_history # 清除图表 if hasattr(self, 'ax'): self.ax.clear() if hasattr(self, 'loss_ax'): self.loss_ax.clear() # 重绘画布 if hasattr(self, 'canvas'): self.canvas.draw() if hasattr(self, 'loss_canvas'): self.loss_canvas.draw() # 清除状态 self.status_var.set("已重置,请选择新数据") # 清除预测结果 if hasattr(self, 'predictions'): del self.predictions # 清除指标文本 if hasattr(self, 'metrics_var'): self.metrics_var.set("") messagebox.showinfo("重置", "程序已重置,可以开始新的分析") if __name__ == "__main__": root = tk.Tk() app = DamSeepageModel(root) root.mainloop() 检查错误并纠正
最新发布
08-01
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值