tst_max.h

Windows message handling and dialog boxes

/* Menu command identifiers dispatched by WndProc's WM_COMMAND handler. */
#define IDM_EXIT           100
#define IDM_TEST           200
#define IDM_ABOUT          300

/* Resource-ID range (inclusive) of the version strings shown in the About dialog. */
#define DLG_VERFIRST        400
#define DLG_VERLAST         404

/* Main window procedure (Win32 convention) — definition not visible here. */
LRESULT CALLBACK WndProc(HWND, UINT, WPARAM, LPARAM);
/* Presumably the About-dialog procedure — confirm against its definition. */
LRESULT CALLBACK About  (HWND, UINT, WPARAM, LPARAM);

#include "drv_comm.h"
#include "stack_config.h"
#include "bmt_trc.h"
#include "bmt_chr_setting.h"
#include "bmt_utility.h"
#include "bmt_hw.h"
#include "sgm41512sd.h"

/*
 * SGM41512 charger-IC driver entry points (implemented elsewhere).
 * Current values are in micro-amps (uA) and voltages in micro-volts (uV)
 * unless the parameter name says otherwise (e.g. mV).
 */
extern int SGM41512SD_set_watchdog_timer(int time);
extern int SGM41512SD_set_term_curr(kal_uint32 uA);
extern int SGM41512SD_set_prechrg_curr(int uA);
extern int SGM41512SD_get_vindpm_offset_os(void);
extern int SGM41512SD_set_vindpm_offset_os(kal_uint8 offset_os);
extern int SGM41512SD_enable_otg(unsigned char en);
extern int SGM41512SD_get_ichg_curr(kal_uint32 *uA);
extern int SGM41512SD_set_ichrg_curr(unsigned int uA);
extern int SGM41512SD_set_chrg_volt(kal_uint32 chrg_volt);
extern int SGM41512SD_get_chrg_volt(unsigned int *volt);
extern int SGM41512SD_set_input_volt_lim(unsigned int vindpm);
extern int SGM41512SD_get_input_volt_lim(kal_uint32 *uV);
extern int SGM41512SD_set_input_curr_lim(unsigned int iindpm);
extern int SGM41512SD_get_input_curr_lim(unsigned int *ilim);
extern int SGM41512SD_enable_charger(void);
extern int SGM41512SD_disable_charger(void);
extern int SGM41512SD_set_recharge_volt(int mV);
extern int SGM41512SD_set_wdt_rst(unsigned char is_rst);
extern int SGM41512SD_dump_register(void);

/* Static battery description used to program the charger's limits. */
struct power_supply_battery_info {
    /* Basic pack data */
    int energy_full_design_uwh;
    int charge_full_design_uah;
    int voltage_min_design_uv;
    int voltage_max_design_uv;
    /* Charging parameters */
    int precharge_current_ua;
    int charge_term_current_ua;
    int constant_charge_current_max_ua;
    int constant_charge_voltage_max_uv;
    /* Temperature thresholds */
    int temp_ambient_alert_min;
    int temp_ambient_alert_max;
    int temp_alert_min;
    int temp_alert_max;
    int temp_min;
    int temp_max;
    /* Battery impedance */
    int factory_internal_resistance_uohm;
    /* OCV (open-circuit voltage) table */
    int *ocv_table;
    int ocv_table_size;
    /* Other advanced parameters */
    int over_voltage_limit_uv;
    int tst_level;
    int charge_restart_voltage_uv;
    int precharge_voltage_max_uv;
    int precharge_voltage_min_uv;
};

/*
 * One-time hardware initialization for the SGM41512 charger.
 * Programs charge current/voltage, pre-charge and termination currents,
 * the recharge threshold, then enables charging.
 * Returns 0 on success; on any setter failure the charger is disabled
 * and the setter's error code is returned.
 */
int SGM41512SD_hw_init(void)
{
    int ret = 0;
    struct power_supply_battery_info bat_info = {0};

    /* 500000 uA = 500 mA fast-charge limit.
     * NOTE(review): the original comment said "480MA", which does not
     * match the value — 500 mA is what is actually programmed. */
    bat_info.constant_charge_current_max_ua = 500000;
    bat_info.constant_charge_voltage_max_uv = SGM41512SD_VREG_V_DEF_uV;
    bat_info.precharge_current_ua = SGM41512SD_PRECHRG_I_DEF_uA;
    bat_info.charge_term_current_ua = SGM41512SD_TERMCHRG_I_DEF_uA;

    GPIO_WriteIO(1, 20); /* Enable the charger IC (original comment: "enable IC") */

    SGM41512SD_set_watchdog_timer(0); /* disable the charger's watchdog */

    ret = SGM41512SD_set_ichrg_curr(bat_info.constant_charge_current_max_ua);
    if (ret)
        goto err_out;

    ret = SGM41512SD_set_prechrg_curr(bat_info.precharge_current_ua);
    if (ret)
        goto err_out;

    ret = SGM41512SD_set_chrg_volt(bat_info.constant_charge_voltage_max_uv);
    if (ret)
        goto err_out;

    ret = SGM41512SD_set_term_curr(bat_info.charge_term_current_ua);
    if (ret)
        goto err_out;

    /* Input voltage/current limits and VAC OVP are intentionally left at
     * the chip's power-on defaults (calls were commented out upstream). */

    ret = SGM41512SD_set_recharge_volt(200); /* valid range: 100-200 mV */
    if (ret)
        goto err_out;

    ret = SGM41512SD_enable_charger();
    if (ret)
        goto err_out;

    return 0;

err_out:
    /* Best effort: make sure charging is off if any step failed. */
    SGM41512SD_disable_charger();
    return ret;
}
08-31
#include "tst_test.h"
#include "tst_safe_macros.h"
#include "lapi/sched.h"

#define MAX_TRIES 1000

/*
 * Child: listens on an rtnetlink socket for link create/delete/up/down
 * events and reports PASS if one arrives after the parent toggles an
 * interface in its own (unshared) network namespace.
 */
static void child_func(void)
{
	int fd, len, event_found, tries;
	struct sockaddr_nl sa;
	char buffer[4096];
	struct nlmsghdr *nlh;

	/* subscribe to link-state notifications */
	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;
	sa.nl_groups = RTMGRP_LINK;

	fd = SAFE_SOCKET(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	SAFE_BIND(fd, (struct sockaddr *) &sa, sizeof(sa));

	/* tell the parent we are listening; wait for it to create an interface */
	TST_CHECKPOINT_WAKE_AND_WAIT(0);

	/*
	 * Poll non-blockingly to avoid "resource temporarily unavailable"
	 * errors when the test is repeated with the -i option.
	 */
	event_found = 0;
	nlh = (struct nlmsghdr *) buffer;
	for (tries = 0; tries < MAX_TRIES; tries++) {
		len = recv(fd, nlh, sizeof(buffer), MSG_DONTWAIT);
		if (len > 0 &&
		    (nlh->nlmsg_type == RTM_NEWLINK ||
		     nlh->nlmsg_type == RTM_DELLINK)) {
			/* only interface create/delete events end the wait */
			event_found++;
			break;
		}
		usleep(10000);
	}

	SAFE_CLOSE(fd);

	if (event_found)
		tst_res(TPASS, "interface changes detected");
	else
		tst_res(TFAIL, "failed to detect interface changes");

	exit(0);
}

/* Parent: unshare the net namespace, then add/remove a TAP device so the
 * child (still in the old namespace's socket) can observe the events. */
static void test_netns_netlink(void)
{
	SAFE_UNSHARE(CLONE_NEWNET);

	if (SAFE_FORK() == 0)
		child_func();

	/* wait until the child has its netlink socket bound */
	TST_CHECKPOINT_WAIT(0);

	/* create, then delete, TAP interface dummy0 */
	if (WEXITSTATUS(system("ip tuntap add dev dummy0 mode tap")))
		tst_brk(TBROK, "adding interface failed");
	if (WEXITSTATUS(system("ip tuntap del mode tap dummy0")))
		tst_brk(TBROK, "removing interface failed");

	/* let the child drain its socket and report */
	TST_CHECKPOINT_WAKE(0);

	tst_reap_children();
}

static struct tst_test test = {
	.test_all = test_netns_netlink,
	.needs_checkpoints = 1,
	.needs_root = 1,
	.forks_child = 1,
	.needs_kconfigs = (const char *[]) {
		"CONFIG_NET_NS=y",
		"CONFIG_TUN",
		NULL
	},
};
07-15
# =====================================================================
# run script (e.g. run_longExp.py) — CLI entry for training/testing the
# Autoformer / Transformer / PatchTST family of forecasting models.
# =====================================================================
import argparse
import os
import random

import numpy as np
import torch

from exp.exp_main import Exp_Main

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Autoformer & Transformer family for Time Series Forecasting')

    # random seed
    parser.add_argument('--random_seed', type=int, default=2021, help='random seed')

    # basic config
    parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
    parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
    parser.add_argument('--model', type=str, required=True, default='Autoformer',
                        help='model name, options: [Autoformer, Informer, Transformer]')

    # data loader
    parser.add_argument('--data', type=str, required=True, default='ETTm1', help='dataset type')
    parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')
    parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')
    parser.add_argument('--features', type=str, default='M',
                        help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
    parser.add_argument('--target', type=str, default='sensor1_clean', help='target feature in S or MS task')
    parser.add_argument('--freq', type=str, default='d',
                        help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
    parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')

    # forecasting task
    parser.add_argument('--seq_len', type=int, default=12, help='input sequence length')
    parser.add_argument('--label_len', type=int, default=6, help='start token length')
    parser.add_argument('--pred_len', type=int, default=1, help='prediction sequence length')

    # DLinear
    # parser.add_argument('--individual', action='store_true', default=False,
    #                     help='DLinear: a linear layer for each variate(channel) individually')

    # PatchTST
    parser.add_argument('--fc_dropout', type=float, default=0.05, help='fully connected dropout')
    parser.add_argument('--head_dropout', type=float, default=0.0, help='head dropout')
    parser.add_argument('--patch_len', type=int, default=16, help='patch length')
    parser.add_argument('--stride', type=int, default=8, help='stride')
    parser.add_argument('--padding_patch', default='end', help='None: None; end: padding on the end')
    parser.add_argument('--revin', type=int, default=1, help='RevIN; True 1 False 0')
    parser.add_argument('--affine', type=int, default=0, help='RevIN-affine; True 1 False 0')
    parser.add_argument('--subtract_last', type=int, default=0, help='0: subtract mean; 1: subtract last')
    parser.add_argument('--decomposition', type=int, default=0, help='decomposition; True 1 False 0')
    parser.add_argument('--kernel_size', type=int, default=25, help='decomposition-kernel')
    parser.add_argument('--individual', type=int, default=0, help='individual head; True 1 False 0')

    # Formers
    parser.add_argument('--embed_type', type=int, default=0,
                        help='0: default 1: value embedding + temporal embedding + positional embedding 2: value embedding + temporal embedding 3: value embedding + positional embedding 4: value embedding')
    # For DLinear with --individual, enc_in is the number of channels.
    parser.add_argument('--enc_in', type=int, default=10, help='encoder input size')
    parser.add_argument('--dec_in', type=int, default=10, help='decoder input size')
    parser.add_argument('--c_out', type=int, default=10, help='output size')
    parser.add_argument('--d_model', type=int, default=128, help='dimension of model')
    parser.add_argument('--n_heads', type=int, default=4, help='num of heads')
    parser.add_argument('--e_layers', type=int, default=1, help='num of encoder layers')
    parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
    parser.add_argument('--d_ff', type=int, default=256, help='dimension of fcn')
    parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
    parser.add_argument('--factor', type=int, default=1, help='attn factor')
    parser.add_argument('--distil', action='store_false',
                        help='whether to use distilling in encoder, using this argument means not using distilling',
                        default=True)
    parser.add_argument('--dropout', type=float, default=0.05, help='dropout')
    parser.add_argument('--embed', type=str, default='timeF',
                        help='time features encoding, options:[timeF, fixed, learned]')
    parser.add_argument('--activation', type=str, default='gelu', help='activation')
    parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
    parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')

    # optimization
    parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
    parser.add_argument('--itr', type=int, default=2, help='experiments times')
    parser.add_argument('--train_epochs', type=int, default=30, help='train epochs')
    parser.add_argument('--batch_size', type=int, default=16, help='batch size of train input data')
    parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
    parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
    parser.add_argument('--des', type=str, default='test', help='exp description')
    parser.add_argument('--loss', type=str, default='mse', help='loss function')
    parser.add_argument('--lradj', type=str, default='type3', help='adjust learning rate')
    parser.add_argument('--pct_start', type=float, default=0.3, help='pct_start')
    parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)

    # GPU
    parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
    parser.add_argument('--gpu', type=int, default=0, help='gpu')
    parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
    parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
    parser.add_argument('--test_flop', action='store_true', default=False, help='See utils/tools for usage')

    args = parser.parse_args()

    # Fix all random seeds for reproducibility.
    fix_seed = args.random_seed
    random.seed(fix_seed)
    torch.manual_seed(fix_seed)
    np.random.seed(fix_seed)

    args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False

    if args.use_gpu and args.use_multi_gpu:
        # BUG FIX: the original assigned to the misspelled attribute
        # 'args.dvices', so args.devices kept any embedded spaces.
        args.devices = args.devices.replace(' ', '')
        device_ids = args.devices.split(',')
        args.device_ids = [int(id_) for id_ in device_ids]
        args.gpu = args.device_ids[0]

    print('Args in experiment:')
    print(args)

    Exp = Exp_Main

    if args.is_training:
        for ii in range(args.itr):
            # setting record of experiments
            setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
                args.model_id, args.model, args.data, args.features, args.seq_len, args.label_len,
                args.pred_len, args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff,
                args.factor, args.embed, args.distil, args.des, ii)

            exp = Exp(args)  # set experiments
            print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
            exp.train(setting)

            print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
            exp.test(setting)

            if args.do_predict:
                print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
                exp.predict(setting, True)

            torch.cuda.empty_cache()
    else:
        ii = 0
        setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
            args.model_id, args.model, args.data, args.features, args.seq_len, args.label_len,
            args.pred_len, args.d_model, args.n_heads, args.e_layers, args.d_layers, args.d_ff,
            args.factor, args.embed, args.distil, args.des, ii)

        exp = Exp(args)  # set experiments
        print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
        exp.test(setting, test=1)
        torch.cuda.empty_cache()

# =====================================================================
# exp/exp_main.py — pasted below the run script in this scrape; in the
# real repository this is a separate module.
# =====================================================================
from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, test_params_flop
from utils.metrics import metric

import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.optim import lr_scheduler

import os
import time
import warnings
import matplotlib.pyplot as plt

warnings.filterwarnings('ignore')


# ========= small shared helpers =========

def ensure_dir(path):
    # Create the directory (and parents) if it does not exist yet.
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)


def plot_loss_curve(train_hist, val_hist, save_path):
    """Plot the train/validation loss curves.

    The x axis is the epoch index (one point per epoch, independent of
    seq_len). Does nothing when train_hist is empty.
    """
    if not train_hist:
        return
    plt.figure(figsize=(6, 4))
    plt.plot(train_hist, label="train")
    if val_hist:
        plt.plot(val_hist, label="val")
    plt.xlabel("epoch")
    plt.ylabel("loss")
    plt.title("Train and Val Loss")
    plt.legend()
    plt.tight_layout()
    plt.savefig(save_path, dpi=150)
    plt.close()


def plot_batch_predictions(inputs, trues, preds, pred_len, folder_path, prefix="test"):
    """Plot the first sample of a batch.

    Left part: encoder input (ground truth); right part: decoder window,
    ground truth vs. prediction. inputs/trues/preds are expected to be
    already de-normalized back to original units.
    """
    try:
        if len(preds) == 0:
            return
        # inputs, trues, preds: [N, L, C]
        inputs = np.asarray(inputs)
        trues = np.asarray(trues)
        preds = np.asarray(preds)

        x = inputs[0]       # [L_in, C]
        y_true = trues[0]   # [L_out, C]
        y_pred = preds[0]   # [L_out, C]

        ch = -1  # plot the last channel (change to 0 for the first column)
        x_ch = x[:, ch]
        y_true_ch = y_true[:, ch]
        y_pred_ch = y_pred[:, ch]

        gt = np.concatenate([x_ch, y_true_ch], axis=0)
        pd = np.concatenate([x_ch, y_pred_ch], axis=0)
        t = np.arange(len(gt))

        plt.figure(figsize=(8, 4))
        plt.plot(t, gt, label="Ground Truth")
        plt.plot(t, pd, label="Prediction")
        plt.axvline(len(x_ch) - 1, color="gray", linestyle="--", linewidth=1)
        plt.xlabel("Time index")
        plt.ylabel("Value")
        plt.title(f"{prefix} prediction example")
        plt.legend()
        plt.tight_layout()
        fname = os.path.join(folder_path, f"{prefix}_pred_0.png")
        plt.savefig(fname, dpi=150)
        plt.close()
    except Exception as e:
        # Plotting is best-effort; never let it break the experiment.
        print(f"[WARN] plot_batch_predictions error: {e}")
        pass


def plot_full_series_predictions(inputs, trues, preds, pred_len, folder_path, prefix="test_full"):
    """Stitch all test windows into one "full" series plot.

    - blue: ground truth over the whole series
    - orange: predictions over the last 30% of the stitched series
    The ground truth stays visible in the prediction region so both lines
    can be compared there. inputs/trues/preds are already de-normalized.
    """
    try:
        if len(preds) == 0:
            return
        inputs = np.asarray(inputs)  # [N, L_in, C]
        trues = np.asarray(trues)    # [N, L_out, C]
        preds = np.asarray(preds)    # [N, L_out, C]
        N, L_in, C = inputs.shape
        _, L_out, _ = trues.shape

        # BUG-FIX (comment only): the original comment claimed "last
        # channel" but the code uses channel 0 — it plots the FIRST channel
        # (plot_batch_predictions uses the last one).
        ch = 0

        gt_list = []
        pd_list = []
        for i in range(N):
            x_ch = inputs[i, :, ch]      # encoder segment
            y_true_ch = trues[i, :, ch]  # decoder ground truth
            y_pred_ch = preds[i, :, ch]  # decoder prediction
            if i == 0:
                # first window: keep the full encoder + output span
                gt_list.append(x_ch)
                gt_list.append(y_true_ch)
                pd_list.append(x_ch)
                pd_list.append(y_pred_ch)
            else:
                # later windows: append only the last pred_len points to
                # avoid stitching in large overlapping spans
                gt_list.append(y_true_ch[-pred_len:])
                pd_list.append(y_pred_ch[-pred_len:])

        full_gt = np.concatenate(gt_list, axis=0)  # full ground truth (incl. prediction span)
        full_pd = np.concatenate(pd_list, axis=0)  # full stitched prediction

        T = len(full_gt)
        split = int(T * 0.7)  # 70% boundary (change 0.7 to 0.8 for 80%)
        t = np.arange(T)

        plt.figure(figsize=(10, 4))
        # ground truth everywhere, including the last 30%
        plt.plot(t, full_gt, label="Ground Truth (full)", linewidth=1.5)
        # predictions only over the last 30%, overlaid on the ground truth
        plt.plot(t[split:], full_pd[split:], label="Prediction (last 30%)", linewidth=1)
        plt.axvline(split, color="gray", linestyle="--", linewidth=1)
        plt.xlabel("Time index")
        plt.ylabel("Value")
        plt.title(f"{prefix} prediction example (full series)")
        plt.legend()
        plt.tight_layout()
        fname = os.path.join(folder_path, f"{prefix}_full_series.png")
        plt.savefig(fname, dpi=150)
        plt.close()
    except Exception as e:
        print(f"[WARN] plot_full_series_predictions error: {e}")
        pass


# ========= main experiment class =========

class Exp_Main(Exp_Basic):
    """Training / evaluation / prediction driver for the forecaster models."""

    def __init__(self, args):
        super(Exp_Main, self).__init__(args)

    def _build_model(self):
        # Implemented here rather than relying on Exp_Basic's NotImplemented.
        from models import Informer, Autoformer, Transformer, DLinear, Linear, NLinear, PatchTST
        model_dict = {
            'Autoformer': Autoformer,
            'Transformer': Transformer,
            'Informer': Informer,
            'DLinear': DLinear,
            'NLinear': NLinear,
            'Linear': Linear,
            'PatchTST': PatchTST,
        }
        model = model_dict[self.args.model].Model(self.args).float()
        if self.args.use_multi_gpu and self.args.use_gpu:
            model = nn.DataParallel(model, device_ids=self.args.device_ids)
        return model

    def _get_data(self, flag):
        data_set, data_loader = data_provider(self.args, flag)
        return data_set, data_loader

    def _select_optimizer(self):
        return optim.Adam(self.model.parameters(), lr=self.args.learning_rate)

    def _select_criterion(self):
        return nn.MSELoss()

    def vali(self, vali_data, vali_loader, criterion):
        """Average criterion over a validation/test loader (no grad)."""
        self.model.eval()
        total_loss = []
        with torch.no_grad():
            for batch_x, batch_y, batch_x_mark, batch_y_mark in vali_loader:
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float()
                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                # decoder input: label_len known steps + zeros for pred_len
                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat(
                    [batch_y[:, :self.args.label_len, :], dec_inp], dim=1
                ).to(self.device)

                if 'Linear' in self.args.model or 'TST' in self.args.model:
                    outputs = self.model(batch_x)
                else:
                    if self.args.output_attention:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                    else:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, -self.args.pred_len:, f_dim:]
                batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)

                loss = criterion(outputs, batch_y)
                total_loss.append(loss.item())
        self.model.train()
        return float(np.mean(total_loss)) if total_loss else float('inf')

    def train(self, setting):
        """Train with early stopping; saves checkpoint and a loss-curve plot."""
        train_data, train_loader = self._get_data('train')
        vali_data, vali_loader = self._get_data('val')
        test_data, test_loader = self._get_data('test')

        ckpt_dir = os.path.join(self.args.checkpoints, setting)
        ensure_dir(ckpt_dir)

        train_steps = len(train_loader)
        early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
        model_optim = self._select_optimizer()
        criterion = self._select_criterion()

        if self.args.use_amp:
            scaler = torch.cuda.amp.GradScaler()

        scheduler = lr_scheduler.OneCycleLR(
            optimizer=model_optim,
            steps_per_epoch=train_steps,
            pct_start=self.args.pct_start,
            epochs=self.args.train_epochs,
            max_lr=self.args.learning_rate
        )

        train_hist = []
        val_hist = []

        for epoch in range(self.args.train_epochs):
            epoch_loss = []
            self.model.train()
            t0 = time.time()

            for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
                model_optim.zero_grad()
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float().to(self.device)
                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                # decoder input
                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat(
                    [batch_y[:, :self.args.label_len, :], dec_inp], dim=1
                ).to(self.device)

                if self.args.use_amp:
                    with torch.cuda.amp.autocast():
                        if 'Linear' in self.args.model or 'TST' in self.args.model:
                            outputs = self.model(batch_x)
                        else:
                            if self.args.output_attention:
                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                            else:
                                outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
                        f_dim = -1 if self.args.features == 'MS' else 0
                        outputs = outputs[:, -self.args.pred_len:, f_dim:]
                        target = batch_y[:, -self.args.pred_len:, f_dim:]
                        loss = criterion(outputs, target)
                    scaler.scale(loss).backward()
                    scaler.step(model_optim)
                    scaler.update()
                else:
                    if 'Linear' in self.args.model or 'TST' in self.args.model:
                        outputs = self.model(batch_x)
                    else:
                        if self.args.output_attention:
                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                        else:
                            outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
                    f_dim = -1 if self.args.features == 'MS' else 0
                    outputs = outputs[:, -self.args.pred_len:, f_dim:]
                    target = batch_y[:, -self.args.pred_len:, f_dim:]
                    loss = criterion(outputs, target)
                    loss.backward()
                    model_optim.step()

                # 'TST' schedule steps per batch; other schedules per epoch below
                if self.args.lradj == 'TST':
                    adjust_learning_rate(model_optim, scheduler, epoch + 1, self.args, printout=False)
                    scheduler.step()

                epoch_loss.append(loss.item())

            train_loss = float(np.mean(epoch_loss)) if epoch_loss else float('inf')
            vali_loss = self.vali(vali_data, vali_loader, criterion)
            test_loss = self.vali(test_data, test_loader, criterion)
            train_hist.append(train_loss)
            val_hist.append(vali_loss)

            print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - t0))
            print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
                epoch + 1, train_steps, train_loss, vali_loss, test_loss))

            early_stopping(vali_loss, self.model, ckpt_dir)
            if early_stopping.early_stop:
                print("Early stopping")
                break

            if self.args.lradj != 'TST':
                adjust_learning_rate(model_optim, scheduler, epoch + 1, self.args)
            else:
                print('Updating learning rate to {}'.format(scheduler.get_last_lr()[0]))

        # save the loss curve (one point per trained epoch)
        result_dir = os.path.join('results', setting)
        ensure_dir(result_dir)
        plot_loss_curve(train_hist, val_hist, os.path.join(result_dir, 'loss_curve.png'))

        # reload the best (early-stopping) checkpoint before returning
        best_model_path = os.path.join(ckpt_dir, 'checkpoint.pth')
        self.model.load_state_dict(torch.load(best_model_path))
        return self.model

    def test(self, setting, test=0):
        """Evaluate on the test split; saves metrics, pred.npy and plots.

        With test=1 the checkpoint is loaded from disk first (stand-alone
        evaluation without a preceding train() call).
        """
        test_data, test_loader = self._get_data('test')
        if test:
            self.model.load_state_dict(
                torch.load(os.path.join('./checkpoints', setting, 'checkpoint.pth'))
            )

        self.model.eval()
        preds = []
        trues = []
        inputs = []
        folder_path = os.path.join('results', setting)
        ensure_dir(folder_path)

        with torch.no_grad():
            for batch_x, batch_y, batch_x_mark, batch_y_mark in test_loader:
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float().to(self.device)
                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
                dec_inp = torch.cat(
                    [batch_y[:, :self.args.label_len, :], dec_inp], dim=1
                ).to(self.device)

                if 'Linear' in self.args.model or 'TST' in self.args.model:
                    outputs = self.model(batch_x)
                else:
                    if self.args.output_attention:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                    else:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                f_dim = -1 if self.args.features == 'MS' else 0
                outputs = outputs[:, -self.args.pred_len:, f_dim:]
                batch_y = batch_y[:, -self.args.pred_len:, f_dim:]

                preds.append(outputs.detach().cpu().numpy())
                trues.append(batch_y.detach().cpu().numpy())
                inputs.append(batch_x.detach().cpu().numpy())

                if self.args.test_flop:
                    test_params_flop((batch_x.shape[1], batch_x.shape[2]))
                    exit()

        preds = np.array(preds)
        trues = np.array(trues)
        inputs = np.array(inputs)
        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
        trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
        inputs = inputs.reshape(-1, inputs.shape[-2], inputs.shape[-1])

        # ===== de-normalize back to the original (e.g. Excel) units =====
        # assumes the dataset's scaler was fit on all C channels — confirm
        # against data_provider when features != 'M'
        scaler = getattr(test_data, 'scaler', None)
        if scaler is not None:
            N_in, L_in, C_in = inputs.shape
            inputs_2d = inputs.reshape(-1, C_in)
            inputs_2d = scaler.inverse_transform(inputs_2d)
            inputs = inputs_2d.reshape(N_in, L_in, C_in)

            N_out, L_out, C_out = preds.shape
            preds_2d = preds.reshape(-1, C_out)
            trues_2d = trues.reshape(-1, C_out)
            preds_2d = scaler.inverse_transform(preds_2d)
            trues_2d = scaler.inverse_transform(trues_2d)
            preds = preds_2d.reshape(N_out, L_out, C_out)
            trues = trues_2d.reshape(N_out, L_out, C_out)
        # ===== end de-normalization =====

        mae, mse, rmse, mape, mspe, rse, corr = metric(preds, trues)
        print('mse:{}, mae:{}, rse:{}'.format(mse, mae, rse))
        with open("result.txt", "a", encoding="utf-8") as f:
            f.write(setting + "\n")
            f.write('mse:{}, mae:{}, rse:{}'.format(mse, mae, rse))
            f.write('\n\n')

        np.save(os.path.join(folder_path, 'pred.npy'), preds)

        # single-window example + full-series plot (already in original units)
        plot_batch_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="test")
        plot_full_series_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="test")
        return

    def predict(self, setting, load=False):
        """Prediction mode (used with --do_predict); saves real_prediction.npy."""
        pred_data, pred_loader = self._get_data('pred')

        if load:
            best_model_path = os.path.join(self.args.checkpoints, setting, 'checkpoint.pth')
            self.model.load_state_dict(torch.load(best_model_path))

        self.model.eval()
        preds = []
        inputs = []
        trues = []

        with torch.no_grad():
            for batch_x, batch_y, batch_x_mark, batch_y_mark in pred_loader:
                batch_x = batch_x.float().to(self.device)
                batch_y = batch_y.float()
                batch_x_mark = batch_x_mark.float().to(self.device)
                batch_y_mark = batch_y_mark.float().to(self.device)

                dec_inp = torch.zeros(
                    [batch_y.shape[0], self.args.pred_len, batch_y.shape[2]]
                ).float().to(self.device)
                dec_inp = torch.cat(
                    [batch_y[:, :self.args.label_len, :], dec_inp], dim=1
                ).to(self.device)

                if 'Linear' in self.args.model or 'TST' in self.args.model:
                    outputs = self.model(batch_x)
                else:
                    if self.args.output_attention:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
                    else:
                        outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)

                preds.append(outputs.detach().cpu().numpy())
                inputs.append(batch_x.detach().cpu().numpy())
                trues.append(batch_y[:, -self.args.pred_len:, :].detach().cpu().numpy())

        preds = np.array(preds)
        inputs = np.array(inputs)
        trues = np.array(trues)
        preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
        inputs = inputs.reshape(-1, inputs.shape[-2], inputs.shape[-1])
        trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])

        # same de-normalization as in test()
        scaler = getattr(pred_data, 'scaler', None)
        if scaler is not None:
            N_in, L_in, C_in = inputs.shape
            inputs_2d = inputs.reshape(-1, C_in)
            inputs_2d = scaler.inverse_transform(inputs_2d)
            inputs = inputs_2d.reshape(N_in, L_in, C_in)

            N_out, L_out, C_out = preds.shape
            preds_2d = preds.reshape(-1, C_out)
            trues_2d = trues.reshape(-1, C_out)
            preds_2d = scaler.inverse_transform(preds_2d)
            trues_2d = scaler.inverse_transform(trues_2d)
            preds = preds_2d.reshape(N_out, L_out, C_out)
            trues = trues_2d.reshape(N_out, L_out, C_out)

        folder_path = os.path.join('results', setting)
        ensure_dir(folder_path)
        np.save(os.path.join(folder_path, 'real_prediction.npy'), preds)

        # single-window + full-series prediction plots
        plot_batch_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="predict")
        plot_full_series_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="predict")
        return
最新发布
11-12
评论
添加红包

请填写红包祝福语或标题

红包个数最小为10个

红包金额最低5元

当前余额3.43前往充值 >
需支付:10.00
成就一亿技术人!
领取后你会自动成为博主和红包主的粉丝 规则
hope_wisdom
发出的红包
实付
使用余额支付
点击重新获取
扫码支付
钱包余额 0

抵扣说明:

1.余额是钱包充值的虚拟货币,按照1:1的比例进行支付金额的抵扣。
2.余额无法直接购买下载,可以购买VIP、付费专栏及课程。

余额充值