import argparse
import os
import torch
from exp.exp_main import Exp_Main
import random
import numpy as np
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Autoformer & Transformer family for Time Series Forecasting')
# random seed
parser.add_argument('--random_seed', type=int, default=2021, help='random seed')
# basic config
parser.add_argument('--is_training', type=int, required=True, default=1, help='status')
parser.add_argument('--model_id', type=str, required=True, default='test', help='model id')
parser.add_argument('--model', type=str, required=True, default='Autoformer',
help='model name, options: [Autoformer, Informer, Transformer]')
# data loader
parser.add_argument('--data', type=str, required=True, default='ETTm1', help='dataset type')
parser.add_argument('--root_path', type=str, default='./data/ETT/', help='root path of the data file')
parser.add_argument('--data_path', type=str, default='ETTh1.csv', help='data file')
parser.add_argument('--features', type=str, default='M',
help='forecasting task, options:[M, S, MS]; M:multivariate predict multivariate, S:univariate predict univariate, MS:multivariate predict univariate')
parser.add_argument('--target', type=str, default='sensor1_clean', help='target feature in S or MS task')
parser.add_argument('--freq', type=str, default='d',
help='freq for time features encoding, options:[s:secondly, t:minutely, h:hourly, d:daily, b:business days, w:weekly, m:monthly], you can also use more detailed freq like 15min or 3h')
parser.add_argument('--checkpoints', type=str, default='./checkpoints/', help='location of model checkpoints')
# forecasting task
parser.add_argument('--seq_len', type=int, default=12, help='input sequence length')
parser.add_argument('--label_len', type=int, default=6, help='start token length')
parser.add_argument('--pred_len', type=int, default=1, help='prediction sequence length')
# DLinear
#parser.add_argument('--individual', action='store_true', default=False, help='DLinear: a linear layer for each variate(channel) individually')
# PatchTST
parser.add_argument('--fc_dropout', type=float, default=0.05, help='fully connected dropout')
parser.add_argument('--head_dropout', type=float, default=0.0, help='head dropout')
parser.add_argument('--patch_len', type=int, default=16, help='patch length')
parser.add_argument('--stride', type=int, default=8, help='stride')
parser.add_argument('--padding_patch', default='end', help='None: None; end: padding on the end')
parser.add_argument('--revin', type=int, default=1, help='RevIN; True 1 False 0')
parser.add_argument('--affine', type=int, default=0, help='RevIN-affine; True 1 False 0')
parser.add_argument('--subtract_last', type=int, default=0, help='0: subtract mean; 1: subtract last')
parser.add_argument('--decomposition', type=int, default=0, help='decomposition; True 1 False 0')
parser.add_argument('--kernel_size', type=int, default=25, help='decomposition-kernel')
parser.add_argument('--individual', type=int, default=0, help='individual head; True 1 False 0')
# Formers
parser.add_argument('--embed_type', type=int, default=0, help='0: default 1: value embedding + temporal embedding + positional embedding 2: value embedding + temporal embedding 3: value embedding + positional embedding 4: value embedding')
parser.add_argument('--enc_in', type=int, default=10, help='encoder input size') # DLinear with --individual, use this hyperparameter as the number of channels
parser.add_argument('--dec_in', type=int, default=10, help='decoder input size')
parser.add_argument('--c_out', type=int, default=10, help='output size')
parser.add_argument('--d_model', type=int, default=128, help='dimension of model')
parser.add_argument('--n_heads', type=int, default=4, help='num of heads')
parser.add_argument('--e_layers', type=int, default=1, help='num of encoder layers')
parser.add_argument('--d_layers', type=int, default=1, help='num of decoder layers')
parser.add_argument('--d_ff', type=int, default=256, help='dimension of fcn')
parser.add_argument('--moving_avg', type=int, default=25, help='window size of moving average')
parser.add_argument('--factor', type=int, default=1, help='attn factor')
parser.add_argument('--distil', action='store_false',
help='whether to use distilling in encoder, using this argument means not using distilling',
default=True)
parser.add_argument('--dropout', type=float, default=0.05, help='dropout')
parser.add_argument('--embed', type=str, default='timeF',
help='time features encoding, options:[timeF, fixed, learned]')
parser.add_argument('--activation', type=str, default='gelu', help='activation')
parser.add_argument('--output_attention', action='store_true', help='whether to output attention in ecoder')
parser.add_argument('--do_predict', action='store_true', help='whether to predict unseen future data')
# optimization
parser.add_argument('--num_workers', type=int, default=10, help='data loader num workers')
parser.add_argument('--itr', type=int, default=2, help='experiments times')
parser.add_argument('--train_epochs', type=int, default=30, help='train epochs')
parser.add_argument('--batch_size', type=int, default=16, help='batch size of train input data')
parser.add_argument('--patience', type=int, default=3, help='early stopping patience')
parser.add_argument('--learning_rate', type=float, default=0.0001, help='optimizer learning rate')
parser.add_argument('--des', type=str, default='test', help='exp description')
parser.add_argument('--loss', type=str, default='mse', help='loss function')
parser.add_argument('--lradj', type=str, default='type3', help='adjust learning rate')
parser.add_argument('--pct_start', type=float, default=0.3, help='pct_start')
parser.add_argument('--use_amp', action='store_true', help='use automatic mixed precision training', default=False)
# GPU
parser.add_argument('--use_gpu', type=bool, default=True, help='use gpu')
parser.add_argument('--gpu', type=int, default=0, help='gpu')
parser.add_argument('--use_multi_gpu', action='store_true', help='use multiple gpus', default=False)
parser.add_argument('--devices', type=str, default='0,1,2,3', help='device ids of multile gpus')
parser.add_argument('--test_flop', action='store_true', default=False, help='See utils/tools for usage')
args = parser.parse_args()
# random seed
fix_seed = args.random_seed
random.seed(fix_seed)
torch.manual_seed(fix_seed)
np.random.seed(fix_seed)
args.use_gpu = True if torch.cuda.is_available() and args.use_gpu else False
if args.use_gpu and args.use_multi_gpu:
args.dvices = args.devices.replace(' ', '')
device_ids = args.devices.split(',')
args.device_ids = [int(id_) for id_ in device_ids]
args.gpu = args.device_ids[0]
print('Args in experiment:')
print(args)
Exp = Exp_Main
if args.is_training:
for ii in range(args.itr):
# setting record of experiments
setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(
args.model_id,
args.model,
args.data,
args.features,
args.seq_len,
args.label_len,
args.pred_len,
args.d_model,
args.n_heads,
args.e_layers,
args.d_layers,
args.d_ff,
args.factor,
args.embed,
args.distil,
args.des,ii)
exp = Exp(args) # set experiments
print('>>>>>>>start training : {}>>>>>>>>>>>>>>>>>>>>>>>>>>'.format(setting))
exp.train(setting)
print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
exp.test(setting)
if args.do_predict:
print('>>>>>>>predicting : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
exp.predict(setting, True)
torch.cuda.empty_cache()
else:
ii = 0
setting = '{}_{}_{}_ft{}_sl{}_ll{}_pl{}_dm{}_nh{}_el{}_dl{}_df{}_fc{}_eb{}_dt{}_{}_{}'.format(args.model_id,
args.model,
args.data,
args.features,
args.seq_len,
args.label_len,
args.pred_len,
args.d_model,
args.n_heads,
args.e_layers,
args.d_layers,
args.d_ff,
args.factor,
args.embed,
args.distil,
args.des, ii)
exp = Exp(args) # set experiments
print('>>>>>>>testing : {}<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<'.format(setting))
exp.test(setting, test=1)
torch.cuda.empty_cache()
帮我看一下我的代码,到底有没有进行预测,感觉误差很小,这是真实预测出来的吗# exp/exp_main.py
from data_provider.data_factory import data_provider
from exp.exp_basic import Exp_Basic
from utils.tools import EarlyStopping, adjust_learning_rate, test_params_flop
from utils.metrics import metric
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.optim import lr_scheduler
import os
import time
import warnings
import matplotlib.pyplot as plt
warnings.filterwarnings('ignore')
# ========= 通用小工具 =========
def ensure_dir(path):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
def plot_loss_curve(train_hist, val_hist, save_path):
"""
训练/验证损失曲线:
x 轴是 epoch 数(多少个 epoch 就有多少个点,不受 seq_len=12 影响)
"""
if not train_hist:
return
plt.figure(figsize=(6, 4))
plt.plot(train_hist, label="train")
if val_hist:
plt.plot(val_hist, label="val")
plt.xlabel("epoch")
plt.ylabel("loss")
plt.title("Train and Val Loss")
plt.legend()
plt.tight_layout()
plt.savefig(save_path, dpi=150)
plt.close()
def plot_batch_predictions(inputs, trues, preds, pred_len, folder_path, prefix="test"):
"""
画一个 batch 中第 1 条样本:
左边:encoder 输入(真实)
右边:decoder 部分:真实/预测
注意:这里 inputs / trues / preds 已经是“反归一化后”的原始数值
"""
try:
if len(preds) == 0:
return
# inputs, trues, preds: [N, L, C]
inputs = np.asarray(inputs)
trues = np.asarray(trues)
preds = np.asarray(preds)
x = inputs[0] # [L_in, C]
y_true = trues[0] # [L_out, C]
y_pred = preds[0] # [L_out, C]
ch = -1 # 取最后一个通道画(你可以改成 0 看第一列)
x_ch = x[:, ch]
y_true_ch = y_true[:, ch]
y_pred_ch = y_pred[:, ch]
gt = np.concatenate([x_ch, y_true_ch], axis=0)
pd = np.concatenate([x_ch, y_pred_ch], axis=0)
t = np.arange(len(gt))
plt.figure(figsize=(8, 4))
plt.plot(t, gt, label="Ground Truth")
plt.plot(t, pd, label="Prediction")
plt.axvline(len(x_ch) - 1, color="gray", linestyle="--", linewidth=1)
plt.xlabel("Time index")
plt.ylabel("Value")
plt.title(f"{prefix} prediction example")
plt.legend()
plt.tight_layout()
fname = os.path.join(folder_path, f"{prefix}_pred_0.png")
plt.savefig(fname, dpi=150)
plt.close()
except Exception as e:
print(f"[WARN] plot_batch_predictions error: {e}")
pass
def plot_full_series_predictions(inputs, trues, preds, pred_len, folder_path, prefix="test_full"):
"""
把测试集所有窗口拼成一条“完整时间序列”:
- 蓝色:全程真实值 Ground Truth (full)
- 橙色:最后 30% 区间的预测值 Prediction (last 30%)
这段时间上的“真实值”仍然可见(蓝线),所以预测区间是“蓝+橙”两条线对比。
注意:此处 inputs / trues / preds 已经是反归一化后的原始数值。
"""
try:
if len(preds) == 0:
return
inputs = np.asarray(inputs) # [N, L_in, C]
trues = np.asarray(trues) # [N, L_out, C]
preds = np.asarray(preds) # [N, L_out, C]
N, L_in, C = inputs.shape
_, L_out, _ = trues.shape
ch = 0 # 使用最后一个通道
gt_list = []
pd_list = []
for i in range(N):
x_ch = inputs[i, :, ch] # encoder 段
y_true_ch = trues[i, :, ch] # decoder 真值
y_pred_ch = preds[i, :, ch] # decoder 预测
if i == 0:
# 第一段:完整放入 encoder + 输出
gt_list.append(x_ch)
gt_list.append(y_true_ch)
pd_list.append(x_ch)
pd_list.append(y_pred_ch)
else:
# 后续段:只追加最后 pred_len 点,避免大量重复
gt_list.append(y_true_ch[-pred_len:])
pd_list.append(y_pred_ch[-pred_len:])
full_gt = np.concatenate(gt_list, axis=0) # 全程真实值(包括预测区间)
full_pd = np.concatenate(pd_list, axis=0) # 全程预测拼接
T = len(full_gt)
split = int(T * 0.7) # 70% 分界(你要改成 80% 就把 0.7 改成 0.8)
t = np.arange(T)
plt.figure(figsize=(10, 4))
# 全程真实值(包括最后 30%)
plt.plot(t, full_gt, label="Ground Truth (full)", linewidth=1.5)
# 只在最后 30% 区间画预测曲线,与真实值重合对比
plt.plot(t[split:], full_pd[split:], label="Prediction (last 30%)", linewidth=1)
plt.axvline(split, color="gray", linestyle="--", linewidth=1)
plt.xlabel("Time index")
plt.ylabel("Value")
plt.title(f"{prefix} prediction example (full series)")
plt.legend()
plt.tight_layout()
fname = os.path.join(folder_path, f"{prefix}_full_series.png")
plt.savefig(fname, dpi=150)
plt.close()
except Exception as e:
print(f"[WARN] plot_full_series_predictions error: {e}")
pass
# ========= 主实验类 =========
class Exp_Main(Exp_Basic):
def __init__(self, args):
super(Exp_Main, self).__init__(args)
# 单独实现 build_model,避免依赖 Exp_Basic 里的 NotImplemented
def _build_model(self):
from models import Informer, Autoformer, Transformer, DLinear, Linear, NLinear, PatchTST
model_dict = {
'Autoformer': Autoformer,
'Transformer': Transformer,
'Informer': Informer,
'DLinear': DLinear,
'NLinear': NLinear,
'Linear': Linear,
'PatchTST': PatchTST,
}
model = model_dict[self.args.model].Model(self.args).float()
if self.args.use_multi_gpu and self.args.use_gpu:
model = nn.DataParallel(model, device_ids=self.args.device_ids)
return model
def _get_data(self, flag):
data_set, data_loader = data_provider(self.args, flag)
return data_set, data_loader
def _select_optimizer(self):
return optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
def _select_criterion(self):
return nn.MSELoss()
# 验证集损失
def vali(self, vali_data, vali_loader, criterion):
self.model.eval()
total_loss = []
with torch.no_grad():
for batch_x, batch_y, batch_x_mark, batch_y_mark in vali_loader:
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
dec_inp = torch.cat(
[batch_y[:, :self.args.label_len, :], dec_inp],
dim=1
).to(self.device)
if 'Linear' in self.args.model or 'TST' in self.args.model:
outputs = self.model(batch_x)
else:
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
f_dim = -1 if self.args.features == 'MS' else 0
outputs = outputs[:, -self.args.pred_len:, f_dim:]
batch_y = batch_y[:, -self.args.pred_len:, f_dim:].to(self.device)
loss = criterion(outputs, batch_y)
total_loss.append(loss.item())
self.model.train()
return float(np.mean(total_loss)) if total_loss else float('inf')
# 训练
def train(self, setting):
train_data, train_loader = self._get_data('train')
vali_data, vali_loader = self._get_data('val')
test_data, test_loader = self._get_data('test')
ckpt_dir = os.path.join(self.args.checkpoints, setting)
ensure_dir(ckpt_dir)
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
if self.args.use_amp:
scaler = torch.cuda.amp.GradScaler()
scheduler = lr_scheduler.OneCycleLR(
optimizer=model_optim,
steps_per_epoch=train_steps,
pct_start=self.args.pct_start,
epochs=self.args.train_epochs,
max_lr=self.args.learning_rate
)
train_hist = []
val_hist = []
for epoch in range(self.args.train_epochs):
epoch_loss = []
self.model.train()
t0 = time.time()
for i, (batch_x, batch_y, batch_x_mark, batch_y_mark) in enumerate(train_loader):
model_optim.zero_grad()
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float().to(self.device)
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
dec_inp = torch.cat(
[batch_y[:, :self.args.label_len, :], dec_inp],
dim=1
).to(self.device)
if self.args.use_amp:
with torch.cuda.amp.autocast():
if 'Linear' in self.args.model or 'TST' in self.args.model:
outputs = self.model(batch_x)
else:
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
f_dim = -1 if self.args.features == 'MS' else 0
outputs = outputs[:, -self.args.pred_len:, f_dim:]
target = batch_y[:, -self.args.pred_len:, f_dim:]
loss = criterion(outputs, target)
scaler.scale(loss).backward()
scaler.step(model_optim)
scaler.update()
else:
if 'Linear' in self.args.model or 'TST' in self.args.model:
outputs = self.model(batch_x)
else:
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
f_dim = -1 if self.args.features == 'MS' else 0
outputs = outputs[:, -self.args.pred_len:, f_dim:]
target = batch_y[:, -self.args.pred_len:, f_dim:]
loss = criterion(outputs, target)
loss.backward()
model_optim.step()
if self.args.lradj == 'TST':
adjust_learning_rate(model_optim, scheduler, epoch + 1, self.args, printout=False)
scheduler.step()
epoch_loss.append(loss.item())
train_loss = float(np.mean(epoch_loss)) if epoch_loss else float('inf')
vali_loss = self.vali(vali_data, vali_loader, criterion)
test_loss = self.vali(test_data, test_loader, criterion)
train_hist.append(train_loss)
val_hist.append(vali_loss)
print("Epoch: {} cost time: {}".format(epoch + 1, time.time() - t0))
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, ckpt_dir)
if early_stopping.early_stop:
print("Early stopping")
break
if self.args.lradj != 'TST':
adjust_learning_rate(model_optim, scheduler, epoch + 1, self.args)
else:
print('Updating learning rate to {}'.format(scheduler.get_last_lr()[0]))
# 保存损失曲线(epoch 数量 = 你训练的 epoch 数)
result_dir = os.path.join('results', setting)
ensure_dir(result_dir)
plot_loss_curve(train_hist, val_hist, os.path.join(result_dir, 'loss_curve.png'))
best_model_path = os.path.join(ckpt_dir, 'checkpoint.pth')
self.model.load_state_dict(torch.load(best_model_path))
return self.model
# 测试
def test(self, setting, test=0):
test_data, test_loader = self._get_data('test')
if test:
self.model.load_state_dict(
torch.load(os.path.join('./checkpoints', setting, 'checkpoint.pth'))
)
self.model.eval()
preds = []
trues = []
inputs = []
folder_path = os.path.join('results', setting)
ensure_dir(folder_path)
with torch.no_grad():
for batch_x, batch_y, batch_x_mark, batch_y_mark in test_loader:
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float().to(self.device)
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
dec_inp = torch.zeros_like(batch_y[:, -self.args.pred_len:, :]).float()
dec_inp = torch.cat(
[batch_y[:, :self.args.label_len, :], dec_inp],
dim=1
).to(self.device)
if 'Linear' in self.args.model or 'TST' in self.args.model:
outputs = self.model(batch_x)
else:
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
f_dim = -1 if self.args.features == 'MS' else 0
outputs = outputs[:, -self.args.pred_len:, f_dim:]
batch_y = batch_y[:, -self.args.pred_len:, f_dim:]
preds.append(outputs.detach().cpu().numpy())
trues.append(batch_y.detach().cpu().numpy())
inputs.append(batch_x.detach().cpu().numpy())
if self.args.test_flop:
test_params_flop((batch_x.shape[1], batch_x.shape[2]))
exit()
preds = np.array(preds)
trues = np.array(trues)
inputs = np.array(inputs)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
inputs = inputs.reshape(-1, inputs.shape[-2], inputs.shape[-1])
# ========= 反归一化:还原成 Excel 原始数值 =========
scaler = getattr(test_data, 'scaler', None)
if scaler is not None:
# inputs
N_in, L_in, C_in = inputs.shape
inputs_2d = inputs.reshape(-1, C_in)
inputs_2d = scaler.inverse_transform(inputs_2d)
inputs = inputs_2d.reshape(N_in, L_in, C_in)
# preds / trues
N_out, L_out, C_out = preds.shape
preds_2d = preds.reshape(-1, C_out)
trues_2d = trues.reshape(-1, C_out)
preds_2d = scaler.inverse_transform(preds_2d)
trues_2d = scaler.inverse_transform(trues_2d)
preds = preds_2d.reshape(N_out, L_out, C_out)
trues = trues_2d.reshape(N_out, L_out, C_out)
# ========= 反归一化结束 =========
mae, mse, rmse, mape, mspe, rse, corr = metric(preds, trues)
print('mse:{}, mae:{}, rse:{}'.format(mse, mae, rse))
with open("result.txt", "a", encoding="utf-8") as f:
f.write(setting + "\n")
f.write('mse:{}, mae:{}, rse:{}'.format(mse, mae, rse))
f.write('\n\n')
np.save(os.path.join(folder_path, 'pred.npy'), preds)
# 单窗口示例 + 全序列图(此时已经是原始数值)
plot_batch_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="test")
plot_full_series_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="test")
return
# 预测模式(如果用 --do_predict)
def predict(self, setting, load=False):
pred_data, pred_loader = self._get_data('pred')
if load:
best_model_path = os.path.join(self.args.checkpoints, setting, 'checkpoint.pth')
self.model.load_state_dict(torch.load(best_model_path))
self.model.eval()
preds = []
inputs = []
trues = []
with torch.no_grad():
for batch_x, batch_y, batch_x_mark, batch_y_mark in pred_loader:
batch_x = batch_x.float().to(self.device)
batch_y = batch_y.float()
batch_x_mark = batch_x_mark.float().to(self.device)
batch_y_mark = batch_y_mark.float().to(self.device)
dec_inp = torch.zeros(
[batch_y.shape[0], self.args.pred_len, batch_y.shape[2]]
).float().to(self.device)
dec_inp = torch.cat(
[batch_y[:, :self.args.label_len, :], dec_inp],
dim=1
).to(self.device)
if 'Linear' in self.args.model or 'TST' in self.args.model:
outputs = self.model(batch_x)
else:
if self.args.output_attention:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)[0]
else:
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
preds.append(outputs.detach().cpu().numpy())
inputs.append(batch_x.detach().cpu().numpy())
trues.append(batch_y[:, -self.args.pred_len:, :].detach().cpu().numpy())
preds = np.array(preds)
inputs = np.array(inputs)
trues = np.array(trues)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
inputs = inputs.reshape(-1, inputs.shape[-2], inputs.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
# 预测模式下同样做反归一化
scaler = getattr(pred_data, 'scaler', None)
if scaler is not None:
N_in, L_in, C_in = inputs.shape
inputs_2d = inputs.reshape(-1, C_in)
inputs_2d = scaler.inverse_transform(inputs_2d)
inputs = inputs_2d.reshape(N_in, L_in, C_in)
N_out, L_out, C_out = preds.shape
preds_2d = preds.reshape(-1, C_out)
trues_2d = trues.reshape(-1, C_out)
preds_2d = scaler.inverse_transform(preds_2d)
trues_2d = scaler.inverse_transform(trues_2d)
preds = preds_2d.reshape(N_out, L_out, C_out)
trues = trues_2d.reshape(N_out, L_out, C_out)
folder_path = os.path.join('results', setting)
ensure_dir(folder_path)
np.save(os.path.join(folder_path, 'real_prediction.npy'), preds)
# 单窗口 + 全序列预测图
plot_batch_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="predict")
plot_full_series_predictions(inputs, trues, preds, self.args.pred_len, folder_path, prefix="predict")
return