Transformer code walkthrough (there is already plenty of material online covering the theory, so this post only shows the code; the dataset is the open-source ETT dataset)
This post uses nn.Transformer. Note that nn.Transformer itself does not include the input embedding, the positional embedding, or the final linear + softmax layer, so these have to be added around it.
Since the data here is a time series, the embedding is an nn.Linear projection; the positional embedding is the conventional sinusoidal positional encoding (see the code below for details); and another nn.Linear is attached after nn.Transformer to produce the forecast.
The encoder input sequence length is seq_length = 96, and the decoder input sequence length is label_length + pred_length = 72.
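As a minimal sketch of how these lengths map onto the tensor shapes nn.Transformer expects (illustrative only, not part of the model defined below; batch_size = 32, d_model = 512 and nhead = 8 are placeholder values, and by default nn.Transformer takes sequence-first tensors of shape (seq_len, batch, d_model)):
import torch
import torch.nn as nn

batch_size, d_model = 32, 512        # placeholder values for illustration
seq_length = 96                      # encoder input length
dec_length = 72                      # decoder input length = label_length + pred_length

src = torch.randn(seq_length, batch_size, d_model)   # encoder input, already projected to d_model
tgt = torch.randn(dec_length, batch_size, d_model)   # decoder input, already projected to d_model

model = nn.Transformer(d_model=d_model, nhead=8)
out = model(src, tgt)
print(out.shape)   # torch.Size([72, 32, 512]): one d_model vector per decoder time step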
1. Import the required packages
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import plotly.express as px
from sklearn import metrics
from torch.utils.data import Dataset, DataLoader
2. Define the Transformer network
class PositionalEncoding(nn.Module):
    def __init__(self, d_model, device, max_len=5000):
        super(PositionalEncoding, self).__init__()
        # Standard sinusoidal positional encoding: sine on even dimensions, cosine on odd dimensions
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(np.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)
        pe[:, 1::2] = torch.cos(position * div_term)
        # Store as (max_len, 1, d_model) so it broadcasts over the batch in the sequence-first layout
        self.pe = pe.unsqueeze(0).transpose(1, 0).to(device)

    def forward(self, x):
        # Return the encodings for the first x.size(0) positions of the input sequence
        return self.pe[:x.size(0), :]
class TransformerTimeSeriesModel(nn.Module):
    def __init__(self, input_size, d_model, device, nhead,
                 num_encoder_layers, num_decoder_layers, dim_feedforward, dropout=0.1):
        super(TransformerTimeSeriesModel, self).__init__()
        # Value embedding: project the raw input features to the model dimension
        self.value_encoding = nn.Linear(input_size, d_model)
        self.positional_encoding = PositionalEncoding(d_model, device)
        self.transformer = nn.Transformer(d_model=d_model, nhead=nhead,