TFT时间序列预测

本文介绍了如何利用Python和PyTorch框架构建数据集,进而进行时间序列预测。重点在于深度学习模型TFT(Temporal Fusion Transformer)的应用。


from torch import nn
import math
import torch
import ipdb
class GLU(nn.Module):
    """Gated Linear Unit: gates one linear projection of the input with a
    sigmoid-activated second projection, element-wise (Dauphin et al.)."""

    def __init__(self, input_size):
        super(GLU, self).__init__()
        # Two parallel same-size projections: fc1 feeds the sigmoid gate,
        # fc2 produces the candidate values.
        self.fc1 = nn.Linear(input_size, input_size)
        self.fc2 = nn.Linear(input_size, input_size)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        gate = self.sigmoid(self.fc1(x))
        value = self.fc2(x)
        return gate * value

class TimeDistributed(nn.Module):
    """Applies a wrapped module independently at every time step.

    Collapses the batch and time axes into one, runs the module on the
    flattened 2-D tensor, then restores the leading axes. Adapted from:
    https://discuss.pytorch.org/t/any-pytorch-function-can-work-as-keras-timedistributed/1346/4
    """

    def __init__(self, module, batch_first=False):
        super(TimeDistributed, self).__init__()
        self.module = module
        self.batch_first = batch_first

    def forward(self, x):
        # No separate time axis on <=2-D input; apply the module directly.
        if x.dim() <= 2:
            return self.module(x)

        # (samples, timesteps, features) -> (samples*timesteps, features).
        # .contiguous() is required because .view() needs contiguous memory.
        flat = x.contiguous().view(-1, x.size(-1))
        out = self.module(flat)

        if self.batch_first:
            # (samples, timesteps, output_size)
            out = out.contiguous().view(x.size(0), -1, out.size(-1))
        else:
            # (timesteps, samples, output_size)
            out = out.view(-1, x.size(1), out.size(-1))

        return out

class GRN(nn.Module):
    """Gated Residual Network from the Temporal Fusion Transformer.

    Output = LayerNorm(residual + GLU(Dropout(FC2(ELU(FC1(x) [+ FC_c(c)]))))),
    where the context term FC_c(c) is added only when a context vector is
    supplied, and the residual is the raw input (or a linear projection of it
    when input_size != output_size).

    Args:
        input_size: feature dimension of the primary input x.
        hidden_state_size: width of the intermediate hidden representation.
        output_size: feature dimension of the block output.
        drop_out: dropout probability applied before the gating layer.
        hidden_context_size: feature dimension of the optional context input;
            when None, no context projection layer is created and passing a
            context to forward() will raise AttributeError.
        batch_first: layout flag forwarded to every TimeDistributed wrapper.
    """
    def __init__(self,input_size,hidden_state_size,output_size,drop_out,hidden_context_size=None,batch_first=False):
        super(GRN, self).__init__()
        self.input_size=input_size
        self.output_size=output_size
        self.hidden_context_size=hidden_context_size
        self.hidden_state_size=hidden_state_size
        self.drop_out=drop_out

        if self.input_size!=self.output_size:
            # Project the residual so the skip connection matches output_size.
            # Fix: forward batch_first here too, consistent with every other
            # TimeDistributed wrapper in this class.
            self.skip_layer=TimeDistributed(nn.Linear(self.input_size,self.output_size),batch_first=batch_first)
        self.fc1=TimeDistributed(nn.Linear(self.input_size,self.hidden_state_size),batch_first=batch_first)
        self.elu1=nn.ELU()

        if self.hidden_context_size is not None:
            # Projects the context vector to hidden_state_size so it can be
            # added to FC1(x).
            self.context=TimeDistributed(nn.Linear(self.hidden_context_size,self.hidden_state_size),batch_first=batch_first)
        self.fc2=TimeDistributed(nn.Linear(self.hidden_state_size,self.output_size),batch_first=batch_first)
        self.dropout=nn.Dropout(self.drop_out)
        # LayerNorm normalizes over the last (feature) dimension.
        self.ln=TimeDistributed(nn.LayerNorm(self.output_size),batch_first=batch_first)
        self.gate=TimeDistributed(GLU(self.output_size),batch_first=batch_first)

    def forward(self,x,context=None):
        """Apply the GRN; `context` is valid only if hidden_context_size was set."""
        if self.input_size!=self.output_size:
            residual=self.skip_layer(x)
        else:
            residual=x
        x=self.fc1(x)
        if context is not None:
            # Raises AttributeError if hidden_context_size was None at init.
            context=self.context(context)
            x=x+context
        x=self.elu1(x)

        x=self.fc2(x)
        x=self.dropout(x)
        x=self.gate(x)
        x=x+residual
        x=self.ln(x)
        return x

class PositionalEncoder(nn.Module):
    """Sinusoidal positional encoding in the style of "Attention Is All You Need".

    Precomputes a (1, max_seq_len, d_model) table of sin/cos values and adds
    it, without gradient, to the scaled input. forward() expects the input in
    (seq_len, batch_size, d_model) layout.

    Fix: for odd d_model the original indexed pe[pos, i + 1] past the last
    column and raised IndexError; the cosine write is now guarded. Values for
    even d_model are unchanged.
    """
    def __init__(self,d_model,max_seq_len=160):
        super(PositionalEncoder, self).__init__()
        self.d_model=d_model  # embedding size of each position
        pe=torch.zeros(max_seq_len,d_model)
        for pos in range(max_seq_len):
            for i in range(0,d_model,2):
                pe[pos, i] = \
                    math.sin(pos / (10000 ** ((2 * i) / d_model)))
                # Guard: with odd d_model the final sin column has no cos
                # partner, so i + 1 would index out of bounds.
                if i + 1 < d_model:
                    pe[pos, i + 1] = \
                        math.cos(pos / (10000 ** ((2 * (i + 1)) / d_model)))
        pe = pe.unsqueeze(0)
        # Buffer (not a Parameter): moves with .to(device) but is not trained.
        self.register_buffer('pe', pe)

    def forward(self,x):
        # NOTE(review): the exponent uses 2*i while i already steps by 2,
        # which deviates from the exact constants of the original paper;
        # kept as-is for backward compatibility with existing checkpoints.
        with torch.no_grad():
            x=x*math.sqrt(self.d_model)  # scale embeddings as in the paper
            seq_len=x.size(0)
            pe=self.pe[:,:seq_len].view(seq_len,1,self.d_model)
            x=x+pe
            return x

class VSN(nn.Module):
    """Variable Selection Network (TFT building block).

    Learns per-variable selection weights with a GRN applied to the
    flattened concatenation of all variable embeddings, and transforms each
    variable's embedding with its own dedicated GRN.

    NOTE(review): `forward` appears truncated in this file — the weighted
    combination of `var_outputs` with `sparse_weights` and the return
    statement are missing.
    """
    def __init__(self,input_size,num_inputs,hidden_size,drop_out,context=None):
        super(VSN, self).__init__()

        self.hidden_size=hidden_size
        self.input_size=input_size
        self.num_inputs=num_inputs
        self.drop_out=drop_out
        self.context=context

        # num_inputs*input_size because all variables are flattened here:
        # each of the num_inputs variables was embedded to input_size first.
        self.flattened_grn=GRN(input_size=self.num_inputs*self.input_size,hidden_state_size=self.hidden_size,output_size=self.num_inputs,drop_out=self.drop_out,hidden_context_size=self.context)

        self.single_variable_grns=nn.ModuleList()
        for i in range(self.num_inputs):
            self.single_variable_grns.append(GRN(self.input_size,self.hidden_size,self.hidden_size,self.drop_out))  # one GRN per flattened variable

        # NOTE(review): nn.Softmax() without dim= relies on deprecated
        # implicit-dim behavior — confirm the intended axis (should be the
        # num_inputs axis).
        self.softmax=nn.Softmax()

    def forward(self,embedding,context=None):
        # Flattened embedding -> GRN; softmax below turns it into weights.
        sparse_weights=self.flattened_grn(embedding,context)

        # unsqueeze adds an axis: [seq, bs, 1, num_inputs]
        sparse_weights=self.softmax(sparse_weights).unsqueeze(2)

        var_outputs=[]  # per-variable GRN outputs, collected into a list
        for i in range(self.num_inputs):
            # Each variable's slice of the embedding goes through its own GRN;
            # the embedding here is (seq, bs, input_size*num_inputs).
            var_outputs.append(self.single_variable_grns[i](embedding[:,:,(i*self.input_size):(i+1)*self.input_size]))

        var_outputs=torch.stack(var_outputs,dim=-1)  # stack on last axis: [seq, bs, input_size, num_inputs]
        # NOTE(review): code is cut off here — weighted sum / return missing.
        