使用Transformer编码器进行序列数据分类(上)
搭建Encoder Class
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# Toy configuration for the Transformer-encoder sequence classifier.
len_traj = 13  # timesteps per trajectory (sequence length)
batch_size = 3  # number of trajectories per batch
d_obs = 6  # dimensionality of a single raw observation
d_embed = 7 # embedding dimension
# NOTE(review): d_embed=7 is not divisible by n_heads=8; the per-head width
# here appears to come from the explicit d_k below instead — confirm downstream.
n_heads = 8  # number of attention heads
d_k = 16  # per-head query/key (and value) width
d_hidden = 16  # width of the position-wise feed-forward hidden layer
d_class = 2  # number of output classes
n_layers = 4 # number of encoder layers stacked inside the Encoder
# Random dummy input: [batch_size, len_traj, d_obs]
trajectory = torch.rand(batch_size, len_traj, d_obs)
class Embedding(nn.Module):
    """Linear projection mapping raw trajectory observations into the latent space.

    Applies a single affine layer to the last dimension, so any leading
    batch/sequence dimensions are preserved.
    """

    def __init__(self, inpt_dim, embed_dim):
        super().__init__()
        self.fc = nn.Linear(inpt_dim, embed_dim)

    def forward(self, x):
        # [..., inpt_dim] -> [..., embed_dim]
        return self.fc(x)
class ScaledDotProductAttention(nn.Module):
def __init__(self, d_k):
super(ScaledDotProductAttention, self).__init__()
self.d_k = d_k
def forward(self, Q, K, V):
# scores : [batch_size x n_heads x len_q(=len_k) x len_k(=len_q)] [1,8,5,5]
scores = torch.matmul(Q, K.transpose(-1, -2)) / np.sqrt(self.d_k