LapPENodeEncoder, LAP_DIM_PE (laplace.py in the encoders directory of lrgb)

This code implements a node encoder, LapPENodeEncoder, which combines graph node features with a Laplacian positional embedding (Laplace Positional Embedding, LapPE). Laplacian features capture structural information about where a node sits in the graph and are commonly used in graph neural networks to help each node learn about its position and neighborhood. The encoder appends the LapPE to the initial node features, optionally processing the raw eigenvalue/eigenvector inputs with a small neural network (a DeepSet or a Transformer), and produces the final node embedding.
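The encoder does not compute the eigendecomposition itself: its forward method expects pestat = [EigVals, EigVecs] to be precomputed per graph (in the LRGB pipeline these statistics are typically produced during dataset preprocessing). Below is a minimal sketch, not the LRGB preprocessing code, of how such statistics could be derived from a dense adjacency matrix; the helper name toy_lap_pe and the NaN padding for graphs with fewer than MAX_FREQS nodes are illustrative assumptions chosen to match the NaN masking in forward.

import torch

def toy_lap_pe(adj: torch.Tensor, max_freqs: int = 10):
    """Toy sketch: eigendecompose the symmetric normalized Laplacian."""
    adj = adj.float()
    n = adj.size(0)
    deg = adj.sum(dim=1).clamp(min=1)
    d_inv_sqrt = deg.pow(-0.5)
    lap = torch.eye(n) - d_inv_sqrt.unsqueeze(1) * adj * d_inv_sqrt.unsqueeze(0)
    eigvals, eigvecs = torch.linalg.eigh(lap)     # eigenvalues in ascending order
    k = min(n, max_freqs)
    EigVecs = torch.full((n, max_freqs), float('nan'))
    EigVecs[:, :k] = eigvecs[:, :k]               # (num nodes) x (max_freqs)
    EigVals = torch.full((n, max_freqs, 1), float('nan'))
    EigVals[:, :k, 0] = eigvals[:k].unsqueeze(0)  # same eigenvalues repeated per node
    return EigVals, EigVecs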

from lrgb.encoders.laplace import LapPENodeEncoder, LAP_DIM_PE

import torch
import torch.nn as nn

LAP_DIM_PE = 16
MODEL = 'DeepSet'
LAYERS = 2
N_HEADS = 4
POST_LAYERS = 0
MAX_FREQS = 10
RAW_NORM_TYPE = 'none'
PASS_AS_VAR = False


class LapPENodeEncoder(torch.nn.Module):
    """Laplace Positional Embedding node encoder.

    LapPE of size dim_pe will get appended to each node feature vector.
    If `expand_x` is set True, the original node features are first linearly
    projected to size (dim_emb - dim_pe) and then concatenated with the LapPE.

    Args:
        dim_in: Size of the input node features
        dim_emb: Size of final node embedding
        expand_x: Expand node features `x` from dim_in to (dim_emb - dim_pe)
    """

    def __init__(self, dim_in, dim_emb, expand_x=True):
        super().__init__()
        dim_pe = LAP_DIM_PE  # Size of Laplace PE embedding
        model_type = MODEL  # Encoder NN model type for PEs
        if model_type not in ['Transformer', 'DeepSet']:
            raise ValueError(f"Unexpected PE model {model_type}")
        self.model_type = model_type
        n_layers = LAYERS  # Num. layers in PE encoder model
        n_heads = N_HEADS  # Num. attention heads in Trf PE encoder
        post_n_layers = POST_LAYERS  # Num. layers to apply after pooling
        max_freqs = MAX_FREQS  # Num. eigenvectors (frequencies)
        norm_type = RAW_NORM_TYPE.lower()  # Raw PE normalization layer type
        self.pass_as_var = PASS_AS_VAR  # Pass PE also as a separate variable

        if dim_emb - dim_pe < 0: # formerly 1, but you could have zero feature size
            raise ValueError(f"LapPE size {dim_pe} is too large for "
                             f"desired embedding size of {dim_emb}.")

        if expand_x and dim_emb - dim_pe > 0:
            self.linear_x = nn.Linear(dim_in, dim_emb - dim_pe)
        self.expand_x = expand_x and dim_emb - dim_pe > 0

        # Initial projection of eigenvalue and the node's eigenvector value
        self.linear_A = nn.Linear(2, dim_pe)
        if norm_type == 'batchnorm':
            self.raw_norm = nn.BatchNorm1d(max_freqs)
        else:
            self.raw_norm = None

        activation = nn.ReLU  # register.act_dict[cfg.gnn.act]
        if model_type == 'Transformer':
            # Transformer model for LapPE
            encoder_layer = nn.TransformerEncoderLayer(d_model=dim_pe,
                                                       nhead=n_heads,
                                                       batch_first=True)
            self.pe_encoder = nn.TransformerEncoder(encoder_layer,
                                                    num_layers=n_layers)
        else:
            # DeepSet model for LapPE
            layers = []
            if n_layers == 1:
                layers.append(activation())
            else:
                self.linear_A = nn.Linear(2, 2 * dim_pe)
                layers.append(activation())
                for _ in range(n_layers - 2):
                    layers.append(nn.Linear(2 * dim_pe, 2 * dim_pe))
                    layers.append(activation())
                layers.append(nn.Linear(2 * dim_pe, dim_pe))
                layers.append(activation())
            self.pe_encoder = nn.Sequential(*layers)

        self.post_mlp = None
        if post_n_layers > 0:
            # MLP to apply post pooling
            layers = []
            if post_n_layers == 1:
                layers.append(nn.Linear(dim_pe, dim_pe))
                layers.append(activation())
            else:
                layers.append(nn.Linear(dim_pe, 2 * dim_pe))
                layers.append(activation())
                for _ in range(post_n_layers - 2):
                    layers.append(nn.Linear(2 * dim_pe, 2 * dim_pe))
                    layers.append(activation())
                layers.append(nn.Linear(2 * dim_pe, dim_pe))
                layers.append(activation())
            self.post_mlp = nn.Sequential(*layers)

    def forward(self, x, pestat):
        EigVals = pestat[0]  # (Num nodes) x (Num Eigenvectors) x 1
        EigVecs = pestat[1]  # (Num nodes) x (Num Eigenvectors)

        if self.training:
            # Eigenvectors are only defined up to sign, so randomly flip the
            # sign of each eigenvector during training (sign invariance).
            sign_flip = torch.rand(EigVecs.size(1), device=EigVecs.device)
            sign_flip[sign_flip >= 0.5] = 1.0
            sign_flip[sign_flip < 0.5] = -1.0
            EigVecs = EigVecs * sign_flip.unsqueeze(0)

        pos_enc = torch.cat((EigVecs.unsqueeze(2), EigVals), dim=2) # (Num nodes) x (Num Eigenvectors) x 2
        empty_mask = torch.isnan(pos_enc)  # (Num nodes) x (Num Eigenvectors) x 2

        pos_enc[empty_mask] = 0  # (Num nodes) x (Num Eigenvectors) x 2
        if self.raw_norm:
            pos_enc = self.raw_norm(pos_enc)
        pos_enc = self.linear_A(pos_enc)  # (Num nodes) x (Num Eigenvectors) x dim_pe

        # PE encoder: a Transformer or DeepSet model
        if self.model_type == 'Transformer':
            pos_enc = self.pe_encoder(src=pos_enc,
                                      src_key_padding_mask=empty_mask[:, :, 0])
        else:
            pos_enc = self.pe_encoder(pos_enc)

        # Remove masked sequences; must clone before overwriting masked elements
        pos_enc = pos_enc.clone().masked_fill_(empty_mask[:, :, 0].unsqueeze(2),
                                               0.)

        # Sum pooling
        pos_enc = torch.sum(pos_enc, 1, keepdim=False)  # (Num nodes) x dim_pe

        # MLP post pooling
        if self.post_mlp is not None:
            pos_enc = self.post_mlp(pos_enc)  # (Num nodes) x dim_pe

        # Expand node features if needed
        if self.expand_x:
            h = self.linear_x(x)
        else:
            h = x
        # Concatenate final PEs to input embedding
        x = torch.cat((h, pos_enc), 1)
        return x
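A minimal usage sketch of a forward pass, assuming 50 nodes, 32 raw features per node, and a target embedding size of 128 (these sizes and the random pestat tensors are illustrative only; in practice pestat holds precomputed Laplacian statistics):

# Illustrative sizes and random inputs, not LRGB data.
x = torch.randn(50, 32)                  # 50 nodes with 32 raw features each
EigVals = torch.randn(50, MAX_FREQS, 1)  # per-node copies of the eigenvalues
EigVecs = torch.randn(50, MAX_FREQS)     # per-node eigenvector entries
encoder = LapPENodeEncoder(dim_in=32, dim_emb=128, expand_x=True)
out = encoder(x, [EigVals, EigVecs])
print(out.shape)  # torch.Size([50, 128]) = 112 projected features + 16-dim LapPE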

1. Global constants

LAP_DIM_PE = 16
MODEL = 'DeepSet'
LAYERS = 2
N_HEADS = 4
POST_LAYERS = 0
MAX_FREQS = 10
RAW_NORM_TYPE = 'none'
PASS_AS_VAR = False
  • LAP_DIM_PE: size of the Laplacian positional embedding (LapPE), set to 16 (its interplay with the target embedding size is checked in the sketch after this list).
  • MODEL: the PE encoder model to use, 'DeepSet' by default; this value determines which encoder is built.
  • LAYERS: number of layers in the PE encoder model, 2 by default, which sets the model depth.
  • N_HEADS: number of heads in the multi-head self-attention when 'Transformer' is chosen as the model, 4 here.
  • POST_LAYERS: number of MLP layers applied after sum pooling; 0 means no post-pooling MLP is built.
  • MAX_FREQS: number of eigenvectors (frequencies) kept per node, 10 here.
  • RAW_NORM_TYPE: normalization applied to the raw PE input; 'batchnorm' enables a BatchNorm1d layer, 'none' disables it.
  • PASS_AS_VAR: whether the PE should also be passed along as a separate variable; it is stored but not used in this standalone forward.
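As a quick check of how these constants interact, the snippet below assumes a target embedding size of 128 (an illustrative value, not taken from the file): the node features are projected to 128 - 16 = 112 dimensions and the 16-dimensional pooled LapPE is concatenated back to 128; for the Transformer variant, LAP_DIM_PE must also be divisible by N_HEADS.

dim_emb = 128                      # assumed target embedding size, for illustration
dim_x = dim_emb - LAP_DIM_PE       # 112: width of the projected node features
assert dim_x > 0                   # otherwise __init__ raises ValueError
assert LAP_DIM_PE % N_HEADS == 0   # 16 / 4 = 4 dims per head in the Transformer variant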