import torch
import torch.nn as nn
import torch.optim as optim
import dgl
from dgl.nn.pytorch import GraphConv
class GCA(nn.Module):
    """Graph autoencoder: a GraphConv encoder followed by a GraphConv decoder.

    The encoder maps node features (in_feats) to a hidden representation
    (hidden_size) with ReLU; the decoder reconstructs features (out_feats)
    with a Sigmoid squash.
    """

    def __init__(self, in_feats, hidden_size, out_feats):
        super(GCA, self).__init__()
        # GraphConv needs the graph passed alongside the features, so
        # Sequential.__call__ cannot be used directly; forward() invokes
        # each stage explicitly instead.
        self.encoder = nn.Sequential(
            GraphConv(in_feats, hidden_size),
            nn.ReLU()
        )
        self.decoder = nn.Sequential(
            GraphConv(hidden_size, out_feats),
            nn.Sigmoid()
        )

    def forward(self, g, x):
        """Encode node features over graph ``g`` and decode a reconstruction."""
        enc_conv, enc_act = self.encoder
        dec_conv, dec_act = self.decoder
        hidden = enc_act(enc_conv(g, x))
        return dec_act(dec_conv(g, hidden))
# Build the toy graph data
def create_graph():
    """Return a 4-node directed cycle (0->1->2->3->0) and a 4x4 float32 feature matrix."""
    src = torch.tensor([0, 1, 2, 3])
    dst = torch.tensor([1, 2, 3, 0])
    g = dgl.graph((src, dst))
    # g = dgl.add_self_loop(g)
    features = torch.tensor([[1, 2, 3, 4],
                             [2, 3, 4, 1],
                             [3, 4, 1, 2],
                             [4, 1, 2, 3]], dtype=torch.float32)
    return g, features
# Initialization: data, model, optimizer and loss
g, x = create_graph()
g, x = g.to('cpu'), x.to('cpu')
model = GCA(in_feats=4, hidden_size=64, out_feats=4)
optimizer = optim.Adam(model.parameters(), lr=0.01)
criterion = nn.MSELoss()
model.train()
print(f"Graph has {g.number_of_nodes()} nodes and {g.number_of_edges()} edges")

# Train the autoencoder to reconstruct the input features
for epoch in range(200):
    optimizer.zero_grad()
    reconstruction = model(g, x)
    loss = criterion(reconstruction, x)
    loss.backward()
    optimizer.step()
    print(f'Epoch: {epoch:03d}, Loss: {loss.item():.4f}')
针对自己在学习GCN中遇到的一些问题,在这里简单记录一下。
这是我在模拟实现一个简单的图自编码器中遇到的一些问题:
1. 当运行这段代码时,如果初始创建图 g 的代码写成:g = dgl.graph((torch.tensor([1, 2, 3, 4]), torch.tensor([2, 3, 4, 1]))),而节点特征矩阵仍如代码所示是一个 4×4 的矩阵,运行时大概率会出现 'RuntimeError: The size of tensor a (4) must match the size of tensor b (5) at non-singleton dimension 0' 的错误。原因是维度不匹配:dgl.graph 会根据边中出现的最大节点编号自动推断节点数,节点编号从 0 一直编到最大编号,所以用编号 1~4 建边时,图里会自动补上编号为 0 的节点,总共 5 个节点,而特征矩阵只有 4 行。这就是 '
print(f"Graph has {g.number_of_nodes()} nodes and {g.number_of_edges()} edges")
' 打印出来会显示 5 个 node 的原因。所以创建图 g 时,节点编号一定要从 0 开始且连续,使节点数与特征矩阵的行数一致。如上述代码中创建的图则为:0->1,1->2,2->3,3->0。
2. 很多时候会出现 from dgl.nn import GraphConv 中 GraphConv 标红,提示在 '__init__' 中找不到 GraphConv。此时只要将导入语句修改为 "from dgl.nn.pytorch import GraphConv" 就可以了。
2186





