[Code I annotated line by line. The most detailed walkthrough you'll find; if anything is missing, come find me.] Graph Convolutional Network (GCN) PyTorch code, explained and commented

Straight to the code; all the explanations live in the comments.
train.py

from __future__ import division
from __future__ import print_function

import time
import argparse
import numpy as np

import torch
import torch.nn.functional as F
import torch.optim as optim

from pygcn.utils import load_data, accuracy
from pygcn.models import GCN

# Training settings
# Set up the command-line arguments
parser = argparse.ArgumentParser()
# Disable CUDA training
parser.add_argument('--no-cuda', action='store_true', default=False,
                    help='Disables CUDA training.')
# fastmode: validate on the training-pass output instead of running a separate eval pass
parser.add_argument('--fastmode', action='store_true', default=False,
                    help='Validate during training pass.')
# Random seed
parser.add_argument('--seed', type=int, default=42, help='Random seed.')
# Number of training epochs (default 200)
parser.add_argument('--epochs', type=int, default=200,
                    help='Number of epochs to train.')
# Initial learning rate (default 0.01)
parser.add_argument('--lr', type=float, default=0.01,
                    help='Initial learning rate.')
# Weight decay (L2 penalty on the parameters)
parser.add_argument('--weight_decay', type=float, default=5e-4,
                    help='Weight decay (L2 loss on parameters).')
# Number of hidden units (default 16)
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
# Dropout rate (1 - keep probability)
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
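# Example invocation (illustrative values, not from the post):
#   python train.py --epochs 300 --lr 0.005 --no-cuda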

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()
'''
adj: the symmetrized, self-loop-added and row-normalized adjacency matrix (sparse tensor)
features: the row-normalized feature matrix, X = D^-1 * X
labels: the integer-encoded class labels
idx_train / idx_val / idx_test: index ranges of the three splits
'''
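# For Cora specifically: adj is 2708 x 2708 (sparse), features is 2708 x 1433,
# and labels is a length-2708 vector over 7 classes (sizes noted for orientation)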

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()


def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()

    if not args.fastmode:
        # Evaluate validation set performance separately,
        # deactivates dropout during validation run.
        model.eval()
        output = model(features, adj)

    loss_val = F.nll_loss(output[idx_val], labels[idx_val])
    acc_val = accuracy(output[idx_val], labels[idx_val])
    print('Epoch: {:04d}'.format(epoch+1),
          'loss_train: {:.4f}'.format(loss_train.item()),
          'acc_train: {:.4f}'.format(acc_train.item()),
          'loss_val: {:.4f}'.format(loss_val.item()),
          'acc_val: {:.4f}'.format(acc_val.item()),
          'time: {:.4f}s'.format(time.time() - t))


def test():
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
          "loss= {:.4f}".format(loss_test.item()),
          "accuracy= {:.4f}".format(acc_test.item()))


# Train model
t_total = time.time()
for epoch in range(args.epochs):
    train(epoch)
print("Optimization Finished!")
print("Total time elapsed: {:.4f}s".format(time.time() - t_total))

# Testing
test()
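
One detail worth pausing on: train() switches to model.eval() before the validation forward pass because dropout behaves differently in the two modes. A minimal standalone sketch (my own, not part of the repo) that shows the difference:

import torch
import torch.nn as nn

drop = nn.Dropout(p=0.5)
x = torch.ones(1, 8)

drop.train()    # training mode: ~half the units zeroed, survivors scaled by 1/(1-p)
print(drop(x))  # e.g. tensor([[2., 0., 2., 0., 0., 2., 2., 0.]])

drop.eval()     # eval mode: dropout is the identity, output is deterministic
print(drop(x))  # tensor([[1., 1., 1., 1., 1., 1., 1., 1.]])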

model.py

import torch.nn as nn
import torch.nn.functional as F
from pygcn.layers import GraphConvolution


class GCN(nn.Module):
    '''
    nfeat: dimensionality of the input node features
    nhid: number of hidden units
    nclass: number of output classes
    dropout: dropout rate
    '''
    def __init__(self, nfeat, nhid, nclass, dropout):
        super(GCN, self).__init__()

        self.gc1 = GraphConvolution(nfeat, nhid)
        self.gc2 = GraphConvolution(nhid, nclass)
        self.dropout = dropout

    def forward(self, x, adj):

        x = F.relu(self.gc1(x, adj))
        x = F.dropout(x, self.dropout, training=self.training)
        # First layer above: x = ReLU(A_hat X W0)
        x = self.gc2(x, adj)
        return F.log_softmax(x, dim=1)
        # Returns log_softmax(A_hat ReLU(A_hat X W0) W1), where A_hat is the
        # adjacency matrix, already symmetrized and row-normalized as
        # D^-1 * (A + I) during preprocessing
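
To make the formula concrete, here is a toy sanity check (my own sketch, not from the repo): run the model on a 3-node graph and confirm that forward() returns log-probabilities, i.e. each output row sums to 1 after exponentiation.

import torch
from pygcn.models import GCN

model = GCN(nfeat=4, nhid=8, nclass=2, dropout=0.5)
model.eval()  # disable dropout so the pass is deterministic

x = torch.randn(3, 4)                            # 3 nodes, 4 input features each
indices = torch.tensor([[0, 0, 1, 1, 1, 2, 2],   # edges of a 3-node path graph,
                        [0, 1, 0, 1, 2, 1, 2]])  # self-loops included
adj = torch.sparse_coo_tensor(indices, torch.ones(7), (3, 3))

out = model(x, adj)          # shape (3, 2), rows are log-probabilities
print(out.exp().sum(dim=1))  # ~tensor([1., 1., 1.])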

layers.py

import math

import torch

from torch.nn.parameter import Parameter
from torch.nn.modules.module import Module


class GraphConvolution(Module):
    """
    Simple GCN layer, similar to https://arxiv.org/abs/1609.02907
    """

    def __init__(self, in_features, out_features, bias=True):
        '''
        in_features: dimensionality of each node's input feature vector
        out_features: dimensionality of each node's output feature vector
        '''
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        if bias:
            self.bias = Parameter(torch.FloatTensor(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    # Weight initialization: uniform in [-stdv, stdv] with stdv = 1/sqrt(out_features),
    # the same flavor of scheme nn.Linear uses
    def reset_parameters(self):
        stdv = 1. / math.sqrt(self.weight.size(1))
        self.weight.data.uniform_(-stdv, stdv)
        if self.bias is not None:
            self.bias.data.uniform_(-stdv, stdv)
    # The forward pass, the core of the layer
    def forward(self, input, adj):
        # Multiply the input features by the weight matrix: support = X W
        support = torch.mm(input, self.weight)
        # output = adj * X * W via sparse-dense matmul; add the bias if present
        output = torch.spmm(adj, support)
        if self.bias is not None:
            return output + self.bias
        else:
            return output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'
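
A quick usage sketch (mine, with illustrative sizes): a single GraphConvolution maps (N, in_features) node features to (N, out_features), aggregating each node's neighborhood through the sparse adjacency matrix.

import torch
from pygcn.layers import GraphConvolution

layer = GraphConvolution(in_features=4, out_features=2)
print(layer)  # GraphConvolution (4 -> 2)

x = torch.randn(3, 4)                  # 3 nodes, 4 features each
indices = torch.tensor([[0, 1, 1, 2],  # edges of a 3-node path graph
                        [1, 0, 2, 1]])
adj = torch.sparse_coo_tensor(indices, torch.ones(4), (3, 3))

print(layer(x, adj).shape)  # torch.Size([3, 2])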

utils.py

import numpy as np
import scipy.sparse as sp
import torch


def encode_onehot(labels):
    classes = set(labels)
    classes_dict = {c: np.identity(len(classes))[i, :] for i, c in
                    enumerate(classes)}
    labels_onehot = np.array(list(map(classes_dict.get, labels)),
                             dtype=np.int32)
    return labels_onehot
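
# Example (illustrative): encode_onehot(['a', 'b', 'a']) returns something like
#   [[1, 0],
#    [0, 1],
#    [1, 0]]
# Iterating a Python set has no guaranteed order, so which class lands in which
# column can differ between runs.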


def load_data(path="../data/cora/", dataset="cora"):
    """Load citation network dataset (cora only for now)"""
    print('Loading {} dataset...'.format(dataset))
    # Read cora.content; each row is <paper_id> <paper_attributes> <class_label>:
    # column 0 is paper_id, columns 1 through -2 are the attributes, the last column is the label
    idx_features_labels = np.genfromtxt("{}{}.content".format(path, dataset),
                                        dtype=np.dtype(str))
    # Compress the sparse feature block with sp.csr_matrix; [:, 1:-1] takes all rows,
    # columns 1 through the second-to-last
    features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)
    # One-hot encode the labels (all rows, last column)
    labels = encode_onehot(idx_features_labels[:, -1])

    # build graph
    # idx holds the paper_ids from cora.content
    idx = np.array(idx_features_labels[:, 0], dtype=np.int32)
    # enumerate(seq) yields (index, item) pairs; map(fn, seq) applies fn to each item.
    # Here i is the row index and j the paper_id, so idx_map has the form
    # {paper_id: row_index}
    idx_map = {j: i for i, j in enumerate(idx)}
    # Read the .cites file: each line holds two paper ids, <cited paper_id> <citing paper_id>.
    # The link direction is right to left: a line "paper1 paper2" means the edge paper2 -> paper1.
    edges_unordered = np.genfromtxt("{}{}.cites".format(path, dataset),
                                    dtype=np.int32)
    # idx_map.get maps each raw paper_id to its row index; edges_unordered is flattened first
    # edges: the edge list re-expressed in row indices
    '''
    edges_unordered.flatten() lays the raw edge list out row by row, e.g.
        [     35    1033      35 ...  853118  954315 1155073]
    idx_map.get maps each paper_id to its row index, and .reshape restores
    the original (num_edges, 2) shape, so edges ends up looking like
        [[ 163  402]
         [ 163  659]
         [ 163 1696]
         [1887 2258]
         [1902 1887]
         [ 837 1686]]
    '''
    edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),
                     dtype=np.int32).reshape(edges_unordered.shape)

    # adj: the adjacency matrix, built with sp.coo_matrix
    # signature: sp.coo_matrix((data, (row, col)), shape=(n, n))
    # data is np.ones(edges.shape[0]), one 1 per edge; (row, col) are
    # edges[:, 0] and edges[:, 1]
    # shape is labels.shape[0] x labels.shape[0], i.e. num_nodes x num_nodes
    # (np.ndarray.shape is (rows, cols); shape[0] is the number of rows)

    adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),
                        shape=(labels.shape[0], labels.shape[0]),
                        dtype=np.float32)

    # The .cites file describes a directed graph, so adj is not symmetric yet;
    # convert it into the symmetric adjacency matrix of the undirected graph.
    '''
    Symmetrizing a directed adjacency matrix, step by step:
    (1) take the transpose, adj.T
    (2) adj.T > adj is a boolean mask, true wherever the transposed entry is larger
    (3) adj.T.multiply(adj.T > adj) keeps exactly the reverse edges that are
        missing from adj (most entries are zero)
    (4) adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj) adds those
        missing reverse edges and subtracts what a plain "adj + adj.T" would
        have double-counted
    '''
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)
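    # Worked 2x2 example (illustrative): for adj = [[0, 1],
    #                                               [0, 0]]
    # adj.T > adj is [[False, False], [True, False]], so the multiply term
    # contributes only the missing reverse edge and nothing is subtracted;
    # the result is the symmetric [[0, 1], [1, 0]].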
    # Row-normalize the feature matrix: X = D^-1 * X
    features = normalize(features)
    # Add self-loops and row-normalize the adjacency matrix: A_hat = D^-1 * (A + I)
    adj = normalize(adj + sp.eye(adj.shape[0]))

    idx_train = range(140)
    idx_val = range(200, 500)
    idx_test = range(500, 1500)

    features = torch.FloatTensor(np.array(features.todense()))
    labels = torch.LongTensor(np.where(labels)[1])
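    # Note (mine): labels is one-hot at this point, so np.where(labels)[1] picks
    # the column index of each row's 1, turning the one-hot matrix back into
    # integer class ids, e.g. [[0, 1], [1, 0]] -> [1, 0]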
    adj = sparse_mx_to_torch_sparse_tensor(adj)

    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    return adj, features, labels, idx_train, idx_val, idx_test

'''
Row-normalization helper
'''
def normalize(mx):
    """
    Row-normalize sparse matrix
    行归一化稀疏矩阵
    sum(0):可以理解为按照列求和
    sun(1) 可以理解为按照行求和
    np.power()用于数组的n次方:
    r_inv = np.power(rowsum, -1).flatten():是求rowsum的逆矩阵 并将其拉成一维

    mx 可以看做输入的特征矩阵A
    A=D-1*A
    """
    # rowsum plays the role of the degree matrix D
    rowsum = np.array(mx.sum(1))
    # r_inv holds the entries of D^-1
    r_inv = np.power(rowsum, -1).flatten()
    # rows summing to zero give inf after the reciprocal; zero those entries out
    r_inv[np.isinf(r_inv)] = 0.
    # sp.diags(r_inv) builds the diagonal matrix with r_inv on the diagonal
    r_mat_inv = sp.diags(r_inv)
    # A = D^-1 * A
    mx = r_mat_inv.dot(mx)
    return mx
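
# Worked example (illustrative): for mx = [[1, 1],
#                                          [2, 0]]
# rowsum is [[2], [2]], r_inv is [0.5, 0.5], and r_mat_inv.dot(mx) yields
# [[0.5, 0.5],
#  [1.0, 0.0]] so every row now sums to 1, i.e. D^-1 * mx.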


def accuracy(output, labels):
    preds = output.max(1)[1].type_as(labels)
    correct = preds.eq(labels).double()
    correct = correct.sum()
    return correct / len(labels)
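
# Example (illustrative): output.max(1)[1] takes the arg-max class per row of the
# log-probabilities; for preds = [1, 0, 1] against labels = [1, 0, 0] this returns 2/3.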


def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Convert a scipy sparse matrix to a torch sparse tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(
        np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    shape = torch.Size(sparse_mx.shape)
    return torch.sparse.FloatTensor(indices, values, shape)
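
# Note (mine): torch.sparse.FloatTensor is the legacy constructor; on current
# PyTorch the equivalent is torch.sparse_coo_tensor(indices, values, shape).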

adj, features, labels, idx_train, idx_val, idx_test = load_data()
print('adj (adjacency matrix) is', adj)
print('---------------------------')
print('features (feature matrix) is', features)
print('---------------------------')
print('labels is', labels)
print('---------------------------')
print('idx_train (training indices) is', idx_train)
print('---------------------------')
print('idx_val (validation indices) is', idx_val)
print('---------------------------')
print('idx_test (test indices) is', idx_test)
print('---------------------------')

