# Source code for torch_geometric.transforms.sign

import torch
from torch_sparse import SparseTensor

from torch_geometric.transforms import BaseTransform

class SIGN(BaseTransform):
    r"""The Scalable Inception Graph Neural Network module (SIGN) from the
    `"SIGN: Scalable Inception Graph Neural Networks"
    <https://arxiv.org/abs/2004.11198>`_ paper, which precomputes the fixed
    representations

    .. math::
        \mathbf{X}^{(i)} = {\left( \mathbf{D}^{-1/2} \mathbf{A}
        \mathbf{D}^{-1/2} \right)}^i \mathbf{X}

    for :math:`i \in \{ 1, \ldots, K \}` and saves them in
    :obj:`data.x1`, :obj:`data.x2`, ...

    .. note::
        Since intermediate node representations are pre-computed, this
        operator is able to scale well to large graphs via classic
        mini-batching.

    Args:
        K (int): The number of hops/layer.
    """
    def __init__(self, K):
        self.K = K

    def __call__(self, data):
        # The graph connectivity is required to build the propagation matrix.
        assert data.edge_index is not None
        row, col = data.edge_index

        # Transposed adjacency: swap (row, col) so that `adj_t @ x`
        # aggregates over incoming edges.
        num_nodes = data.num_nodes
        adj_t = SparseTensor(row=col, col=row,
                             sparse_sizes=(num_nodes, num_nodes))

        # Symmetric normalization D^{-1/2} A D^{-1/2}; isolated nodes
        # produce inf in deg^{-1/2}, which is zeroed out.
        deg = adj_t.sum(dim=1).to(torch.float)
        norm = deg.pow(-0.5)
        norm[norm == float('inf')] = 0
        adj_t = norm.view(-1, 1) * adj_t * norm.view(1, -1)

        # Propagate features K times, storing each hop as data.x1 ... data.xK.
        assert data.x is not None
        out = data.x
        for hop in range(1, self.K + 1):
            out = adj_t @ out
            data[f'x{hop}'] = out

        return data

    def __repr__(self) -> str:
        return f'{self.__class__.__name__}(K={self.K})'