Source code for torch_geometric.nn.conv.appnp

from torch_geometric.typing import Adj, OptTensor

from torch import Tensor
from torch_sparse import SparseTensor, matmul
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.nn.conv.gcn_conv import gcn_norm


class APPNP(MessagePassing):
    r"""The approximate personalized propagation of neural predictions layer
    from the `"Predict then Propagate: Graph Neural Networks meet Personalized
    PageRank" <https://arxiv.org/abs/1810.05997>`_ paper

    .. math::
        \mathbf{X}^{(0)} &= \mathbf{X}

        \mathbf{X}^{(k)} &= (1 - \alpha) \mathbf{\hat{D}}^{-1/2}
        \mathbf{\hat{A}} \mathbf{\hat{D}}^{-1/2} \mathbf{X}^{(k-1)} + \alpha
        \mathbf{X}^{(0)}

        \mathbf{X}^{\prime} &= \mathbf{X}^{(K)},

    where :math:`\mathbf{\hat{A}} = \mathbf{A} + \mathbf{I}` denotes the
    adjacency matrix with inserted self-loops and
    :math:`\hat{D}_{ii} = \sum_{j=0} \hat{A}_{ij}` its diagonal degree matrix.

    Args:
        K (int): Number of iterations :math:`K`.
        alpha (float): Teleport probability :math:`\alpha`.
        add_self_loops (bool, optional): If set to :obj:`False`, will not add
            self-loops to the input graph. (default: :obj:`True`)
        **kwargs (optional): Additional arguments of
            :class:`torch_geometric.nn.conv.MessagePassing`.
    """
    def __init__(self, K: int, alpha: float, add_self_loops: bool = True,
                 **kwargs):
        super(APPNP, self).__init__(aggr='add', **kwargs)
        self.K = K
        self.alpha = alpha
        self.add_self_loops = add_self_loops
    def forward(self, x: Tensor, edge_index: Adj,
                edge_weight: OptTensor = None) -> Tensor:
        """"""
        if isinstance(edge_index, Tensor):
            # Symmetrically normalize the graph: D^{-1/2} (A + I) D^{-1/2}.
            edge_index, edge_weight = gcn_norm(
                edge_index, edge_weight, x.size(self.node_dim),
                add_self_loops=self.add_self_loops, dtype=x.dtype)
        elif isinstance(edge_index, SparseTensor):
            edge_index = gcn_norm(  # yapf: disable
                edge_index, edge_weight, x.size(self.node_dim),
                add_self_loops=self.add_self_loops, dtype=x.dtype)

        h = x  # keep the initial prediction X^{(0)} as the teleport target
        for k in range(self.K):
            # propagate_type: (x: Tensor, edge_weight: OptTensor)
            x = self.propagate(edge_index, x=x, edge_weight=edge_weight,
                               size=None)
            x = x * (1 - self.alpha)
            x += self.alpha * h

        return x
    def message(self, x_j: Tensor, edge_weight: Tensor) -> Tensor:
        # Scale each incoming message by its normalized edge weight.
        return edge_weight.view(-1, 1) * x_j

    def message_and_aggregate(self, adj_t: SparseTensor, x: Tensor) -> Tensor:
        # Fused path for SparseTensor input: one sparse-dense matmul.
        return matmul(adj_t, x, reduce=self.aggr)

    def __repr__(self):
        return '{}(K={}, alpha={})'.format(self.__class__.__name__, self.K,
                                           self.alpha)
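
# ---------------------------------------------------------------------------
# Usage sketch (an illustrative addition, not part of the original module):
# a minimal "predict, then propagate" model in the spirit of the paper.
# APPNP holds no trainable weights; it only smooths the predictions of a
# preceding model, here a small MLP.  The `Net` class, all layer sizes, and
# the toy graph below are hypothetical.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    import torch
    import torch.nn.functional as F
    from torch.nn import Linear

    class Net(torch.nn.Module):
        def __init__(self, in_channels: int, hidden_channels: int,
                     out_channels: int, K: int = 10, alpha: float = 0.1):
            super(Net, self).__init__()
            self.lin1 = Linear(in_channels, hidden_channels)
            self.lin2 = Linear(hidden_channels, out_channels)
            self.prop = APPNP(K=K, alpha=alpha)

        def forward(self, x: Tensor, edge_index: Adj) -> Tensor:
            x = F.relu(self.lin1(x))  # predict: per-node MLP, graph-agnostic
            x = self.lin2(x)
            return self.prop(x, edge_index)  # propagate: K smoothing steps

    # Toy graph: 4 nodes on a path, each edge given in both directions.
    x = torch.randn(4, 16)
    edge_index = torch.tensor([[0, 1, 1, 2, 2, 3],
                               [1, 0, 2, 1, 3, 2]])

    model = Net(in_channels=16, hidden_channels=32, out_channels=7)
    out = model(x, edge_index)
    print(out.shape)  # torch.Size([4, 7])

    # The same propagation through the fused sparse path
    # (message_and_aggregate); both calls should agree up to floating-point
    # tolerance.
    adj_t = SparseTensor(row=edge_index[1], col=edge_index[0],
                         sparse_sizes=(4, 4))
    out_sparse = model(x, adj_t)
    print(torch.allclose(out, out_sparse, atol=1e-6))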