Source code for torch_geometric.nn.conv.hypergraph_conv

import torch
from torch.nn import Parameter
from torch.nn import functional as F
from torch_scatter import scatter_add
from torch_geometric.utils import softmax, degree
from torch_geometric.nn.conv import MessagePassing

from ..inits import glorot, zeros


class HypergraphConv(MessagePassing):
    r"""The hypergraph convolutional operator from the `"Hypergraph Convolution
    and Hypergraph Attention" <https://arxiv.org/abs/1901.08150>`_ paper

    .. math::
        \mathbf{X}^{\prime} = \mathbf{D}^{-1} \mathbf{H} \mathbf{W}
        \mathbf{B}^{-1} \mathbf{H}^{\top} \mathbf{X} \mathbf{\Theta}

    where :math:`\mathbf{H} \in {\{ 0, 1 \}}^{N \times M}` is the incidence
    matrix, :math:`\mathbf{W}` is the diagonal hyperedge weight matrix, and
    :math:`\mathbf{D}` and :math:`\mathbf{B}` are the corresponding degree
    matrices.

    Args:
        in_channels (int): Size of each input sample.
        out_channels (int): Size of each output sample.
        use_attention (bool, optional): If set to :obj:`True`, attention
            will be added to this layer. (default: :obj:`False`)
        heads (int, optional): Number of multi-head-attentions.
            (default: :obj:`1`)
        concat (bool, optional): If set to :obj:`False`, the multi-head
            attentions are averaged instead of concatenated.
            (default: :obj:`True`)
        negative_slope (float, optional): LeakyReLU angle of the negative
            slope. (default: :obj:`0.2`)
        dropout (float, optional): Dropout probability of the normalized
            attention coefficients which exposes each node to a stochastically
            sampled neighborhood during training. (default: :obj:`0`)
        bias (bool, optional): If set to :obj:`False`, the layer will not learn
            an additive bias. (default: :obj:`True`)
    """

    def __init__(self, in_channels, out_channels, use_attention=False,
                 heads=1, concat=True, negative_slope=0.2, dropout=0,
                 bias=True):
        super(HypergraphConv, self).__init__(aggr='add')

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.use_attention = use_attention

        if self.use_attention:
            self.heads = heads
            self.concat = concat
            self.negative_slope = negative_slope
            self.dropout = dropout
            self.weight = Parameter(
                torch.Tensor(in_channels, heads * out_channels))
            self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels))
        else:
            self.heads = 1
            self.concat = True
            self.weight = Parameter(torch.Tensor(in_channels, out_channels))

        if bias and concat:
            self.bias = Parameter(torch.Tensor(heads * out_channels))
        elif bias and not concat:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()
    def reset_parameters(self):
        glorot(self.weight)
        if self.use_attention:
            glorot(self.att)
        zeros(self.bias)
    def __forward__(self, x, hyperedge_index, hyperedge_weight=None,
                    alpha=None):
        # D^{-1}: inverse of the (weighted) node degree matrix.
        if hyperedge_weight is None:
            D = degree(hyperedge_index[0], x.size(0), x.dtype)
        else:
            D = scatter_add(hyperedge_weight[hyperedge_index[1]],
                            hyperedge_index[0], dim=0, dim_size=x.size(0))
        D = 1.0 / D
        D[D == float("inf")] = 0

        # B^{-1}: inverse of the hyperedge degree matrix, with the hyperedge
        # weights W folded in if they are given.
        num_edges = hyperedge_index[1].max().item() + 1
        B = 1.0 / degree(hyperedge_index[1], num_edges, x.dtype)
        B[B == float("inf")] = 0
        if hyperedge_weight is not None:
            B = B * hyperedge_weight

        # Two-stage propagation: first aggregate node features into the
        # hyperedges, then gather the hyperedge features back into the nodes.
        self.flow = 'source_to_target'
        out = self.propagate(hyperedge_index, x=x, norm=B, alpha=alpha)
        self.flow = 'target_to_source'
        out = self.propagate(hyperedge_index, x=out, norm=D, alpha=alpha)

        return out

    def message(self, x_j, edge_index_i, norm, alpha):
        # Scale each message by the normalization coefficient of its target
        # and, if attention is used, by the attention weights.
        out = norm[edge_index_i].view(-1, 1, 1) * x_j.view(
            -1, self.heads, self.out_channels)

        if alpha is not None:
            out = alpha.view(-1, self.heads, 1) * out

        return out
    def forward(self, x, hyperedge_index, hyperedge_weight=None):
        r"""
        Args:
            x (Tensor): Node feature matrix :math:`\mathbf{X}`.
            hyperedge_index (LongTensor): Hyperedge indices from
                :math:`\mathbf{H}`.
            hyperedge_weight (Tensor, optional): Sparse hyperedge weights from
                :math:`\mathbf{W}`. (default: :obj:`None`)
        """
        x = torch.matmul(x, self.weight)

        alpha = None
        if self.use_attention:
            x = x.view(-1, self.heads, self.out_channels)
            x_i, x_j = x[hyperedge_index[0]], x[hyperedge_index[1]]
            alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
            alpha = F.leaky_relu(alpha, self.negative_slope)
            alpha = softmax(alpha, hyperedge_index[0], x.size(0))
            alpha = F.dropout(alpha, p=self.dropout, training=self.training)

        out = self.__forward__(x, hyperedge_index, hyperedge_weight, alpha)

        if self.concat is True:
            out = out.view(-1, self.heads * self.out_channels)
        else:
            out = out.mean(dim=1)

        if self.bias is not None:
            out = out + self.bias

        return out
    def __repr__(self):
        return "{}({}, {})".format(self.__class__.__name__, self.in_channels,
                                   self.out_channels)
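
A minimal usage sketch (not part of the module source), assuming the layer is importable as torch_geometric.nn.HypergraphConv and that hyperedge_index follows the sparse incidence format expected by forward: node indices in row 0 and, in row 1, the index of the hyperedge each entry belongs to. All tensor values below are illustrative only.

import torch
from torch_geometric.nn import HypergraphConv

# Toy hypergraph with 4 nodes and 2 hyperedges: {0, 1, 2} and {1, 2, 3}.
# Each column of hyperedge_index is one (node, hyperedge) incidence.
hyperedge_index = torch.tensor([[0, 1, 2, 1, 2, 3],
                                [0, 0, 0, 1, 1, 1]])
x = torch.randn(4, 8)  # 4 nodes with 8 input features each

conv = HypergraphConv(in_channels=8, out_channels=16)
out = conv(x, hyperedge_index)  # shape [4, 16]

# Optional diagonal hyperedge weights W, one entry per hyperedge.
hyperedge_weight = torch.tensor([1.0, 0.5])
out = conv(x, hyperedge_index, hyperedge_weight)

With use_attention=True and concat=True, the output instead has heads * out_channels columns per node.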