Source code for dgl.nn.pytorch.conv.densegraphconv
"""Torch Module for DenseGraphConv"""# pylint: disable= no-member, arguments-differ, invalid-nameimporttorchasthfromtorchimportnnfromtorch.nnimportinit
class DenseGraphConv(nn.Module):
    r"""Graph Convolutional layer from `Semi-Supervised Classification with
    Graph Convolutional Networks <https://arxiv.org/abs/1609.02907>`__

    We recommend using this module when applying graph convolution to
    dense graphs.

    Parameters
    ----------
    in_feats : int
        Input feature size; i.e., the number of dimensions of :math:`h_j^{(l)}`.
    out_feats : int
        Output feature size; i.e., the number of dimensions of :math:`h_i^{(l+1)}`.
    norm : str, optional
        How to apply the normalizer. If `'right'`, divide the aggregated messages
        by each node's in-degree, which is equivalent to averaging the received
        messages. If `'none'`, no normalization is applied. Default is `'both'`,
        where the :math:`c_{ij}` in the paper is applied.
    bias : bool, optional
        If True, adds a learnable bias to the output. Default: ``True``.
    activation : callable activation function/layer or None, optional
        If not None, applies an activation function to the updated node features.
        Default: ``None``.

    Notes
    -----
    Zero in-degree nodes will lead to all-zero output. A common practice to
    avoid this is to add a self-loop for each node in the graph, which can be
    achieved by setting the diagonal of the adjacency matrix to be 1.

    Example
    -------
    >>> import dgl
    >>> import numpy as np
    >>> import torch as th
    >>> from dgl.nn import DenseGraphConv
    >>>
    >>> feat = th.ones(6, 10)
    >>> adj = th.tensor([[0., 0., 1., 0., 0., 0.],
    ...                  [1., 0., 0., 0., 0., 0.],
    ...                  [0., 1., 0., 0., 0., 0.],
    ...                  [0., 0., 1., 0., 0., 1.],
    ...                  [0., 0., 0., 1., 0., 0.],
    ...                  [0., 0., 0., 0., 0., 0.]])
    >>> conv = DenseGraphConv(10, 2)
    >>> res = conv(adj, feat)
    >>> res
    tensor([[0.2159, 1.9027],
            [0.3053, 2.6908],
            [0.3053, 2.6908],
            [0.3685, 3.2481],
            [0.3053, 2.6908],
            [0.0000, 0.0000]], grad_fn=<AddBackward0>)

    See also
    --------
    `GraphConv <https://docs.dgl.ai/api/python/nn.pytorch.html#graphconv>`__
    """

    def __init__(
        self, in_feats, out_feats, norm="both", bias=True, activation=None
    ):
        super(DenseGraphConv, self).__init__()
        self._in_feats = in_feats
        self._out_feats = out_feats
        self._norm = norm
        self.weight = nn.Parameter(th.Tensor(in_feats, out_feats))
        if bias:
            self.bias = nn.Parameter(th.Tensor(out_feats))
        else:
            self.register_buffer("bias", None)
        self.reset_parameters()
        self._activation = activation
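    def reset_parameters(self):
        r"""Reinitialize learnable parameters.

        Note: ``__init__`` above calls this method and ``init`` is imported
        for it, but its body was missing from this listing. The
        Xavier-uniform scheme below follows DGL's usual initialization and
        is a reconstruction, not verbatim source.
        """
        init.xavier_uniform_(self.weight)
        if self.bias is not None:
            init.zeros_(self.bias)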
    def forward(self, adj, feat):
        r"""Compute (Dense) Graph Convolution layer.

        Parameters
        ----------
        adj : torch.Tensor
            The adjacency matrix of the graph to apply Graph Convolution on.
            When applied to a unidirectional bipartite graph, ``adj`` should be
            of shape :math:`(N_{out}, N_{in})`; when applied to a homogeneous
            graph, ``adj`` should be of shape :math:`(N, N)`. In both cases,
            a row represents a destination node while a column represents a
            source node.
        feat : torch.Tensor
            The input feature of shape :math:`(N_{in}, D_{in})` where
            :math:`D_{in}` is the size of the input feature.

        Returns
        -------
        torch.Tensor
            The output feature of shape :math:`(N, D_{out})` where
            :math:`D_{out}` is the size of the output feature.
        """
        adj = adj.to(feat)
        src_degrees = adj.sum(dim=0).clamp(min=1)
        dst_degrees = adj.sum(dim=1).clamp(min=1)
        feat_src = feat

        if self._norm == "both":
            norm_src = th.pow(src_degrees, -0.5)
            shp = norm_src.shape + (1,) * (feat.dim() - 1)
            norm_src = th.reshape(norm_src, shp).to(feat.device)
            feat_src = feat_src * norm_src

        if self._in_feats > self._out_feats:
            # Multiply by W first to reduce the feature size for aggregation.
            feat_src = th.matmul(feat_src, self.weight)
            rst = adj @ feat_src
        else:
            # Aggregate first, then multiply by W.
            rst = adj @ feat_src
            rst = th.matmul(rst, self.weight)

        if self._norm != "none":
            if self._norm == "both":
                norm_dst = th.pow(dst_degrees, -0.5)
            else:  # right
                norm_dst = 1.0 / dst_degrees
            shp = norm_dst.shape + (1,) * (feat.dim() - 1)
            norm_dst = th.reshape(norm_dst, shp).to(feat.device)
            rst = rst * norm_dst

        if self.bias is not None:
            rst = rst + self.bias
        if self._activation is not None:
            rst = self._activation(rst)
        return rst
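The sketch below (not part of the module source) illustrates two points made in the docstrings above: adding self-loops by setting the diagonal of ``adj`` to 1 so zero in-degree nodes do not produce all-zero output, and applying the layer to a unidirectional bipartite graph with a rectangular adjacency matrix of shape :math:`(N_{out}, N_{in})`. Only shapes are shown, since numeric values depend on the random weight initialization.

>>> import torch as th
>>> from dgl.nn import DenseGraphConv
>>>
>>> # Self-loops: node 5 in the docstring example is isolated and gets an
>>> # all-zero output; setting the diagonal to 1 lets it keep its own features.
>>> adj = th.tensor([[0., 0., 1., 0., 0., 0.],
...                  [1., 0., 0., 0., 0., 0.],
...                  [0., 1., 0., 0., 0., 0.],
...                  [0., 0., 1., 0., 0., 1.],
...                  [0., 0., 0., 1., 0., 0.],
...                  [0., 0., 0., 0., 0., 0.]])
>>> adj_sl = adj + th.eye(6)
>>> conv = DenseGraphConv(10, 2)
>>> conv(adj_sl, th.ones(6, 10)).shape
torch.Size([6, 2])
>>>
>>> # Bipartite case: rows index destination nodes, columns index source
>>> # nodes, so a (4, 6) adjacency maps 6 source nodes to 4 output rows.
>>> adj_bi = th.ones(4, 6)
>>> conv(adj_bi, th.ones(6, 10)).shape
torch.Size([4, 2])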