@@ -140,10 +140,10 @@ def forward(self, h: torch.Tensor, adj_mat: torch.Tensor):
         g_repeat_interleave = g.repeat_interleave(n_nodes, dim=0)
         # Now we concatenate to get
         # $$\{\overrightarrow{g_1} \Vert \overrightarrow{g_1},
-        # \overrightarrow{g_1}, \Vert \overrightarrow{g_2},
+        # \overrightarrow{g_1} \Vert \overrightarrow{g_2},
         # \dots, \overrightarrow{g_1} \Vert \overrightarrow{g_N},
         # \overrightarrow{g_2} \Vert \overrightarrow{g_1},
-        # \overrightarrow{g_2}, \Vert \overrightarrow{g_2},
+        # \overrightarrow{g_2} \Vert \overrightarrow{g_2},
         # \dots, \overrightarrow{g_2} \Vert \overrightarrow{g_N}, ...\}$$
         g_concat = torch.cat([g_repeat_interleave, g_repeat], dim=-1)
         # Reshape so that `g_concat[i, j]` is $\overrightarrow{g_i} \Vert \overrightarrow{g_j}$
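
These few lines are the whole pairwise-concatenation trick, so a tiny standalone version may help. A minimal sketch, assuming a single attention head and toy sizes (n_nodes = 3, n_hidden = 2); the variable names mirror the file, but the setup is illustrative:

import torch

n_nodes, n_hidden = 3, 2
g = torch.arange(n_nodes * n_hidden, dtype=torch.float32).view(n_nodes, n_hidden)

# {g_1, ..., g_N} repeated as a block: g_1, g_2, g_3, g_1, g_2, g_3, ...
g_repeat = g.repeat(n_nodes, 1)
# Each row repeated in place: g_1, g_1, g_1, g_2, g_2, g_2, ...
g_repeat_interleave = g.repeat_interleave(n_nodes, dim=0)
# Concatenate along the feature dimension: row i*n_nodes + j holds g_i || g_j
g_concat = torch.cat([g_repeat_interleave, g_repeat], dim=-1)
# Reshape so that g_concat[i, j] is g_i || g_j
g_concat = g_concat.view(n_nodes, n_nodes, 2 * n_hidden)

assert torch.equal(g_concat[1, 2], torch.cat([g[1], g[2]]))

(In the file itself g also carries a head dimension, so there the block repeat is g.repeat(n_nodes, 1, 1) and the final view targets (n_nodes, n_nodes, n_heads, 2 * n_hidden).)
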
@@ -170,7 +170,7 @@ def forward(self, h: torch.Tensor, adj_mat: torch.Tensor):
 
         # We then normalize attention scores (or coefficients)
         # $$\alpha_{ij} = \text{softmax}_j(e_{ij}) =
-        # \frac{\exp(e_{ij})}{\sum_{j \in \mathcal{N}_i} \exp(e_{ij})}$$
+        # \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})}$$
         #
         # where $\mathcal{N}_i$ is the set of nodes connected to $i$.
         #
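
Because the sum in the denominator runs only over the neighbours $\mathcal{N}_i$, this is implemented as a masked softmax. A minimal sketch, assuming a boolean adjacency matrix with self-loops; the toy graph and tensor names are illustrative, not the file's own:

import torch
import torch.nn.functional as F

n_nodes = 3
e = torch.randn(n_nodes, n_nodes)  # unnormalized scores e_ij
adj_mat = torch.tensor([[1, 1, 0],
                        [1, 1, 1],
                        [0, 1, 1]], dtype=torch.bool)

# Setting non-edges to -inf makes exp(e_ij) vanish for j outside N_i,
# so softmax over dim=-1 computes exp(e_ij) / sum_{k in N_i} exp(e_ik)
e = e.masked_fill(~adj_mat, float('-inf'))
a = F.softmax(e, dim=-1)

assert torch.allclose(a.sum(dim=-1), torch.ones(n_nodes))

Each row of a then holds the coefficients $\alpha_{ij}$ used to take the weighted average of the neighbours' features.
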