158158class MappingNetwork (nn .Module ):
159159 """
160160 <a id="mapping_network"></a>
161+
161162 ## Mapping Network
162163
163164 
@@ -196,6 +197,7 @@ def forward(self, z: torch.Tensor):
196197class Generator (nn .Module ):
197198 """
198199 <a id="generator"></a>
200+
199201 ## StyleGAN2 Generator
200202
201203 
@@ -276,6 +278,7 @@ def forward(self, w: torch.Tensor, input_noise: List[Tuple[Optional[torch.Tensor
276278class GeneratorBlock (nn .Module ):
277279 """
278280 <a id="generator_block"></a>
281+
279282 ### Generator Block
280283
281284 
@@ -327,6 +330,7 @@ def forward(self, x: torch.Tensor, w: torch.Tensor, noise: Tuple[Optional[torch.
327330class StyleBlock (nn .Module ):
328331 """
329332 <a id="style_block"></a>
333+
330334 ### Style Block
331335
332336 
@@ -377,6 +381,7 @@ def forward(self, x: torch.Tensor, w: torch.Tensor, noise: Optional[torch.Tensor
377381class ToRGB (nn .Module ):
378382 """
379383 <a id="to_rgb"></a>
384+
380385 ### To RGB
381386
382387 
@@ -489,6 +494,7 @@ def forward(self, x: torch.Tensor, s: torch.Tensor):
489494class Discriminator (nn .Module ):
490495 """
491496 <a id="discriminator"></a>
497+
492498 ## StyleGAN 2 Discriminator
493499
494500 
@@ -557,6 +563,7 @@ def forward(self, x: torch.Tensor):
557563class DiscriminatorBlock (nn .Module ):
558564 """
559565 <a id="discriminator_black"></a>
566+
560567 ### Discriminator Block
561568
562569 
@@ -645,6 +652,7 @@ def forward(self, x: torch.Tensor):
645652class DownSample (nn .Module ):
646653 """
647654 <a id="down_sample"></a>
655+
648656 ### Down-sample
649657
650658 The down-sample operation [smoothens](#smooth) each feature channel and
@@ -668,6 +676,7 @@ def forward(self, x: torch.Tensor):
668676class UpSample (nn .Module ):
669677 """
670678 <a id="up_sample"></a>
679+
671680 ### Up-sample
672681
 673682 The up-sample operation scales the image up by $2 \times$ and [smoothens](#smooth) each feature channel.
@@ -690,6 +699,7 @@ def forward(self, x: torch.Tensor):
690699class Smooth (nn .Module ):
691700 """
692701 <a id="smooth"></a>
702+
693703 ### Smoothing Layer
694704
695705 This layer blurs each channel
@@ -729,6 +739,7 @@ def forward(self, x: torch.Tensor):
729739class EqualizedLinear (nn .Module ):
730740 """
731741 <a id="equalized_linear"></a>
742+
732743 ## Learning-rate Equalized Linear Layer
733744
734745 This uses [learning-rate equalized weights](#equalized_weights) for a linear layer.
@@ -755,6 +766,7 @@ def forward(self, x: torch.Tensor):
755766class EqualizedConv2d (nn .Module ):
756767 """
757768 <a id="equalized_conv2d"></a>
769+
758770 ## Learning-rate Equalized 2D Convolution Layer
759771
760772 This uses [learning-rate equalized weights](#equalized_weights) for a convolution layer.
@@ -784,6 +796,7 @@ def forward(self, x: torch.Tensor):
784796class EqualizedWeight (nn .Module ):
785797 """
786798 <a id="equalized_weight"></a>
799+
787800 ## Learning-rate Equalized Weights Parameter
788801
789802 This is based on equalized learning rate introduced in the Progressive GAN paper.
@@ -821,6 +834,7 @@ def forward(self):
821834class GradientPenalty (nn .Module ):
822835 """
823836 <a id="gradient_penalty"></a>
837+
824838 ## Gradient Penalty
825839
 826840 This is the $R_1$ regularization penalty from the paper
@@ -862,6 +876,7 @@ def forward(self, x: torch.Tensor, d: torch.Tensor):
862876class PathLengthPenalty (nn .Module ):
863877 """
864878 <a id="path_length_penalty"></a>
879+
865880 ## Path Length Penalty
866881
867882 This regularization encourages a fixed-size step in $w$ to result in a fixed-magnitude
0 commit comments