
Commit f4f50cc

Generate Python docs from pytorch/pytorch@4ae71c8
1 parent b8364e6 commit f4f50cc

File tree

7 files changed: +98 -98 lines changed


docs/master/_images/RReLU.png

-232 Bytes

docs/master/_modules/torch/ao/quantization/fake_quantize.html

Lines changed: 16 additions & 16 deletions
@@ -426,7 +426,7 @@
 def _is_float_qparams(qscheme: 'torch.qscheme') -> bool:
     return qscheme in [torch.per_channel_affine_float_qparams, ]

-class FakeQuantizeBase(ABC, Module):
+<div class="viewcode-block" id="FakeQuantizeBase"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.FakeQuantizeBase.html#torch.quantization.fake_quantize.FakeQuantizeBase">[docs]</a>class FakeQuantizeBase(ABC, Module):
     r""" Base fake quantize module
     Any fake quantize implementation should derive from this class.

@@ -472,9 +472,9 @@
     def disable_observer(self):
         self.enable_observer(False)

-    with_args = classmethod(_with_args)
+    with_args = classmethod(_with_args)</div>

-class FakeQuantize(FakeQuantizeBase):
+<div class="viewcode-block" id="FakeQuantize"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.FakeQuantize.html#torch.quantization.fake_quantize.FakeQuantize">[docs]</a>class FakeQuantize(FakeQuantizeBase):
     r""" Simulate the quantize and dequantize operations in training time.
     The output of this module is given by::

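Note: the hunk above cuts off just before the formula in the FakeQuantize docstring. For orientation, the quantize-dequantize round trip that FakeQuantize simulates can be sketched in plain tensor ops (a minimal sketch only; the module's actual forward dispatches to fused fake-quantize kernels)::

    import torch

    def fake_quantize(x: torch.Tensor, scale: float, zero_point: int,
                      quant_min: int = 0, quant_max: int = 255) -> torch.Tensor:
        # Quantize onto the integer grid, clamp to the representable range,
        # then dequantize: the result stays float but only takes values
        # representable under (scale, zero_point).
        q = torch.clamp(torch.round(x / scale + zero_point), quant_min, quant_max)
        return (q - zero_point) * scale

    print(fake_quantize(torch.randn(4), scale=0.1, zero_point=128))
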
@@ -613,10 +613,10 @@
         elif strict:
             missing_keys.append(key)
         super(FakeQuantize, self)._load_from_state_dict(state_dict, prefix, local_metadata, strict,
-                                                        missing_keys, unexpected_keys, error_msgs)
+                                                        missing_keys, unexpected_keys, error_msgs)</div>


-class FixedQParamsFakeQuantize(FakeQuantize):
+<div class="viewcode-block" id="FixedQParamsFakeQuantize"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.FixedQParamsFakeQuantize.html#torch.quantization.fake_quantize.FixedQParamsFakeQuantize">[docs]</a>class FixedQParamsFakeQuantize(FakeQuantize):
     """ Simulate quantize and dequantize with fixed quantization
     parameters in training time. Only per tensor quantization
     is supported.
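For context, FixedQParamsFakeQuantize targets ops whose output range is known a priori (e.g. sigmoid, whose outputs lie in [0, 1)), so scale and zero_point are fixed up front rather than derived from an observer. A minimal sketch, assuming the constructor signature of this vintage of the source (later releases build it from a fixed-qparams observer instead)::

    import torch
    from torch.ao.quantization.fake_quantize import FixedQParamsFakeQuantize

    # Assumption: scale/zero_point are passed directly, as in this era of
    # the source tree.
    fq = FixedQParamsFakeQuantize(scale=1.0 / 256.0, zero_point=0,
                                  dtype=torch.quint8, quant_min=0, quant_max=255)
    print(fq(torch.sigmoid(torch.randn(4))))
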
@@ -644,10 +644,10 @@
                'dtype={}, quant_min={}, quant_max={}, qscheme={}'.format(
                    self.fake_quant_enabled, self.observer_enabled,
                    self.scale, self.zero_point, self.dtype,
-                   self.quant_min, self.quant_max, self.qscheme)
+                   self.quant_min, self.quant_max, self.qscheme)</div>


-class FusedMovingAvgObsFakeQuantize(FakeQuantize):
+<div class="viewcode-block" id="FusedMovingAvgObsFakeQuantize"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize.html#torch.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize">[docs]</a>class FusedMovingAvgObsFakeQuantize(FakeQuantize):
     r"""Fused module that is used to observe the input tensor (compute min/max), compute
     scale/zero_point and fake_quantize the tensor.
     This module uses calculation similar MovingAverageMinMaxObserver for the inputs,
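The fused module folds observation (moving-average min/max tracking), qparam computation, and fake quantization into a single op, which is cheaper during QAT than chaining a separate observer and fake-quant module. A minimal usage sketch, assuming the default constructor arguments::

    import torch
    from torch.ao.quantization.fake_quantize import FusedMovingAvgObsFakeQuantize

    fq = FusedMovingAvgObsFakeQuantize()  # defaults to MovingAverageMinMaxObserver
    for _ in range(3):
        y = fq(torch.randn(8))  # updates running min/max, then fake-quantizes
    print(fq.scale, fq.zero_point)  # qparams derived from the running range
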
@@ -717,7 +717,7 @@
             self.ch_axis,
             self.is_per_channel,
             self.is_symmetric_quant,
-        )
+        )</div>

 default_fake_quant = FakeQuantize.with_args(observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255,
                                             dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
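with_args (bound as a classmethod in the hunks above) returns a factory with keyword arguments pre-applied, so default_fake_quant is a constructor rather than an instance: each module that uses it gets its own FakeQuantize with private observer state. A short illustration, assuming the import paths of this source tree::

    import torch
    from torch.ao.quantization.fake_quantize import FakeQuantize
    from torch.ao.quantization.observer import MovingAverageMinMaxObserver

    fq_factory = FakeQuantize.with_args(
        observer=MovingAverageMinMaxObserver, quant_min=0, quant_max=255,
        dtype=torch.quint8, qscheme=torch.per_tensor_affine, reduce_range=True)
    fq_a, fq_b = fq_factory(), fq_factory()
    assert fq_a is not fq_b  # separate instances, separate observer state
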
@@ -816,7 +816,7 @@
             name == 'torch.ao.quantization.fake_quantize.FusedMovingAvgObsFakeQuantize'
     return False

-<div class="viewcode-block" id="disable_fake_quant"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.disable_fake_quant.html#torch.quantization.fake_quantize.disable_fake_quant">[docs]</a>def disable_fake_quant(mod):
+def disable_fake_quant(mod):
     """
     Disable fake quantization for this module, if applicable. Example usage::

@@ -825,9 +825,9 @@

     """
     if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
-        mod.disable_fake_quant()</div>
+        mod.disable_fake_quant()

-<div class="viewcode-block" id="enable_fake_quant"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.enable_fake_quant.html#torch.quantization.fake_quantize.enable_fake_quant">[docs]</a>def enable_fake_quant(mod):
+def enable_fake_quant(mod):
     """
     Enable fake quantization for this module, if applicable. Example usage::

@@ -836,9 +836,9 @@

     """
     if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
-        mod.enable_fake_quant()</div>
+        mod.enable_fake_quant()

-<div class="viewcode-block" id="disable_observer"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.disable_observer.html#torch.quantization.fake_quantize.disable_observer">[docs]</a>def disable_observer(mod):
+def disable_observer(mod):
     """
     Disable observation for this module, if applicable. Example usage::

@@ -847,9 +847,9 @@

     """
     if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
-        mod.disable_observer()</div>
+        mod.disable_observer()

-<div class="viewcode-block" id="enable_observer"><a class="viewcode-back" href="../../../../generated/torch.quantization.fake_quantize.enable_observer.html#torch.quantization.fake_quantize.enable_observer">[docs]</a>def enable_observer(mod):
+def enable_observer(mod):
     """
     Enable observation for this module, if applicable. Example usage::

@@ -858,7 +858,7 @@

     """
     if isinstance(mod, FakeQuantizeBase) or _is_fake_quant_script_module(mod):
-        mod.enable_observer()</div>
+        mod.enable_observer()
 </pre></div>

 </article>
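
The four module-level helpers in the hunks above are written to be passed to Module.apply, which visits every submodule, so fake quantization and observation can be toggled across a whole QAT-prepared model. A minimal sketch on a single FakeQuantize module (the same calls work on a full model)::

    import torch
    from torch.ao.quantization.fake_quantize import (
        FakeQuantize, disable_fake_quant, disable_observer, enable_fake_quant)

    fq = FakeQuantize()           # default moving-average min/max observer
    fq.apply(disable_fake_quant)  # forward now passes values through
    fq.apply(disable_observer)    # ...and stops updating min/max statistics
    x = torch.randn(4)
    assert torch.equal(fq(x), x)  # no-op while both are disabled
    fq.apply(enable_fake_quant)   # resume simulated quantization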
