
Commit e110542

Generate Python docs from pytorch/pytorch@49244d5
1 parent 2a48ae1 commit e110542

849 files changed: +2835 additions, −1231 deletions


docs/master/__config__.html

Lines changed: 1 addition & 1 deletion

@@ -189,7 +189,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
   </div>

docs/master/_modules/index.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/__config__.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/_jit_internal.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/_lobpcg.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/_lowrank.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/_tensor.html

Lines changed: 2 additions & 2 deletions

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

@@ -1315,7 +1315,7 @@ Source code for torch._tensor
       "attribute won't be populated during autograd.backward(). If you indeed want the gradient "
       "for a non-leaf Tensor, use .retain_grad() on the non-leaf Tensor. If you access the "
       "non-leaf Tensor by mistake, make sure you access the leaf Tensor instead. See "
-      "github.com/pytorch/pytorch/pull/30531 for more informations.", stacklevel=2)
+      "github.com/pytorch/pytorch/pull/30531 for more information.", stacklevel=2)
       return self._grad

   @grad.setter

docs/master/_modules/torch/_tensor_str.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/_utils.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/_vmap_internals.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/autograd.html

Lines changed: 14 additions & 13 deletions

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

@@ -455,7 +455,7 @@ Source code for torch.autograd
     retain_graph: Optional[bool] = None,
     create_graph: bool = False,
     grad_variables: Optional[_TensorOrTensors] = None,
-    inputs: Optional[Sequence[torch.Tensor]] = None,
+    inputs: Optional[_TensorOrTensors] = None,
 ) -> None:
     r"""Computes the sum of gradients of given tensors w.r.t. graph leaves.

@@ -487,24 +487,24 @@ Source code for torch.autograd
     :ref:`Stream semantics of backward passes<bwd-cuda-stream-semantics>`.

     Args:
-        tensors (sequence of Tensor): Tensors of which the derivative will be
+        tensors (Sequence[Tensor] or Tensor): Tensors of which the derivative will be
             computed.
-        grad_tensors (sequence of (Tensor or None)): The "vector" in the Jacobian-vector
-            product, usually gradients w.r.t. each element of corresponding tensors.
-            None values can be specified for scalar Tensors or ones that don't require
-            grad. If a None value would be acceptable for all grad_tensors, then this
-            argument is optional.
+        grad_tensors (Sequence[Tensor or None] or Tensor, optional): The "vector" in
+            the Jacobian-vector product, usually gradients w.r.t. each element of
+            corresponding tensors. None values can be specified for scalar Tensors or
+            ones that don't require grad. If a None value would be acceptable for all
+            grad_tensors, then this argument is optional.
         retain_graph (bool, optional): If ``False``, the graph used to compute the grad
             will be freed. Note that in nearly all cases setting this option to ``True``
             is not needed and often can be worked around in a much more efficient
             way. Defaults to the value of ``create_graph``.
         create_graph (bool, optional): If ``True``, graph of the derivative will
             be constructed, allowing to compute higher order derivative products.
             Defaults to ``False``.
-        inputs (sequence of Tensor): Inputs w.r.t. which the gradient will be
-            accumulated into ``.grad``. All other Tensors will be ignored. If not
-            provided, the gradient is accumulated into all the leaf Tensors that were
-            used to compute the attr::tensors. All the provided inputs must be leaf
+        inputs (Sequence[Tensor] or Tensor, optional): Inputs w.r.t. which the gradient
+            will be accumulated into ``.grad``. All other Tensors will be ignored. If
+            not provided, the gradient is accumulated into all the leaf Tensors that
+            were used to compute the attr::tensors. All the provided inputs must be leaf
         Tensors.
     """
     if grad_variables is not None:
@@ -519,7 +519,8 @@ Source code for torch.autograd
         raise RuntimeError("'inputs' argument to backward() cannot be empty.")

     tensors = (tensors,) if isinstance(tensors, torch.Tensor) else tuple(tensors)
-    inputs = tuple(inputs) if inputs is not None else tuple()
+    inputs = (inputs,) if isinstance(inputs, torch.Tensor) else \
+        tuple(inputs) if inputs is not None else tuple()

     grad_tensors_ = _tensor_or_tensors_to_tuple(grad_tensors, len(tensors))
     grad_tensors_ = _make_grads(tensors, grad_tensors_)

docs/master/_modules/torch/autograd/anomaly_mode.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>

docs/master/_modules/torch/autograd/function.html

Lines changed: 1 addition & 1 deletion

@@ -188,7 +188,7 @@
   <div class="version">
-    master (1.9.0a0+git4ec6b36 )
+    master (1.9.0a0+git49244d5 )
  </div>
