Commit 26aa74f

auto-generating sphinx docs

1 parent d69fc4a commit 26aa74f

docs/master/_modules/torch/functional.html

Lines changed: 12 additions & 12 deletions

@@ -373,7 +373,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="p">]</span>


-<div class="viewcode-block" id="broadcast_tensors"><a class="viewcode-back" href="../../generated/torch.broadcast_tensors.html#torch.broadcast_tensors">[docs]</a><span class="k">def</span> <span class="nf">broadcast_tensors</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">broadcast_tensors</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">&quot;&quot;&quot;broadcast_tensors(*tensors) -&gt; List of Tensors</span>

 <span class="sd">    Broadcasts the given tensors according to :ref:`broadcasting-semantics`.</span>
@@ -402,7 +402,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">broadcast_tensors</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">broadcast_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">broadcast_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span>


 <div class="viewcode-block" id="split"><a class="viewcode-back" href="../../generated/torch.split.html#torch.split">[docs]</a><span class="k">def</span> <span class="nf">split</span><span class="p">(</span><span class="n">tensor</span><span class="p">,</span> <span class="n">split_size_or_sections</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
@@ -580,7 +580,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">return</span> <span class="n">P</span><span class="p">,</span> <span class="n">L</span><span class="p">,</span> <span class="n">U</span>


-<span class="k">def</span> <span class="nf">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">):</span>
+<div class="viewcode-block" id="einsum"><a class="viewcode-back" href="../../generated/torch.einsum.html#torch.einsum">[docs]</a><span class="k">def</span> <span class="nf">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">&quot;&quot;&quot;einsum(equation, *operands) -&gt; Tensor</span>

 <span class="sd">This function provides a way of computing multilinear expressions (i.e. sums of products) using the</span>
@@ -664,7 +664,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="c1"># in the original implementation this line is omitted</span>
 <span class="k">return</span> <span class="n">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">)</span>

-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="n">operands</span><span class="p">)</span>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="n">operands</span><span class="p">)</span></div>


 <span class="k">def</span> <span class="nf">meshgrid</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
@@ -1198,7 +1198,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="n">dims_b</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="n">dims</span><span class="p">))</span>
 <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">tensordot</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">,</span> <span class="n">dims_a</span><span class="p">,</span> <span class="n">dims_b</span><span class="p">)</span></div>

-<div class="viewcode-block" id="cartesian_prod"><a class="viewcode-back" href="../../generated/torch.cartesian_prod.html#torch.cartesian_prod">[docs]</a><span class="k">def</span> <span class="nf">cartesian_prod</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">cartesian_prod</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
 <span class="sd">&quot;&quot;&quot;Do cartesian product of the given sequence of tensors. The behavior is similar to</span>
 <span class="sd">    python&#39;s `itertools.product`.</span>

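The cartesian_prod docstring above compares it to itertools.product; a minimal sketch of the public torch.cartesian_prod API (values chosen for illustration):

    import torch

    # cartesian_prod mirrors itertools.product over 1-D tensors.
    a = torch.tensor([1, 2])
    b = torch.tensor([3, 4])
    print(torch.cartesian_prod(a, b))
    # tensor([[1, 3],
    #         [1, 4],
    #         [2, 3],
    #         [2, 4]])
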
@@ -1229,9 +1229,9 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">cartesian_prod</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cartesian_prod</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cartesian_prod</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span>

-<div class="viewcode-block" id="block_diag"><a class="viewcode-back" href="../../generated/torch.block_diag.html#torch.block_diag">[docs]</a><span class="k">def</span> <span class="nf">block_diag</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">block_diag</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
 <span class="sd">&quot;&quot;&quot;Create a block diagonal matrix from provided tensors.</span>

 <span class="sd">    Arguments:</span>
@@ -1263,10 +1263,10 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="sd">    &quot;&quot;&quot;</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">block_diag</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">_C</span><span class="o">.</span><span class="n">_VariableFunctions</span><span class="o">.</span><span class="n">block_diag</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">_C</span><span class="o">.</span><span class="n">_VariableFunctions</span><span class="o">.</span><span class="n">block_diag</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span>


-<div class="viewcode-block" id="cdist"><a class="viewcode-back" href="../../generated/torch.cdist.html#torch.cdist">[docs]</a><span class="k">def</span> <span class="nf">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="mf">2.</span><span class="p">,</span> <span class="n">compute_mode</span><span class="o">=</span><span class="s1">&#39;use_mm_for_euclid_dist_if_necessary&#39;</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="mf">2.</span><span class="p">,</span> <span class="n">compute_mode</span><span class="o">=</span><span class="s1">&#39;use_mm_for_euclid_dist_if_necessary&#39;</span><span class="p">):</span>
 <span class="c1"># type: (Tensor, Tensor, float, str) -&gt; (Tensor)</span>
 <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Computes batched the p-norm distance between each pair of the two collections of row vectors.</span>

@@ -1319,7 +1319,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">elif</span> <span class="n">compute_mode</span> <span class="o">==</span> <span class="s1">&#39;donot_use_mm_for_euclid_dist&#39;</span><span class="p">:</span>
 <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
 <span class="k">else</span><span class="p">:</span>
-<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;</span><span class="si">{}</span><span class="s2"> is not a valid value for compute_mode&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">compute_mode</span><span class="p">))</span></div>
+<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">&quot;</span><span class="si">{}</span><span class="s2"> is not a valid value for compute_mode&quot;</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">compute_mode</span><span class="p">))</span>

 <span class="c1"># TODO: type dim as BroadcastingList when https://github.com/pytorch/pytorch/issues/33782 is fixed</span>
 <span class="nd">@overload</span> <span class="c1"># noqa: 749</span>
@@ -1469,7 +1469,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">else</span><span class="p">:</span>
 <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span></div>

-<div class="viewcode-block" id="chain_matmul"><a class="viewcode-back" href="../../generated/torch.chain_matmul.html#torch.chain_matmul">[docs]</a><span class="k">def</span> <span class="nf">chain_matmul</span><span class="p">(</span><span class="o">*</span><span class="n">matrices</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">chain_matmul</span><span class="p">(</span><span class="o">*</span><span class="n">matrices</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">&quot;&quot;&quot;Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed</span>
 <span class="sd">    using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms</span>
 <span class="sd">    of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N`</span>
@@ -1501,7 +1501,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">matrices</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">matrices</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">chain_matmul</span><span class="p">,</span> <span class="n">matrices</span><span class="p">,</span> <span class="o">*</span><span class="n">matrices</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">chain_matmul</span><span class="p">(</span><span class="n">matrices</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">chain_matmul</span><span class="p">(</span><span class="n">matrices</span><span class="p">)</span>


 <span class="k">def</span> <span class="nf">_lu_impl</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">get_infos</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
