@@ -373,7 +373,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="p">]</span>


-<div class="viewcode-block" id="broadcast_tensors"><a class="viewcode-back" href="../../generated/torch.broadcast_tensors.html#torch.broadcast_tensors">[docs]</a><span class="k">def</span> <span class="nf">broadcast_tensors</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">broadcast_tensors</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">"""broadcast_tensors(*tensors) -> List of Tensors</span>

 <span class="sd">Broadcasts the given tensors according to :ref:`broadcasting-semantics`.</span>
@@ -402,7 +402,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">broadcast_tensors</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">broadcast_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">broadcast_tensors</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span>


 <div class="viewcode-block" id="split"><a class="viewcode-back" href="../../generated/torch.split.html#torch.split">[docs]</a><span class="k">def</span> <span class="nf">split</span><span class="p">(</span><span class="n">tensor</span><span class="p">,</span> <span class="n">split_size_or_sections</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">):</span>
@@ -580,7 +580,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">return</span> <span class="n">P</span><span class="p">,</span> <span class="n">L</span><span class="p">,</span> <span class="n">U</span>


-<span class="k">def</span> <span class="nf">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">):</span>
+<div class="viewcode-block" id="einsum"><a class="viewcode-back" href="../../generated/torch.einsum.html#torch.einsum">[docs]</a><span class="k">def</span> <span class="nf">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">"""einsum(equation, *operands) -> Tensor</span>

 <span class="sd">This function provides a way of computing multilinear expressions (i.e. sums of products) using the</span>
@@ -664,7 +664,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="c1"># in the original implementation this line is omitted</span>
 <span class="k">return</span> <span class="n">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="o">*</span><span class="n">operands</span><span class="p">)</span>

-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="n">operands</span><span class="p">)</span>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">einsum</span><span class="p">(</span><span class="n">equation</span><span class="p">,</span> <span class="n">operands</span><span class="p">)</span></div>


 <span class="k">def</span> <span class="nf">meshgrid</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
@@ -1198,7 +1198,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="n">dims_b</span> <span class="o">=</span> <span class="nb">list</span><span class="p">(</span><span class="nb">range</span><span class="p">(</span><span class="n">dims</span><span class="p">))</span>
 <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">tensordot</span><span class="p">(</span><span class="n">a</span><span class="p">,</span> <span class="n">b</span><span class="p">,</span> <span class="n">dims_a</span><span class="p">,</span> <span class="n">dims_b</span><span class="p">)</span></div>

-<div class="viewcode-block" id="cartesian_prod"><a class="viewcode-back" href="../../generated/torch.cartesian_prod.html#torch.cartesian_prod">[docs]</a><span class="k">def</span> <span class="nf">cartesian_prod</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">cartesian_prod</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
 <span class="sd">"""Do cartesian product of the given sequence of tensors. The behavior is similar to</span>
 <span class="sd">python's `itertools.product`.</span>

@@ -1229,9 +1229,9 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">cartesian_prod</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cartesian_prod</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cartesian_prod</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span>

-<div class="viewcode-block" id="block_diag"><a class="viewcode-back" href="../../generated/torch.block_diag.html#torch.block_diag">[docs]</a><span class="k">def</span> <span class="nf">block_diag</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">block_diag</span><span class="p">(</span><span class="o">*</span><span class="n">tensors</span><span class="p">):</span>
 <span class="sd">"""Create a block diagonal matrix from provided tensors.</span>

 <span class="sd">Arguments:</span>
@@ -1263,10 +1263,10 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="sd">"""</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">tensors</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">tensors</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">block_diag</span><span class="p">,</span> <span class="n">tensors</span><span class="p">,</span> <span class="o">*</span><span class="n">tensors</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">_C</span><span class="o">.</span><span class="n">_VariableFunctions</span><span class="o">.</span><span class="n">block_diag</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">torch</span><span class="o">.</span><span class="n">_C</span><span class="o">.</span><span class="n">_VariableFunctions</span><span class="o">.</span><span class="n">block_diag</span><span class="p">(</span><span class="n">tensors</span><span class="p">)</span>


-<div class="viewcode-block" id="cdist"><a class="viewcode-back" href="../../generated/torch.cdist.html#torch.cdist">[docs]</a><span class="k">def</span> <span class="nf">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="mf">2.</span><span class="p">,</span> <span class="n">compute_mode</span><span class="o">=</span><span class="s1">'use_mm_for_euclid_dist_if_necessary'</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="o">=</span><span class="mf">2.</span><span class="p">,</span> <span class="n">compute_mode</span><span class="o">=</span><span class="s1">'use_mm_for_euclid_dist_if_necessary'</span><span class="p">):</span>
 <span class="c1"># type: (Tensor, Tensor, float, str) -> (Tensor)</span>
 <span class="sa">r</span><span class="sd">"""Computes batched the p-norm distance between each pair of the two collections of row vectors.</span>

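Note that `block_diag` is the one wrapper in this file that calls `torch._C._VariableFunctions` directly rather than going through the `_VF` alias. A minimal usage sketch (inputs are illustrative):

```python
import torch

# block_diag places its inputs along the diagonal and zero-fills
# everything else; inputs may have different shapes.
a = torch.ones(2, 2)
b = torch.full((1, 3), 2.)
print(torch.block_diag(a, b))
# tensor([[1., 1., 0., 0., 0.],
#         [1., 1., 0., 0., 0.],
#         [0., 0., 2., 2., 2.]])
```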
@@ -1319,7 +1319,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">elif</span> <span class="n">compute_mode</span> <span class="o">==</span> <span class="s1">'donot_use_mm_for_euclid_dist'</span><span class="p">:</span>
 <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">cdist</span><span class="p">(</span><span class="n">x1</span><span class="p">,</span> <span class="n">x2</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="mi">2</span><span class="p">)</span>
 <span class="k">else</span><span class="p">:</span>
-<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">"</span><span class="si">{}</span><span class="s2"> is not a valid value for compute_mode"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">compute_mode</span><span class="p">))</span></div>
+<span class="k">raise</span> <span class="ne">ValueError</span><span class="p">(</span><span class="s2">"</span><span class="si">{}</span><span class="s2"> is not a valid value for compute_mode"</span><span class="o">.</span><span class="n">format</span><span class="p">(</span><span class="n">compute_mode</span><span class="p">))</span>

 <span class="c1"># TODO: type dim as BroadcastingList when https://github.com/pytorch/pytorch/issues/33782 is fixed</span>
 <span class="nd">@overload</span>  <span class="c1"># noqa: 749</span>
@@ -1469,7 +1469,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">else</span><span class="p">:</span>
 <span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">norm</span><span class="p">(</span><span class="nb">input</span><span class="p">,</span> <span class="n">p</span><span class="p">,</span> <span class="n">_dim</span><span class="p">,</span> <span class="n">keepdim</span><span class="o">=</span><span class="n">keepdim</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">dtype</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="n">out</span><span class="p">)</span></div>

-<div class="viewcode-block" id="chain_matmul"><a class="viewcode-back" href="../../generated/torch.chain_matmul.html#torch.chain_matmul">[docs]</a><span class="k">def</span> <span class="nf">chain_matmul</span><span class="p">(</span><span class="o">*</span><span class="n">matrices</span><span class="p">):</span>
+<span class="k">def</span> <span class="nf">chain_matmul</span><span class="p">(</span><span class="o">*</span><span class="n">matrices</span><span class="p">):</span>
 <span class="sa">r</span><span class="sd">"""Returns the matrix product of the :math:`N` 2-D tensors. This product is efficiently computed</span>
 <span class="sd">using the matrix chain order algorithm which selects the order in which incurs the lowest cost in terms</span>
 <span class="sd">of arithmetic operations (`[CLRS]`_). Note that since this is a function to compute the product, :math:`N`</span>
@@ -1501,7 +1501,7 @@ <h1>Source code for torch.functional</h1><div class="highlight"><pre>
 <span class="k">if</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">jit</span><span class="o">.</span><span class="n">is_scripting</span><span class="p">():</span>
 <span class="k">if</span> <span class="nb">any</span><span class="p">(</span><span class="nb">type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">Tensor</span> <span class="k">for</span> <span class="n">t</span> <span class="ow">in</span> <span class="n">matrices</span><span class="p">)</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">(</span><span class="n">matrices</span><span class="p">):</span>
 <span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">chain_matmul</span><span class="p">,</span> <span class="n">matrices</span><span class="p">,</span> <span class="o">*</span><span class="n">matrices</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">chain_matmul</span><span class="p">(</span><span class="n">matrices</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">_VF</span><span class="o">.</span><span class="n">chain_matmul</span><span class="p">(</span><span class="n">matrices</span><span class="p">)</span>


 <span class="k">def</span> <span class="nf">_lu_impl</span><span class="p">(</span><span class="n">A</span><span class="p">,</span> <span class="n">pivot</span><span class="o">=</span><span class="kc">True</span><span class="p">,</span> <span class="n">get_infos</span><span class="o">=</span><span class="kc">False</span><span class="p">,</span> <span class="n">out</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>