<div class="version">
- <a href='http://pytorch.org/docs/versions.html'>1.8.0a0+4b6fea9 ▼</a>
+ <a href='http://pytorch.org/docs/versions.html'>1.8.0a0+a8da1dd ▼</a>
</div>
<li class="toctree-l1"><a class="reference internal" href="../notes/extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../notes/faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../notes/large_scale_deployments.html">Features for large-scale deployments</a></li>
+ <li class="toctree-l1"><a class="reference internal" href="../notes/modules.html">Modules</a></li>
<li class="toctree-l1"><a class="reference internal" href="../notes/multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="../notes/randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="../notes/serialization.html">Serialization semantics</a></li>
@@ -654,7 +655,7 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
<span class="k">return</span> <span class="nb">type</span><span class="p">(</span><span class="n">obj</span><span class="p">)</span> <span class="ow">in</span> <span class="n">_storage_classes</span></div>


- <div class="viewcode-block" id="set_default_tensor_type"><a class="viewcode-back" href="../generated/torch.set_default_tensor_type.html#torch.set_default_tensor_type">[docs]</a><span class="k">def</span> <span class="nf">set_default_tensor_type</span><span class="p">(</span><span class="n">t</span><span class="p">):</span>
+ <span class="k">def</span> <span class="nf">set_default_tensor_type</span><span class="p">(</span><span class="n">t</span><span class="p">):</span>
<span class="sa">r</span><span class="sd">"""Sets the default ``torch.Tensor`` type to floating point tensor type</span>
<span class="sd">``t``. This type will also be used as default floating point type for</span>
<span class="sd">type inference in :func:`torch.tensor`.</span>
@@ -675,10 +676,10 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
<span class="sd">"""</span>
<span class="k">if</span> <span class="nb">isinstance</span><span class="p">(</span><span class="n">t</span><span class="p">,</span> <span class="n">_string_classes</span><span class="p">):</span>
<span class="n">t</span> <span class="o">=</span> <span class="n">_import_dotted_name</span><span class="p">(</span><span class="n">t</span><span class="p">)</span>
- <span class="n">_C</span><span class="o">.</span><span class="n">_set_default_tensor_type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span></div>
+ <span class="n">_C</span><span class="o">.</span><span class="n">_set_default_tensor_type</span><span class="p">(</span><span class="n">t</span><span class="p">)</span>


- <div class="viewcode-block" id="set_default_dtype"><a class="viewcode-back" href="../generated/torch.set_default_dtype.html#torch.set_default_dtype">[docs]</a><span class="k">def</span> <span class="nf">set_default_dtype</span><span class="p">(</span><span class="n">d</span><span class="p">):</span>
+ <span class="k">def</span> <span class="nf">set_default_dtype</span><span class="p">(</span><span class="n">d</span><span class="p">):</span>
<span class="sa">r</span><span class="sd">"""Sets the default floating point dtype to :attr:`d`.</span>
<span class="sd">This dtype is:</span>
@@ -706,7 +707,7 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
<span class="sd">torch.complex128</span>

<span class="sd">"""</span>
- <span class="n">_C</span><span class="o">.</span><span class="n">_set_default_dtype</span><span class="p">(</span><span class="n">d</span><span class="p">)</span></div>
+ <span class="n">_C</span><span class="o">.</span><span class="n">_set_default_dtype</span><span class="p">(</span><span class="n">d</span><span class="p">)</span>

<span class="k">def</span> <span class="nf">use_deterministic_algorithms</span><span class="p">(</span><span class="n">d</span><span class="p">):</span>
<span class="sa">r</span><span class="sd">""" Sets whether PyTorch operations must use "deterministic"</span>
@@ -730,6 +731,10 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
<span class="sd">* :class:`torch.nn.ConvTranspose2d` when called on CUDA tensor</span>
<span class="sd">* :class:`torch.nn.ConvTranspose3d` when called on CUDA tensor</span>
<span class="sd">* :func:`torch.bmm` when called on sparse-dense CUDA tensors</span>
+ <span class="sd">* :func:`torch.__getitem__` backward when `self` is a CPU tensor and</span>
+ <span class="sd">  ``indices`` is a list of tensors</span>
+ <span class="sd">* :func:`torch.index_put` with ``accumulate=True`` when called on a CPU</span>
+ <span class="sd">  tensor</span>

<span class="sd">The following normally-nondeterministic operations will throw a</span>
<span class="sd">:class:`RuntimeError` when `d=True`:</span>
@@ -957,14 +962,14 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
<span class="c1">################################################################################</span>

<span class="c1"># needs to be before the submodule imports to avoid circular dependencies</span>
- <span class="k">def</span> <span class="nf">_assert</span><span class="p">(</span><span class="n">condition</span><span class="p">,</span> <span class="n">message</span><span class="p">):</span>
+ <div class="viewcode-block" id="_assert"><a class="viewcode-back" href="../generated/torch._assert.html#torch._assert">[docs]</a><span class="k">def</span> <span class="nf">_assert</span><span class="p">(</span><span class="n">condition</span><span class="p">,</span> <span class="n">message</span><span class="p">):</span>
<span class="sa">r</span><span class="sd">"""A wrapper around Python's assert which is symbolically traceable.</span>
<span class="sd">"""</span>
<span class="kn">from</span> <span class="nn">.overrides</span> <span class="kn">import</span> <span class="n">has_torch_function</span><span class="p">,</span> <span class="n">handle_torch_function</span>

<span class="k">if</span> <span class="nb">type</span><span class="p">(</span><span class="n">condition</span><span class="p">)</span> <span class="ow">is</span> <span class="ow">not</span> <span class="n">torch</span><span class="o">.</span><span class="n">Tensor</span> <span class="ow">and</span> <span class="n">has_torch_function</span><span class="p">((</span><span class="n">condition</span><span class="p">,)):</span>
<span class="k">return</span> <span class="n">handle_torch_function</span><span class="p">(</span><span class="n">_assert</span><span class="p">,</span> <span class="p">(</span><span class="n">condition</span><span class="p">,),</span> <span class="n">condition</span><span class="p">,</span> <span class="n">message</span><span class="p">)</span>
- <span class="k">assert</span> <span class="n">condition</span><span class="p">,</span> <span class="n">message</span>
+ <span class="k">assert</span> <span class="n">condition</span><span class="p">,</span> <span class="n">message</span></div>

<span class="c1">################################################################################</span>
<span class="c1"># Import most common subpackages</span>
@@ -1009,9 +1014,9 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
<span class="k">del</span> <span class="n">_torch_docs</span><span class="p">,</span> <span class="n">_tensor_docs</span><span class="p">,</span> <span class="n">_storage_docs</span>


- <div class="viewcode-block" id="compiled_with_cxx11_abi"><a class="viewcode-back" href="../generated/torch.compiled_with_cxx11_abi.html#torch.compiled_with_cxx11_abi">[docs]</a><span class="k">def</span> <span class="nf">compiled_with_cxx11_abi</span><span class="p">():</span>
+ <span class="k">def</span> <span class="nf">compiled_with_cxx11_abi</span><span class="p">():</span>
<span class="sa">r</span><span class="sd">"""Returns whether PyTorch was built with _GLIBCXX_USE_CXX11_ABI=1"""</span>
- <span class="k">return</span> <span class="n">_C</span><span class="o">.</span><span class="n">_GLIBCXX_USE_CXX11_ABI</span></div>
+ <span class="k">return</span> <span class="n">_C</span><span class="o">.</span><span class="n">_GLIBCXX_USE_CXX11_ABI</span>


<span class="c1"># Import the ops "namespace"</span>
@@ -1039,7 +1044,7 @@ <h1>Source code for torch</h1><div class="highlight"><pre>

<span class="c1"># These were previously defined in native_functions.yaml and appeared on the</span>
<span class="c1"># `torch` namespace, but we moved them to c10 dispatch to facilitate custom</span>
- <span class="c1"># class usage. We add these lines here to preserve backward compatbility.</span>
+ <span class="c1"># class usage. We add these lines here to preserve backward compatibility.</span>
<span class="n">quantized_lstm</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ops</span><span class="o">.</span><span class="n">aten</span><span class="o">.</span><span class="n">quantized_lstm</span>
<span class="n">quantized_gru</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ops</span><span class="o">.</span><span class="n">aten</span><span class="o">.</span><span class="n">quantized_gru</span>
</pre></div>