|
235 | 235 | <div class="pytorch-left-menu-search">
|
236 | 236 |
|
237 | 237 | <div class="version">
|
238 |
| - <a href='https://pytorch.org/docs/versions.html'>master (1.14.0a0+git876b702 ) ▼</a> |
| 238 | + <a href='https://pytorch.org/docs/versions.html'>master (2.0.0a0+git9eccfed ) ▼</a> |
239 | 239 | </div>
|
240 | 240 |
|
241 | 241 |
|
@@ -603,20 +603,46 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
|
603 | 603 | <span class="n">kernel32</span><span class="o">.</span><span class="n">SetErrorMode</span><span class="p">(</span><span class="n">prev_error_mode</span><span class="p">)</span>
|
604 | 604 |
|
605 | 605 |
|
| 606 | +<span class="k">def</span> <span class="nf">_preload_cuda_deps</span><span class="p">():</span> |
| 607 | + <span class="sd">""" Preloads cudnn/cublas deps if they could not be found otherwise """</span> |
| 608 | + <span class="c1"># Should only be called on Linux if default path resolution has failed</span> |
| 609 | + <span class="k">assert</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">==</span> <span class="s1">'Linux'</span><span class="p">,</span> <span class="s1">'Should only be called on Linux'</span> |
| 610 | + <span class="k">for</span> <span class="n">path</span> <span class="ow">in</span> <span class="n">sys</span><span class="o">.</span><span class="n">path</span><span class="p">:</span> |
| 611 | + <span class="n">nvidia_path</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">path</span><span class="p">,</span> <span class="s1">'nvidia'</span><span class="p">)</span> |
| 612 | + <span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">nvidia_path</span><span class="p">):</span> |
| 613 | + <span class="k">continue</span> |
| 614 | + <span class="n">cublas_path</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">nvidia_path</span><span class="p">,</span> <span class="s1">'cublas'</span><span class="p">,</span> <span class="s1">'lib'</span><span class="p">,</span> <span class="s1">'libcublas.so.11'</span><span class="p">)</span> |
| 615 | + <span class="n">cudnn_path</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">nvidia_path</span><span class="p">,</span> <span class="s1">'cudnn'</span><span class="p">,</span> <span class="s1">'lib'</span><span class="p">,</span> <span class="s1">'libcudnn.so.8'</span><span class="p">)</span> |
| 616 | + <span class="k">if</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">cublas_path</span><span class="p">)</span> <span class="ow">or</span> <span class="ow">not</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">exists</span><span class="p">(</span><span class="n">cudnn_path</span><span class="p">):</span> |
| 617 | + <span class="k">continue</span> |
| 618 | + <span class="k">break</span> |
| 619 | + |
| 620 | + <span class="n">ctypes</span><span class="o">.</span><span class="n">CDLL</span><span class="p">(</span><span class="n">cublas_path</span><span class="p">)</span> |
| 621 | + <span class="n">ctypes</span><span class="o">.</span><span class="n">CDLL</span><span class="p">(</span><span class="n">cudnn_path</span><span class="p">)</span> |
| 622 | + |
| 623 | + |
606 | 624 | <span class="c1"># See Note [Global dependencies]</span>
|
607 | 625 | <span class="k">def</span> <span class="nf">_load_global_deps</span><span class="p">():</span>
|
608 |
| - <span class="k">if</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">==</span> <span class="s1">'Windows'</span> <span class="ow">or</span> <span class="n">sys</span><span class="o">.</span><span class="n">executable</span> <span class="o">==</span> <span class="s1">'torch_deploy'</span><span class="p">:</span> |
| 626 | + <span class="k">if</span> <span class="n">sys</span><span class="o">.</span><span class="n">executable</span> <span class="o">==</span> <span class="s1">'torch_deploy'</span> <span class="ow">or</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">==</span> <span class="s1">'Windows'</span><span class="p">:</span> |
609 | 627 | <span class="k">return</span>
|
610 | 628 |
|
611 | 629 | <span class="n">lib_name</span> <span class="o">=</span> <span class="s1">'libtorch_global_deps'</span> <span class="o">+</span> <span class="p">(</span><span class="s1">'.dylib'</span> <span class="k">if</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">==</span> <span class="s1">'Darwin'</span> <span class="k">else</span> <span class="s1">'.so'</span><span class="p">)</span>
|
612 | 630 | <span class="n">here</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">abspath</span><span class="p">(</span><span class="vm">__file__</span><span class="p">)</span>
|
613 | 631 | <span class="n">lib_path</span> <span class="o">=</span> <span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">join</span><span class="p">(</span><span class="n">os</span><span class="o">.</span><span class="n">path</span><span class="o">.</span><span class="n">dirname</span><span class="p">(</span><span class="n">here</span><span class="p">),</span> <span class="s1">'lib'</span><span class="p">,</span> <span class="n">lib_name</span><span class="p">)</span>
|
614 | 632 |
|
615 |
| - <span class="n">ctypes</span><span class="o">.</span><span class="n">CDLL</span><span class="p">(</span><span class="n">lib_path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="n">ctypes</span><span class="o">.</span><span class="n">RTLD_GLOBAL</span><span class="p">)</span> |
| 633 | + <span class="k">try</span><span class="p">:</span> |
| 634 | + <span class="n">ctypes</span><span class="o">.</span><span class="n">CDLL</span><span class="p">(</span><span class="n">lib_path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="n">ctypes</span><span class="o">.</span><span class="n">RTLD_GLOBAL</span><span class="p">)</span> |
| 635 | + <span class="k">except</span> <span class="ne">OSError</span> <span class="k">as</span> <span class="n">err</span><span class="p">:</span> |
| 636 | + <span class="c1"># Can only happen for wheels with cublas as PYPI deps</span> |
| 637 | + <span class="c1"># As PyTorch is not purelib, but nvidia-cublas-cu11 is</span> |
| 638 | + <span class="k">if</span> <span class="s1">'libcublas.so.11'</span> <span class="ow">not</span> <span class="ow">in</span> <span class="n">err</span><span class="o">.</span><span class="n">args</span><span class="p">[</span><span class="mi">0</span><span class="p">]:</span> |
| 639 | + <span class="k">raise</span> <span class="n">err</span> |
| 640 | + <span class="n">_preload_cuda_deps</span><span class="p">()</span> |
| 641 | + <span class="n">ctypes</span><span class="o">.</span><span class="n">CDLL</span><span class="p">(</span><span class="n">lib_path</span><span class="p">,</span> <span class="n">mode</span><span class="o">=</span><span class="n">ctypes</span><span class="o">.</span><span class="n">RTLD_GLOBAL</span><span class="p">)</span> |
616 | 642 |
|
617 | 643 |
|
618 | 644 | <span class="k">if</span> <span class="p">(</span><span class="n">USE_RTLD_GLOBAL_WITH_LIBTORCH</span> <span class="ow">or</span> <span class="n">os</span><span class="o">.</span><span class="n">getenv</span><span class="p">(</span><span class="s1">'TORCH_USE_RTLD_GLOBAL'</span><span class="p">))</span> <span class="ow">and</span> \
|
619 |
| - <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">!=</span> <span class="s1">'Windows'</span><span class="p">:</span> |
| 645 | + <span class="p">(</span><span class="n">sys</span><span class="o">.</span><span class="n">executable</span> <span class="o">==</span> <span class="s2">"torch_deploy"</span> <span class="ow">or</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">!=</span> <span class="s1">'Windows'</span><span class="p">):</span> |
620 | 646 | <span class="c1"># Do it the hard way. You might want to load libtorch with RTLD_GLOBAL in a</span>
|
621 | 647 | <span class="c1"># few circumstances:</span>
|
622 | 648 | <span class="c1">#</span>
|
@@ -1391,7 +1417,7 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
|
1391 | 1417 | <span class="c1">################################################################################</span>
|
1392 | 1418 |
|
1393 | 1419 | <span class="k">def</span> <span class="nf">manager_path</span><span class="p">():</span>
|
1394 |
| - <span class="k">if</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">==</span> <span class="s1">'Windows'</span> <span class="ow">or</span> <span class="n">sys</span><span class="o">.</span><span class="n">executable</span> <span class="o">==</span> <span class="s1">'torch_deploy'</span><span class="p">:</span> |
| 1420 | + <span class="k">if</span> <span class="n">sys</span><span class="o">.</span><span class="n">executable</span> <span class="o">==</span> <span class="s1">'torch_deploy'</span> <span class="ow">or</span> <span class="n">platform</span><span class="o">.</span><span class="n">system</span><span class="p">()</span> <span class="o">==</span> <span class="s1">'Windows'</span><span class="p">:</span> |
1395 | 1421 | <span class="k">return</span> <span class="sa">b</span><span class="s2">""</span>
|
1396 | 1422 | <span class="n">path</span> <span class="o">=</span> <span class="n">get_file_path</span><span class="p">(</span><span class="s1">'torch'</span><span class="p">,</span> <span class="s1">'bin'</span><span class="p">,</span> <span class="s1">'torch_shm_manager'</span><span class="p">)</span>
|
1397 | 1423 | <span class="n">prepare_multiprocessing_environment</span><span class="p">(</span><span class="n">get_file_path</span><span class="p">(</span><span class="s1">'torch'</span><span class="p">))</span>
|
@@ -1608,6 +1634,7 @@ <h1>Source code for torch</h1><div class="highlight"><pre>
|
1608 | 1634 | <span class="sd"> return torch.sin(x) + torch.cos(x)</span>
|
1609 | 1635 |
|
1610 | 1636 | <span class="sd"> """</span>
|
| 1637 | + <span class="n">_C</span><span class="o">.</span><span class="n">_log_api_usage_once</span><span class="p">(</span><span class="s2">"torch.compile"</span><span class="p">)</span> |
1611 | 1638 | <span class="c1"># Decorator mode</span>
|
1612 | 1639 | <span class="k">if</span> <span class="n">model</span> <span class="ow">is</span> <span class="kc">None</span><span class="p">:</span>
|
1613 | 1640 | <span class="k">def</span> <span class="nf">fn</span><span class="p">(</span><span class="n">model</span><span class="p">:</span> <span class="n">Callable</span><span class="p">):</span>
|
|
0 commit comments