@@ -373,7 +373,7 @@ <h1>Source code for torch.nn.parallel.data_parallel</h1><div class="highlight"><
<span class="k">return</span>


-<div class="viewcode-block" id="DataParallel"><a class="viewcode-back" href="../../../../generated/torch.nn.DataParallel.html#torch.nn.DataParallel">[docs]</a><span class="k">class</span> <span class="nc">DataParallel</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
+<span class="k">class</span> <span class="nc">DataParallel</span><span class="p">(</span><span class="n">Module</span><span class="p">):</span>
<span class="sa">r</span><span class="sd">"""Implements data parallelism at the module level.</span>

<span class="sd">This container parallelizes the application of the given :attr:`module` by</span>
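For context, this hunk only moves the `[docs]` viewcode anchor off the `DataParallel` class; the API itself is unchanged. A minimal usage sketch of that API (module, sizes, and device ids are illustrative, not taken from this diff):

import torch
import torch.nn as nn

# Minimal sketch (illustrative module and sizes): DataParallel splits dim 0
# of the input across the listed GPUs, runs one replica of the module per
# device, and gathers the outputs back on device_ids[0].
model = nn.Linear(10, 5)
if torch.cuda.is_available() and torch.cuda.device_count() > 1:
    model = nn.DataParallel(model, device_ids=[0, 1]).to("cuda:0")

x = torch.randn(64, 10).to(next(model.parameters()).device)
y = model(x)  # shape (64, 5); each replica saw a slice of the batch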
@@ -505,10 +505,10 @@ <h1>Source code for torch.nn.parallel.data_parallel</h1><div class="highlight"><
<span class="k">return</span> <span class="n">parallel_apply</span><span class="p">(</span><span class="n">replicas</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">kwargs</span><span class="p">,</span> <span class="bp">self</span><span class="o">.</span><span class="n">device_ids</span><span class="p">[:</span><span class="nb">len</span><span class="p">(</span><span class="n">replicas</span><span class="p">)])</span>

<span class="k">def</span> <span class="nf">gather</span><span class="p">(</span><span class="bp">self</span><span class="p">,</span> <span class="n">outputs</span><span class="p">,</span> <span class="n">output_device</span><span class="p">):</span>
-<span class="k">return</span> <span class="n">gather</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="n">output_device</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">)</span></div>
+<span class="k">return</span> <span class="n">gather</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="n">output_device</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="bp">self</span><span class="o">.</span><span class="n">dim</span><span class="p">)</span>


-<span class="k">def</span> <span class="nf">data_parallel</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">device_ids</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">output_device</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">module_kwargs</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
+<div class="viewcode-block" id="data_parallel"><a class="viewcode-back" href="../../../../nn.functional.html#torch.nn.parallel.data_parallel">[docs]</a><span class="k">def</span> <span class="nf">data_parallel</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">device_ids</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">output_device</span><span class="o">=</span><span class="kc">None</span><span class="p">,</span> <span class="n">dim</span><span class="o">=</span><span class="mi">0</span><span class="p">,</span> <span class="n">module_kwargs</span><span class="o">=</span><span class="kc">None</span><span class="p">):</span>
<span class="sa">r</span><span class="sd">"""Evaluates module(input) in parallel across the GPUs given in device_ids.</span>

<span class="sd">This is the functional version of the DataParallel module.</span>
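This hunk re-anchors the `[docs]` marker onto the functional form, `torch.nn.parallel.data_parallel`, which the docstring describes as the functional version of the module. A minimal sketch of calling it directly (assumes at least two visible GPUs; shapes are illustrative):

import torch
import torch.nn as nn
from torch.nn.parallel import data_parallel

# Minimal sketch: one parallel forward pass with no wrapper kept around.
# Assumes GPUs 0 and 1 are available; module and shapes are illustrative.
model = nn.Linear(10, 5).to("cuda:0")
x = torch.randn(64, 10, device="cuda:0")
y = data_parallel(model, x, device_ids=[0, 1], output_device=0)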
@@ -548,7 +548,7 @@ <h1>Source code for torch.nn.parallel.data_parallel</h1><div class="highlight"><
<span class="n">used_device_ids</span> <span class="o">=</span> <span class="n">device_ids</span><span class="p">[:</span><span class="nb">len</span><span class="p">(</span><span class="n">inputs</span><span class="p">)]</span>
<span class="n">replicas</span> <span class="o">=</span> <span class="n">replicate</span><span class="p">(</span><span class="n">module</span><span class="p">,</span> <span class="n">used_device_ids</span><span class="p">)</span>
<span class="n">outputs</span> <span class="o">=</span> <span class="n">parallel_apply</span><span class="p">(</span><span class="n">replicas</span><span class="p">,</span> <span class="n">inputs</span><span class="p">,</span> <span class="n">module_kwargs</span><span class="p">,</span> <span class="n">used_device_ids</span><span class="p">)</span>
-<span class="k">return</span> <span class="n">gather</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="n">output_device</span><span class="p">,</span> <span class="n">dim</span><span class="p">)</span>
+<span class="k">return</span> <span class="n">gather</span><span class="p">(</span><span class="n">outputs</span><span class="p">,</span> <span class="n">output_device</span><span class="p">,</span> <span class="n">dim</span><span class="p">)</span></div>
</pre></div>

</article>
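The function body shown in this last hunk is a replicate → parallel_apply → gather pipeline over already-scattered inputs. A sketch of the same steps spelled out with those primitives, which torch.nn.parallel exports alongside data_parallel (assumes GPUs 0 and 1; module and shapes are illustrative):

import torch
import torch.nn as nn
from torch.nn.parallel import replicate, scatter, parallel_apply, gather

# Sketch of the steps the hunk above performs, using the same primitives.
# Assumes at least two visible GPUs; module and shapes are illustrative.
module = nn.Linear(10, 5).to("cuda:0")
device_ids = [0, 1]

batch = torch.randn(8, 10, device="cuda:0")
inputs = scatter(batch, device_ids)              # split dim 0 across devices
used_ids = device_ids[:len(inputs)]              # drop any unused devices
replicas = replicate(module, used_ids)           # one module copy per GPU
outputs = parallel_apply(replicas, inputs, devices=used_ids)
result = gather(outputs, target_device=0)        # reassemble on cuda:0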