File tree: 13 files changed, +13 -13 lines changed

templates/adding_a_new_example_script

@@ -622,7 +622,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -720,7 +720,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -520,7 +520,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -492,7 +492,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -557,7 +557,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -338,7 +338,7 @@ def main():
     # Setup devices and distributed training
     if args.local_rank == -1 or args.no_cuda:
         args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:
         torch.cuda.set_device(args.local_rank)
         args.device = torch.device("cuda", args.local_rank)
@@ -189,7 +189,7 @@ def main():
     args = parser.parse_args()

     args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-    args.n_gpu = torch.cuda.device_count()
+    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()

     set_seed(args)
@@ -575,7 +575,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -663,7 +663,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -535,7 +535,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -725,7 +725,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -530,7 +530,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
@@ -594,7 +594,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
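
All 13 hunks make the same one-line change: when --no_cuda is passed, args.n_gpu is forced to 0 instead of reporting torch.cuda.device_count(), so code gated on the GPU count (for example, wrapping the model in torch.nn.DataParallel when args.n_gpu > 1) stays on the CPU path even on a machine that has GPUs. Below is a minimal, self-contained sketch of the shared device-setup pattern with the fix applied. The setup_device helper name is invented for illustration; the flag names mirror the scripts' argparse options, and the distributed branch is assumed to follow the same scripts' convention of one process per GPU with an NCCL backend.

import argparse

import torch


def setup_device(args):
    # Single-process mode (no distributed launcher) or CUDA explicitly disabled.
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        # The fix: report 0 GPUs when --no_cuda is set, so later checks like
        # `if args.n_gpu > 1: model = torch.nn.DataParallel(model)` are skipped
        # even on machines that do have CUDA devices.
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        # Distributed mode: each process drives the single GPU given by --local_rank.
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend="nccl")
        args.n_gpu = 1
    args.device = device
    return device


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--no_cuda", action="store_true", help="Avoid CUDA even when it is available")
    parser.add_argument("--local_rank", type=int, default=-1, help="Set by the distributed launcher")
    args = parser.parse_args()
    print(setup_device(args), args.n_gpu)  # e.g. `python sketch.py --no_cuda` prints: cpu 0

Running the sketch with --no_cuda on a multi-GPU machine now prints "cpu 0"; the pre-fix logic would have reported a CPU device alongside a nonzero GPU count, which is the mismatch these 13 scripts shared.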