
Commit 6b1ff25

Authored Mar 2, 2020
fix n_gpu count when no_cuda flag is activated (#3077)
* fix n_gpu count when no_cuda flag is activated
* someone was left behind
1 parent 298bed1 · commit 6b1ff25

13 files changed: +13 −13 lines changed
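
All thirteen scripts share the same device-setup block, and the change is identical in each: torch.cuda.device_count() reports the number of visible GPUs whether or not --no_cuda is set, so args.n_gpu stayed nonzero on GPU machines even when CUDA had been explicitly disabled, and downstream logic keyed on n_gpu still behaved as if GPUs were in use. The sketch below shows the block with the fix applied; it is a minimal, self-contained approximation, and the argparse wiring, the simplified else branch, and the final args.device assignment are assumptions added here for illustration rather than lines taken from the diff.

import argparse

import torch

parser = argparse.ArgumentParser()
# Flag names mirror the diff context; each script's full CLI is omitted here.
parser.add_argument("--no_cuda", action="store_true", help="Avoid using CUDA even when it is available")
parser.add_argument("--local_rank", type=int, default=-1, help="Local rank for distributed training")
args = parser.parse_args()

# Setup CUDA, GPU & distributed training
if args.local_rank == -1 or args.no_cuda:
    device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
    # The fix: device_count() ignores --no_cuda, so force the count to 0 when the flag is set.
    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
else:
    # Distributed case, simplified: the real scripts also initialize the
    # torch.distributed process group here; one GPU per process is assumed.
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    args.n_gpu = 1
args.device = device
print(f"device: {args.device}, n_gpu: {args.n_gpu}")

Run on a GPU machine with --no_cuda: before this commit n_gpu would still report the real GPU count; with the fix it is 0, so the CPU path is taken consistently.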
 

examples/contrib/run_swag.py

+1 −1

@@ -622,7 +622,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/distillation/run_squad_w_distillation.py

+1 −1

@@ -720,7 +720,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/hans/test_hans.py

+1 −1

@@ -520,7 +520,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/mm-imdb/run_mmimdb.py

+1 −1

@@ -492,7 +492,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/ner/run_ner.py

+1 −1

@@ -557,7 +557,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/run_bertology.py

+1 −1

@@ -338,7 +338,7 @@ def main():
     # Setup devices and distributed training
     if args.local_rank == -1 or args.no_cuda:
         args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:
         torch.cuda.set_device(args.local_rank)
         args.device = torch.device("cuda", args.local_rank)

examples/run_generation.py

+1 −1

@@ -189,7 +189,7 @@ def main():
     args = parser.parse_args()

     args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-    args.n_gpu = torch.cuda.device_count()
+    args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()

     set_seed(args)

examples/run_glue.py

+1 −1

@@ -575,7 +575,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/run_language_modeling.py

+1 −1

@@ -663,7 +663,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/run_multiple_choice.py

+1 −1

@@ -535,7 +535,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/run_squad.py

+1 −1

@@ -725,7 +725,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

examples/run_xnli.py

+1 −1

@@ -530,7 +530,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)

templates/adding_a_new_example_script/run_xxx.py

+1 −1

@@ -594,7 +594,7 @@ def main():
     # Setup CUDA, GPU & distributed training
     if args.local_rank == -1 or args.no_cuda:
         device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
-        args.n_gpu = torch.cuda.device_count()
+        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
     else:  # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
         torch.cuda.set_device(args.local_rank)
         device = torch.device("cuda", args.local_rank)
