
Commit 76be189

typos
Parent: a615499

2 files changed, 4 insertions(+), 4 deletions(-)

examples/run_glue.py

+2 −2
@@ -116,8 +116,8 @@ def train(args, train_dataset, model, tokenizer):
                       'attention_mask': batch[1],
                       'token_type_ids': batch[2] if args.model_type in ['bert', 'xlnet'] else None,  # XLM don't use segment_ids
                       'labels': batch[3]}
-            ouputs = model(**inputs)
-            loss = ouputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
+            outputs = model(**inputs)
+            loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
 
             if args.n_gpu > 1:
                 loss = loss.mean() # mean() to average on multi-gpu parallel training
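
For readers unfamiliar with the convention the comment refers to: in pytorch-transformers, calling a model returns a plain tuple, with the loss as the first element when labels are passed (and the logits first otherwise). A minimal, self-contained sketch of that pattern, assuming bert-base-uncased weights can be downloaded; none of these names come from the commit itself:

import torch
from pytorch_transformers import BertForSequenceClassification, BertTokenizer

tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForSequenceClassification.from_pretrained('bert-base-uncased')

input_ids = torch.tensor([tokenizer.encode("Hello, world!")])  # shape (1, seq_len)
labels = torch.tensor([1])

outputs = model(input_ids, labels=labels)  # a plain tuple, not an object
loss, logits = outputs[0], outputs[1]      # loss comes first because labels were supplied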

examples/run_squad.py

+2 −2
@@ -129,8 +129,8 @@ def train(args, train_dataset, model, tokenizer):
             if args.model_type in ['xlnet', 'xlm']:
                 inputs.update({'cls_index': batch[5],
                                'p_mask': batch[6]})
-            ouputs = model(**inputs)
-            loss = ouputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
+            outputs = model(**inputs)
+            loss = outputs[0] # model outputs are always tuple in pytorch-transformers (see doc)
 
             if args.n_gpu > 1:
                 loss = loss.mean() # mean() to average on multi-gpu parallel (not distributed) training
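
The unchanged context below the fix shows the companion multi-GPU idiom: when the model is wrapped in torch.nn.DataParallel, each replica computes its own loss, and the gathered result is a vector with one entry per GPU, so it is averaged before backpropagation. A rough sketch of why the .mean() is needed; ToyModel is a hypothetical stand-in, not code from these example scripts:

import torch
import torch.nn as nn

class ToyModel(nn.Module):
    # Follows the same convention: forward returns a tuple with the loss first.
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(4, 1)

    def forward(self, x, labels):
        loss = ((self.linear(x) - labels) ** 2).mean()
        return (loss,)

model = ToyModel()
n_gpu = torch.cuda.device_count()
if n_gpu > 1:
    model = nn.DataParallel(model.cuda())

x, labels = torch.randn(8, 4), torch.randn(8, 1)
if n_gpu > 1:
    x, labels = x.cuda(), labels.cuda()

loss = model(x, labels)[0]
if n_gpu > 1:
    loss = loss.mean()  # DataParallel stacks one scalar loss per GPU into a vector
loss.backward()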
