
Commit f1fe184

Use labels to remove deprecation warnings (#4807)
1 parent 5c0cfc2 commit f1fe184

10 files changed: +17 -17 lines changed
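
The change is identical in every file: the deprecated `masked_lm_labels` / `lm_labels` keyword arguments become `labels`, silencing the deprecation warnings the old names now trigger. A minimal before/after sketch, assuming the tuple-returning transformers API of this era (the checkpoint name is illustrative, not taken from the commit):

from transformers import BertForMaskedLM, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()

input_ids = tokenizer.encode("The capital of France is Paris.", return_tensors="pt")

# Before this commit (emits a deprecation warning):
#     loss, prediction_scores = model(input_ids, masked_lm_labels=input_ids)
# After this commit:
loss, prediction_scores = model(input_ids, labels=input_ids)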

tests/test_modeling_albert.py (+2 -2)

@@ -162,7 +162,7 @@ def create_and_check_albert_for_pretraining(
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             sentence_order_label=sequence_labels,
         )
         result = {
@@ -183,7 +183,7 @@ def create_and_check_albert_for_masked_lm(
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,

tests/test_modeling_bart.py (+3 -3)

@@ -296,7 +296,7 @@ def test_mbart_fast_forward(self):
         lm_model = BartForConditionalGeneration(config).to(torch_device)
         context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
         summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
-        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, lm_labels=summary)
+        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
         expected_shape = (*summary.shape, config.vocab_size)
         self.assertEqual(logits.shape, expected_shape)

@@ -361,7 +361,7 @@ def test_lm_forward(self):
         lm_labels = ids_tensor([batch_size, input_ids.shape[1]], self.vocab_size).to(torch_device)
         lm_model = BartForConditionalGeneration(config)
         lm_model.to(torch_device)
-        loss, logits, enc_features = lm_model(input_ids=input_ids, lm_labels=lm_labels)
+        loss, logits, enc_features = lm_model(input_ids=input_ids, labels=lm_labels)
         expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
         self.assertEqual(logits.shape, expected_shape)
         self.assertIsInstance(loss.item(), float)
@@ -381,7 +381,7 @@ def test_lm_uneven_forward(self):
         lm_model = BartForConditionalGeneration(config).to(torch_device)
         context = torch.Tensor([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]]).long().to(torch_device)
         summary = torch.Tensor([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]]).long().to(torch_device)
-        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, lm_labels=summary)
+        loss, logits, enc_features = lm_model(input_ids=context, decoder_input_ids=summary, labels=summary)
         expected_shape = (*summary.shape, config.vocab_size)
         self.assertEqual(logits.shape, expected_shape)

tests/test_modeling_bert.py (+4 -4)

@@ -218,7 +218,7 @@ def create_and_check_bert_for_masked_lm(
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,
@@ -248,15 +248,15 @@ def create_and_check_bert_model_for_masked_lm_as_decoder(
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             encoder_hidden_states=encoder_hidden_states,
             encoder_attention_mask=encoder_attention_mask,
         )
         loss, prediction_scores = model(
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             encoder_hidden_states=encoder_hidden_states,
         )
         result = {
@@ -294,7 +294,7 @@ def create_and_check_bert_for_pretraining(
             input_ids,
             attention_mask=input_mask,
             token_type_ids=token_type_ids,
-            masked_lm_labels=token_labels,
+            labels=token_labels,
             next_sentence_label=sequence_labels,
         )
         result = {

tests/test_modeling_distilbert.py (+1 -1)

@@ -151,7 +151,7 @@ def create_and_check_distilbert_for_masked_lm(
         model = DistilBertForMaskedLM(config=config)
         model.to(torch_device)
         model.eval()
-        loss, prediction_scores = model(input_ids, attention_mask=input_mask, masked_lm_labels=token_labels)
+        loss, prediction_scores = model(input_ids, attention_mask=input_mask, labels=token_labels)
         result = {
             "loss": loss,
             "prediction_scores": prediction_scores,

tests/test_modeling_electra.py (+1 -1)

@@ -180,7 +180,7 @@ def create_and_check_electra_for_masked_lm(
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,

tests/test_modeling_gpt2.py (+1 -1)

@@ -268,7 +268,7 @@ def create_and_check_double_lm_head_model(
             "mc_token_ids": mc_token_ids,
             "attention_mask": multiple_choice_input_mask,
             "token_type_ids": multiple_choice_token_type_ids,
-            "lm_labels": multiple_choice_inputs_ids,
+            "labels": multiple_choice_inputs_ids,
         }

         loss, lm_logits, mc_logits, _ = model(**inputs)
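
For the double-head model only the LM labels are renamed; `mc_token_ids` keeps its name. A hedged sketch of the same call outside the tester, assuming the tuple-returning API of this era (the toy config and shapes are illustrative, not taken from the test):

import torch
from transformers import GPT2Config, GPT2DoubleHeadsModel

config = GPT2Config(vocab_size=100, n_positions=32, n_ctx=32, n_embd=32, n_layer=2, n_head=2)
model = GPT2DoubleHeadsModel(config)
model.eval()

# (batch, num_choices, seq_len) inputs; the classification token sits at position 4.
input_ids = torch.randint(0, 100, (1, 2, 5))
mc_token_ids = torch.full((1, 2), 4, dtype=torch.long)

# `lm_labels` is the deprecated spelling; the LM loss now comes from `labels`.
loss, lm_logits, mc_logits, _ = model(input_ids, mc_token_ids=mc_token_ids, labels=input_ids)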

tests/test_modeling_longformer.py (+2 -2)

@@ -164,7 +164,7 @@ def create_and_check_longformer_for_masked_lm(
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,
@@ -361,7 +361,7 @@ def test_inference_masked_lm(self):
             [[0] + [20920, 232, 328, 1437] * 1000 + [2]], dtype=torch.long, device=torch_device
         )  # long input

-        loss, prediction_scores = model(input_ids, masked_lm_labels=input_ids)
+        loss, prediction_scores = model(input_ids, labels=input_ids)

         expected_loss = torch.tensor(0.0620, device=torch_device)
         expected_prediction_scores_sum = torch.tensor(-6.1599e08, device=torch_device)

tests/test_modeling_openai.py (+1 -1)

@@ -169,7 +169,7 @@ def create_and_check_double_lm_head_model(self, config, input_ids, head_mask, to
         model.to(torch_device)
         model.eval()

-        loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, lm_labels=input_ids)
+        loss, lm_logits, mc_logits = model(input_ids, token_type_ids=token_type_ids, labels=input_ids)

         result = {"loss": loss, "lm_logits": lm_logits}

tests/test_modeling_roberta.py (+1 -1)

@@ -155,7 +155,7 @@ def create_and_check_roberta_for_masked_lm(
         model.to(torch_device)
         model.eval()
         loss, prediction_scores = model(
-            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, masked_lm_labels=token_labels
+            input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels
         )
         result = {
             "loss": loss,

tests/test_modeling_t5.py (+1 -1)

@@ -206,7 +206,7 @@ def create_and_check_t5_with_lm_head(
             input_ids=input_ids,
             decoder_input_ids=decoder_input_ids,
             decoder_attention_mask=decoder_attention_mask,
-            lm_labels=lm_labels,
+            labels=lm_labels,
         )
         loss, prediction_scores, _, _ = outputs
         self.parent.assertEqual(len(outputs), 4)
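
The same rename applies on the seq2seq side: T5's LM loss is now requested through `labels` rather than `lm_labels`. A minimal sketch under the same era assumptions, with a toy config and random ids rather than the tester's values:

import torch
from transformers import T5Config, T5ForConditionalGeneration

config = T5Config(vocab_size=100, d_model=32, d_kv=16, d_ff=64, num_layers=2, num_heads=2)
model = T5ForConditionalGeneration(config)
model.eval()

input_ids = torch.randint(0, 100, (2, 7))
decoder_input_ids = torch.randint(0, 100, (2, 5))

# Passing `labels` (formerly `lm_labels`) puts the loss first in the output tuple.
outputs = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids, labels=decoder_input_ids)
loss, prediction_scores = outputs[:2]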
