Commit 40ea9ab

Add many missing spaces in adjacent strings (#26751)
Add missing spaces in adjacent strings
1 parent 3bc6550 commit 40ea9ab
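
Background (an aside, not part of the commit page): Python implicitly concatenates adjacent string literals at compile time, so a fragment that ends without a trailing space runs straight into the next fragment. A minimal sketch of the bug class this commit fixes, reusing one of the help strings from the diff below:

# Minimal illustration (not from the commit): adjacent string literals are
# joined with no separator, so a missing trailing space glues words together.
broken = (
    "The encoder model checkpoint for weights initialization."  # no trailing space
    "Don't set if you want to train an encoder model from scratch."
)
fixed = (
    "The encoder model checkpoint for weights initialization. "  # trailing space added
    "Don't set if you want to train an encoder model from scratch."
)
print(broken)  # ...weights initialization.Don't set...
print(fixed)   # ...weights initialization. Don't set...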

File tree: 154 files changed (+331, -331 lines)


examples/flax/image-captioning/create_model_from_encoder_decoder_models.py (+2, -2)

@@ -37,15 +37,15 @@ class ModelArguments:
     encoder_model_name_or_path: str = field(
         metadata={
             "help": (
-                "The encoder model checkpoint for weights initialization."
+                "The encoder model checkpoint for weights initialization. "
                 "Don't set if you want to train an encoder model from scratch."
             )
         },
     )
     decoder_model_name_or_path: str = field(
         metadata={
             "help": (
-                "The decoder model checkpoint for weights initialization."
+                "The decoder model checkpoint for weights initialization. "
                 "Don't set if you want to train a decoder model from scratch."
             )
         },

examples/flax/image-captioning/run_image_captioning_flax.py (+4, -4)

@@ -203,7 +203,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -256,7 +256,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "The maximum total sequence length for validation target text after tokenization. Sequences longer "
-                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
+                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                 "This argument is also used to override the `max_length` param of `model.generate`, which is used "
                 "during evaluation."
             )

@@ -423,7 +423,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use --overwrite_output_dir to overcome."
         )

@@ -685,7 +685,7 @@ def preprocess_fn(examples, max_target_length, check_image=True):
     eval_batch_size = int(training_args.per_device_eval_batch_size) * jax.device_count()
     if training_args.block_size % train_batch_size > 0 or training_args.block_size % eval_batch_size > 0:
         raise ValueError(
-            "`training_args.block_size` needs to be a multiple of the global train/eval batch size."
+            "`training_args.block_size` needs to be a multiple of the global train/eval batch size. "
             f"Got {training_args.block_size}, {train_batch_size} and {eval_batch_size} respectively instead."
         )

examples/flax/language-modeling/run_bart_dlm_flax.py (+2, -2)

@@ -487,7 +487,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use --overwrite_output_dir to overcome."
         )

@@ -606,7 +606,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

examples/flax/language-modeling/run_clm_flax.py (+4, -4)

@@ -190,7 +190,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -368,7 +368,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use --overwrite_output_dir to overcome."
         )

@@ -524,7 +524,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

@@ -586,7 +586,7 @@ def tokenize_function(examples):
     else:
         if data_args.block_size > tokenizer.model_max_length:
             logger.warning(
-                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model"
+                f"The block_size passed ({data_args.block_size}) is larger than the maximum length for the model "
                 f"({tokenizer.model_max_length}). Using block_size={tokenizer.model_max_length}."
             )
         block_size = min(data_args.block_size, tokenizer.model_max_length)

examples/flax/language-modeling/run_mlm_flax.py (+3, -3)

@@ -195,7 +195,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -411,7 +411,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use --overwrite_output_dir to overcome."
         )

@@ -556,7 +556,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

examples/flax/language-modeling/run_t5_mlm_flax.py (+2, -2)

@@ -528,7 +528,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use --overwrite_output_dir to overcome."
         )

@@ -647,7 +647,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

examples/flax/question-answering/run_qa.py (+2, -2)

@@ -176,7 +176,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -582,7 +582,7 @@ def main():
     if data_args.max_seq_length > tokenizer.model_max_length:
         logger.warning(
-            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
+            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
             f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
         )
     max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

examples/flax/speech-recognition/run_flax_speech_recognition_seq2seq.py (+1, -1)

@@ -415,7 +415,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use `--overwrite_output_dir` to overcome."
         )

examples/flax/summarization/run_summarization_flax.py (+4, -4)

@@ -209,7 +209,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -268,7 +268,7 @@ class DataTrainingArguments:
         metadata={
             "help": (
                 "The maximum total sequence length for validation target text after tokenization. Sequences longer "
-                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`."
+                "than this will be truncated, sequences shorter will be padded. Will default to `max_target_length`. "
                 "This argument is also used to override the `max_length` param of `model.generate`, which is used "
                 "during evaluation."
             )

@@ -451,7 +451,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use --overwrite_output_dir to overcome."
         )

@@ -558,7 +558,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

examples/flax/text-classification/run_flax_glue.py (+1, -1)

@@ -122,7 +122,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

examples/flax/token-classification/run_flax_ner.py (+1, -1)

@@ -170,7 +170,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

examples/flax/vision/run_image_classification.py (+2, -2)

@@ -180,7 +180,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -291,7 +291,7 @@ def main():
         and not training_args.overwrite_output_dir
     ):
         raise ValueError(
-            f"Output directory ({training_args.output_dir}) already exists and is not empty."
+            f"Output directory ({training_args.output_dir}) already exists and is not empty. "
             "Use --overwrite_output_dir to overcome."
         )

examples/legacy/multiple_choice/utils_multiple_choice.py (+2, -2)

@@ -379,7 +379,7 @@ def get_test_examples(self, data_dir):
         """See base class."""
         logger.info("LOOKING AT {} dev".format(data_dir))
         raise ValueError(
-            "For swag testing, the input file does not contain a label column. It can not be tested in current code"
+            "For swag testing, the input file does not contain a label column. It can not be tested in current code "
            "setting!"
         )
         return self._create_examples(self._read_csv(os.path.join(data_dir, "test.csv")), "test")

@@ -541,7 +541,7 @@ def convert_examples_to_features(
             if "num_truncated_tokens" in inputs and inputs["num_truncated_tokens"] > 0:
                 logger.info(
                     "Attention! you are cropping tokens (swag task is ok). "
-                    "If you are training ARC and RACE and you are poping question + options,"
+                    "If you are training ARC and RACE and you are poping question + options, "
                     "you need to try to use a bigger max seq length!"
                 )

examples/legacy/pytorch-lightning/lightning_base.py (+1, -1)

@@ -313,7 +313,7 @@ def add_generic_args(parser, root_dir) -> None:
         type=str,
         default="O2",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )

examples/legacy/question-answering/run_squad.py (+1, -1)

@@ -663,7 +663,7 @@ def main():
         type=str,
         default="O1",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )

examples/legacy/run_language_modeling.py (+2, -2)

@@ -149,7 +149,7 @@ class DataTrainingArguments:
         default=-1,
         metadata={
             "help": (
-                "Optional input sequence length after tokenization."
+                "Optional input sequence length after tokenization. "
                 "The training dataset will be truncated in block of this size for training."
                 "Default to the model max input length for single sentence inputs (take into account special tokens)."
             )

@@ -283,7 +283,7 @@ def main():
     if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
         raise ValueError(
-            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
+            "BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the "
             "--mlm flag (masked language modeling)."
         )

examples/legacy/run_swag.py (+1, -1)

@@ -579,7 +579,7 @@ def main():
         type=str,
         default="O1",
         help=(
-            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
+            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']. "
             "See details at https://nvidia.github.io/apex/amp.html"
         ),
     )

examples/pytorch/audio-classification/run_audio_classification.py (+3, -3)

@@ -172,7 +172,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -189,14 +189,14 @@ def __post_init__(self):
         if not self.freeze_feature_extractor and self.freeze_feature_encoder:
             warnings.warn(
                 "The argument `--freeze_feature_extractor` is deprecated and "
-                "will be removed in a future version. Use `--freeze_feature_encoder`"
+                "will be removed in a future version. Use `--freeze_feature_encoder` "
                 "instead. Setting `freeze_feature_encoder==True`.",
                 FutureWarning,
             )
         if self.freeze_feature_extractor and not self.freeze_feature_encoder:
             raise ValueError(
                 "The argument `--freeze_feature_extractor` is deprecated and "
-                "should not be used in combination with `--freeze_feature_encoder`."
+                "should not be used in combination with `--freeze_feature_encoder`. "
                 "Only make use of `--freeze_feature_encoder`."
             )

examples/pytorch/contrastive-image-text/run_clip.py (+2, -2)

@@ -107,7 +107,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

@@ -358,7 +358,7 @@ def main():
         )
     else:
         raise ValueError(
-            "You are instantiating a new tokenizer from scratch. This is not supported by this script."
+            "You are instantiating a new tokenizer from scratch. This is not supported by this script. "
             "You can do it from another script, save it, and load it from here, using --tokenizer_name."
         )

examples/pytorch/image-classification/run_image_classification.py (+1, -1)

@@ -163,7 +163,7 @@ class ModelArguments:
         metadata={
             "help": (
                 "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-                "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+                "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
                 "execute code present on the Hub on your local machine."
             )
         },

examples/pytorch/image-classification/run_image_classification_no_trainer.py (+2, -2)

@@ -152,7 +152,7 @@ def parse_args():
         default=False,
         help=(
             "Whether or not to allow for custom models defined on the Hub in their own modeling files. This option"
-            "should only be set to `True` for repositories you trust and in which you have read the code, as it will"
+            "should only be set to `True` for repositories you trust and in which you have read the code, as it will "
             "execute code present on the Hub on your local machine."
         ),
     )

@@ -179,7 +179,7 @@ def parse_args():
         default="all",
         help=(
             'The integration to report the results and logs to. Supported platforms are `"tensorboard"`,'
-            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations.'
+            ' `"wandb"`, `"comet_ml"` and `"clearml"`. Use `"all"` (default) to report to all integrations. '
             "Only applicable when `--with_tracking` is passed."
         ),
     )
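
Aside (not part of this commit): junctions like the ones patched above can be flagged mechanically. Below is a hypothetical checker sketched with only the standard library; it scans a file's token stream for implicitly concatenated string literals whose left fragment ends without whitespace and whose right fragment starts without it. The file name check_adjacent_strings.py and all helper names are illustrative, not from the repository.

# check_adjacent_strings.py -- hypothetical helper, not part of the commit.
# Flags implicitly concatenated string literals with no space at the junction.
import ast
import sys
import tokenize

SKIP = {tokenize.NL, tokenize.COMMENT}  # tokens allowed between adjacent literals

def check_file(path):
    with tokenize.open(path) as handle:
        prev = None  # previous significant token
        for tok in tokenize.generate_tokens(handle.readline):
            if tok.type in SKIP:
                continue
            if prev is not None and prev.type == tokenize.STRING and tok.type == tokenize.STRING:
                try:
                    left = ast.literal_eval(prev.string)
                    right = ast.literal_eval(tok.string)
                except (ValueError, SyntaxError):  # e.g. f-strings: skip them
                    left = right = None
                if (
                    isinstance(left, str) and isinstance(right, str)
                    and left and right
                    and not left[-1].isspace() and not right[0].isspace()
                ):
                    print(f"{path}:{tok.start[0]}: {left[-25:]!r} + {right[:25]!r}")
            prev = tok

if __name__ == "__main__":
    for filename in sys.argv[1:]:
        check_file(filename)

Run as, for example, python check_adjacent_strings.py examples/flax/question-answering/run_qa.py. Lint plugins in the flake8-implicit-str-concat family target the same construct, though they flag the concatenation itself rather than the missing space.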
