
Commit b8ab541

Don't log anything before logging is setup in examples (#12121)
* Don't log anything before logging is setup in examples
* Last example
1 parent 7566fef commit b8ab541
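The change applied across these examples is the same: run the logging setup (logging.basicConfig, the logger level, the Transformers verbosity) right after argument parsing, and only then run the checkpoint detection, whose logger.info call would otherwise go through a logger that has no handlers or level configured yet. A minimal sketch of the resulting order, assuming training_args is the already-parsed TrainingArguments; the setup_then_detect wrapper and the shortened messages are illustrative, not part of the scripts:

import logging
import os
import sys

from transformers.trainer_utils import get_last_checkpoint

logger = logging.getLogger(__name__)


def setup_then_detect(training_args):
    # Configure logging first, so every later logger call is formatted and routed to stdout.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)

    # Only now detect the last checkpoint; this branch may emit logger.info(...).
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is not None and training_args.resume_from_checkpoint is None:
            logger.info(f"Checkpoint detected, resuming training at {last_checkpoint}.")
    return last_checkpoint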

File tree

11 files changed: +171 -171 lines changed


examples/pytorch/language-modeling/run_clm.py

+15 -15

@@ -194,21 +194,6 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

-    # Detecting last checkpoint.
-    last_checkpoint = None
-    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-        last_checkpoint = get_last_checkpoint(training_args.output_dir)
-        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-            raise ValueError(
-                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                "Use --overwrite_output_dir to overcome."
-            )
-        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-            logger.info(
-                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-            )
-
     # Setup logging
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -229,6 +214,21 @@ def main():
         transformers.utils.logging.enable_explicit_format()
     logger.info(f"Training/evaluation parameters {training_args}")

+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
     # Set seed before initializing model.
     set_seed(training_args.seed)

examples/pytorch/language-modeling/run_mlm.py

+15 -15

@@ -190,21 +190,6 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

-    # Detecting last checkpoint.
-    last_checkpoint = None
-    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-        last_checkpoint = get_last_checkpoint(training_args.output_dir)
-        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-            raise ValueError(
-                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                "Use --overwrite_output_dir to overcome."
-            )
-        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-            logger.info(
-                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-            )
-
     # Setup logging
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -225,6 +210,21 @@ def main():
         transformers.utils.logging.enable_explicit_format()
     logger.info(f"Training/evaluation parameters {training_args}")

+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
     # Set seed before initializing model.
     set_seed(training_args.seed)

examples/pytorch/language-modeling/run_plm.py

+15 -15

@@ -187,21 +187,6 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

-    # Detecting last checkpoint.
-    last_checkpoint = None
-    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-        last_checkpoint = get_last_checkpoint(training_args.output_dir)
-        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-            raise ValueError(
-                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                "Use --overwrite_output_dir to overcome."
-            )
-        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-            logger.info(
-                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-            )
-
     # Setup logging
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -222,6 +207,21 @@ def main():
         transformers.utils.logging.enable_explicit_format()
     logger.info(f"Training/evaluation parameters {training_args}")

+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
     # Set seed before initializing model.
     set_seed(training_args.seed)

examples/pytorch/multiple-choice/run_swag.py

+15 -15

@@ -214,21 +214,6 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

-    # Detecting last checkpoint.
-    last_checkpoint = None
-    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-        last_checkpoint = get_last_checkpoint(training_args.output_dir)
-        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-            raise ValueError(
-                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                "Use --overwrite_output_dir to overcome."
-            )
-        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-            logger.info(
-                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-            )
-
     # Setup logging
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -249,6 +234,21 @@ def main():
         transformers.utils.logging.enable_explicit_format()
     logger.info(f"Training/evaluation parameters {training_args}")

+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
     # Set seed before initializing model.
     set_seed(training_args.seed)

examples/pytorch/question-answering/run_qa.py

+15 -15

@@ -207,21 +207,6 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

-    # Detecting last checkpoint.
-    last_checkpoint = None
-    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-        last_checkpoint = get_last_checkpoint(training_args.output_dir)
-        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-            raise ValueError(
-                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                "Use --overwrite_output_dir to overcome."
-            )
-        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-            logger.info(
-                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-            )
-
     # Setup logging
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -242,6 +227,21 @@ def main():
         transformers.utils.logging.enable_explicit_format()
     logger.info(f"Training/evaluation parameters {training_args}")

+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
     # Set seed before initializing model.
     set_seed(training_args.seed)

examples/pytorch/question-answering/run_qa_beam_search.py

+15 -15

@@ -206,21 +206,6 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

-    # Detecting last checkpoint.
-    last_checkpoint = None
-    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-        last_checkpoint = get_last_checkpoint(training_args.output_dir)
-        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-            raise ValueError(
-                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                "Use --overwrite_output_dir to overcome."
-            )
-        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-            logger.info(
-                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-            )
-
     # Setup logging
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -241,6 +226,21 @@ def main():
         transformers.utils.logging.enable_explicit_format()
     logger.info(f"Training/evaluation parameters {training_args}")

+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
     # Set seed before initializing model.
     set_seed(training_args.seed)

examples/pytorch/summarization/run_summarization.py

+18 -18

@@ -251,6 +251,24 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

+    # Setup logging
+    logging.basicConfig(
+        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
+        datefmt="%m/%d/%Y %H:%M:%S",
+        handlers=[logging.StreamHandler(sys.stdout)],
+    )
+    logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
+
+    # Log on each process the small summary:
+    logger.warning(
+        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
+        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
+    )
+    # Set the verbosity to info of the Transformers logger (on main process only):
+    if training_args.should_log:
+        transformers.utils.logging.set_verbosity_info()
+    logger.info(f"Training/evaluation parameters {training_args}")
+
     if data_args.source_prefix is None and model_args.model_name_or_path in [
         "t5-small",
         "t5-base",
@@ -278,24 +296,6 @@ def main():
                 "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
             )

-    # Setup logging
-    logging.basicConfig(
-        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
-        datefmt="%m/%d/%Y %H:%M:%S",
-        handlers=[logging.StreamHandler(sys.stdout)],
-    )
-    logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)
-
-    # Log on each process the small summary:
-    logger.warning(
-        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
-        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
-    )
-    # Set the verbosity to info of the Transformers logger (on main process only):
-    if training_args.should_log:
-        transformers.utils.logging.set_verbosity_info()
-    logger.info(f"Training/evaluation parameters {training_args}")
-
     # Set seed before initializing model.
     set_seed(training_args.seed)

examples/pytorch/text-classification/run_glue.py

+15 -15

@@ -197,21 +197,6 @@ def main():
     else:
         model_args, data_args, training_args = parser.parse_args_into_dataclasses()

-    # Detecting last checkpoint.
-    last_checkpoint = None
-    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
-        last_checkpoint = get_last_checkpoint(training_args.output_dir)
-        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
-            raise ValueError(
-                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
-                "Use --overwrite_output_dir to overcome."
-            )
-        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
-            logger.info(
-                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
-                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
-            )
-
     # Setup logging
     logging.basicConfig(
         format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
@@ -232,6 +217,21 @@ def main():
         transformers.utils.logging.enable_explicit_format()
     logger.info(f"Training/evaluation parameters {training_args}")

+    # Detecting last checkpoint.
+    last_checkpoint = None
+    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
+        last_checkpoint = get_last_checkpoint(training_args.output_dir)
+        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
+            raise ValueError(
+                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
+                "Use --overwrite_output_dir to overcome."
+            )
+        elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
+            logger.info(
+                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
+                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
+            )
+
     # Set seed before initializing model.
     set_seed(training_args.seed)
