Skip to content

Commit 3cab902

Browse files
authored
Add examples telemetry (#17552)
* Add examples telemetry
* Alternative approach
* Add to all other examples
* Add to templates as well
* Put framework separately
* Same for TensorFlow
1 parent 9e72eb4 commit 3cab902

File tree

53 files changed

+299
-57
lines changed

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

53 files changed

+299
-57
lines changed

examples/flax/image-captioning/run_image_captioning_flax.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -52,7 +52,7 @@
5252
HfArgumentParser,
5353
is_tensorboard_available,
5454
)
55-
from transformers.utils import get_full_repo_name, is_offline_mode
55+
from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry
5656

5757

5858
logger = logging.getLogger(__name__)
@@ -388,6 +388,10 @@ def main():
388388
else:
389389
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
390390

391+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
392+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
393+
send_example_telemetry("run_image_captioning", model_args, data_args, framework="flax")
394+
391395
if (
392396
os.path.exists(training_args.output_dir)
393397
and os.listdir(training_args.output_dir)

examples/flax/language-modeling/run_clm_flax.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -58,7 +58,7 @@
5858
set_seed,
5959
)
6060
from transformers.testing_utils import CaptureLogger
61-
from transformers.utils import get_full_repo_name
61+
from transformers.utils import get_full_repo_name, send_example_telemetry
6262

6363

6464
logger = logging.getLogger(__name__)
@@ -328,6 +328,10 @@ def main():
328328
else:
329329
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
330330

331+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
332+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
333+
send_example_telemetry("run_clm", model_args, data_args, framework="flax")
334+
331335
if (
332336
os.path.exists(training_args.output_dir)
333337
and os.listdir(training_args.output_dir)

examples/flax/language-modeling/run_mlm_flax.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -58,7 +58,7 @@
5858
is_tensorboard_available,
5959
set_seed,
6060
)
61-
from transformers.utils import get_full_repo_name
61+
from transformers.utils import get_full_repo_name, send_example_telemetry
6262

6363

6464
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
@@ -365,6 +365,10 @@ def main():
365365
else:
366366
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
367367

368+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
369+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
370+
send_example_telemetry("run_mlm", model_args, data_args, framework="flax")
371+
368372
if (
369373
os.path.exists(training_args.output_dir)
370374
and os.listdir(training_args.output_dir)

examples/flax/language-modeling/run_t5_mlm_flax.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -57,7 +57,7 @@
5757
set_seed,
5858
)
5959
from transformers.models.t5.modeling_flax_t5 import shift_tokens_right
60-
from transformers.utils import get_full_repo_name
60+
from transformers.utils import get_full_repo_name, send_example_telemetry
6161

6262

6363
MODEL_CONFIG_CLASSES = list(FLAX_MODEL_FOR_MASKED_LM_MAPPING.keys())
@@ -498,6 +498,10 @@ def main():
498498
else:
499499
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
500500

501+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
502+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
503+
send_example_telemetry("run_t5_mlm", model_args, data_args, framework="flax")
504+
501505
if (
502506
os.path.exists(training_args.output_dir)
503507
and os.listdir(training_args.output_dir)

examples/flax/question-answering/run_qa.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -53,7 +53,7 @@
5353
PreTrainedTokenizerFast,
5454
is_tensorboard_available,
5555
)
56-
from transformers.utils import check_min_version, get_full_repo_name
56+
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
5757
from utils_qa import postprocess_qa_predictions
5858

5959

@@ -424,6 +424,10 @@ def main():
424424
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
425425
else:
426426
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
427+
428+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
429+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
430+
send_example_telemetry("run_qa", model_args, data_args, framework="flax")
427431
# endregion
428432

429433
# region Logging

examples/flax/summarization/run_summarization_flax.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -54,7 +54,7 @@
5454
HfArgumentParser,
5555
is_tensorboard_available,
5656
)
57-
from transformers.utils import get_full_repo_name, is_offline_mode
57+
from transformers.utils import get_full_repo_name, is_offline_mode, send_example_telemetry
5858

5959

6060
logger = logging.getLogger(__name__)
@@ -399,6 +399,10 @@ def main():
399399
else:
400400
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
401401

402+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
403+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
404+
send_example_telemetry("run_summarization", model_args, data_args, framework="flax")
405+
402406
if (
403407
os.path.exists(training_args.output_dir)
404408
and os.listdir(training_args.output_dir)

examples/flax/text-classification/run_flax_glue.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -48,7 +48,7 @@
4848
TrainingArguments,
4949
is_tensorboard_available,
5050
)
51-
from transformers.utils import check_min_version, get_full_repo_name
51+
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
5252

5353

5454
logger = logging.getLogger(__name__)
@@ -308,6 +308,10 @@ def main():
308308
else:
309309
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
310310

311+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
312+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
313+
send_example_telemetry("run_glue", model_args, data_args, framework="flax")
314+
311315
# Make one log on every process with the configuration for debugging.
312316
logging.basicConfig(
313317
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/flax/token-classification/run_flax_ner.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,7 @@
4747
HfArgumentParser,
4848
is_tensorboard_available,
4949
)
50-
from transformers.utils import check_min_version, get_full_repo_name
50+
from transformers.utils import check_min_version, get_full_repo_name, send_example_telemetry
5151
from transformers.utils.versions import require_version
5252

5353

@@ -366,6 +366,10 @@ def main():
366366
else:
367367
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
368368

369+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
370+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
371+
send_example_telemetry("run_ner", model_args, data_args, framework="flax")
372+
369373
# Make one log on every process with the configuration for debugging.
370374
logging.basicConfig(
371375
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/flax/vision/run_image_classification.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -53,7 +53,7 @@
5353
is_tensorboard_available,
5454
set_seed,
5555
)
56-
from transformers.utils import get_full_repo_name
56+
from transformers.utils import get_full_repo_name, send_example_telemetry
5757

5858

5959
logger = logging.getLogger(__name__)
@@ -256,6 +256,10 @@ def main():
256256
else:
257257
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
258258

259+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
260+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
261+
send_example_telemetry("run_image_classification", model_args, data_args, framework="flax")
262+
259263
if (
260264
os.path.exists(training_args.output_dir)
261265
and os.listdir(training_args.output_dir)

examples/pytorch/audio-classification/run_audio_classification.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,7 @@
3737
set_seed,
3838
)
3939
from transformers.trainer_utils import get_last_checkpoint
40-
from transformers.utils import check_min_version
40+
from transformers.utils import check_min_version, send_example_telemetry
4141
from transformers.utils.versions import require_version
4242

4343

@@ -197,6 +197,10 @@ def main():
197197
else:
198198
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
199199

200+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
201+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
202+
send_example_telemetry("run_audio_classification", model_args, data_args)
203+
200204
# Setup logging
201205
logging.basicConfig(
202206
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/pytorch/contrastive-image-text/run_clip.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,7 @@
4747
set_seed,
4848
)
4949
from transformers.trainer_utils import get_last_checkpoint
50-
from transformers.utils import check_min_version
50+
from transformers.utils import check_min_version, send_example_telemetry
5151
from transformers.utils.versions import require_version
5252

5353

@@ -233,6 +233,10 @@ def main():
233233
else:
234234
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
235235

236+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
237+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
238+
send_example_telemetry("run_clip", model_args, data_args)
239+
236240
# 2. Setup logging
237241
logging.basicConfig(
238242
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/pytorch/image-classification/run_image_classification.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -45,7 +45,7 @@
4545
TrainingArguments,
4646
)
4747
from transformers.trainer_utils import get_last_checkpoint
48-
from transformers.utils import check_min_version
48+
from transformers.utils import check_min_version, send_example_telemetry
4949
from transformers.utils.versions import require_version
5050

5151

@@ -175,6 +175,10 @@ def main():
175175
else:
176176
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
177177

178+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
179+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
180+
send_example_telemetry("run_image_classification", model_args, data_args)
181+
178182
# Setup logging
179183
logging.basicConfig(
180184
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/pytorch/image-classification/run_image_classification_no_trainer.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,7 @@
4747
SchedulerType,
4848
get_scheduler,
4949
)
50-
from transformers.utils import get_full_repo_name
50+
from transformers.utils import get_full_repo_name, send_example_telemetry
5151
from transformers.utils.versions import require_version
5252

5353

@@ -201,6 +201,10 @@ def parse_args():
201201
def main():
202202
args = parse_args()
203203

204+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
205+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
206+
send_example_telemetry("run_image_classification_no_trainer", args)
207+
204208
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
205209
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
206210
# in the environment

examples/pytorch/image-pretraining/run_mae.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -34,7 +34,7 @@
3434
ViTMAEForPreTraining,
3535
)
3636
from transformers.trainer_utils import get_last_checkpoint
37-
from transformers.utils import check_min_version
37+
from transformers.utils import check_min_version, send_example_telemetry
3838
from transformers.utils.versions import require_version
3939

4040

@@ -175,6 +175,10 @@ def main():
175175
else:
176176
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
177177

178+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
179+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
180+
send_example_telemetry("run_mae", model_args, data_args)
181+
178182
# Setup logging
179183
logging.basicConfig(
180184
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/pytorch/image-pretraining/run_mim.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,7 @@
3737
TrainingArguments,
3838
)
3939
from transformers.trainer_utils import get_last_checkpoint
40-
from transformers.utils import check_min_version
40+
from transformers.utils import check_min_version, send_example_telemetry
4141
from transformers.utils.versions import require_version
4242

4343

@@ -239,6 +239,10 @@ def main():
239239
else:
240240
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
241241

242+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
243+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
244+
send_example_telemetry("run_mim", model_args, data_args)
245+
242246
# Setup logging
243247
logging.basicConfig(
244248
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/pytorch/language-modeling/run_clm.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -48,7 +48,7 @@
4848
)
4949
from transformers.testing_utils import CaptureLogger
5050
from transformers.trainer_utils import get_last_checkpoint
51-
from transformers.utils import check_min_version
51+
from transformers.utils import check_min_version, send_example_telemetry
5252
from transformers.utils.versions import require_version
5353

5454

@@ -214,6 +214,10 @@ def main():
214214
else:
215215
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
216216

217+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
218+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
219+
send_example_telemetry("run_clm", model_args, data_args)
220+
217221
# Setup logging
218222
logging.basicConfig(
219223
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/pytorch/language-modeling/run_clm_no_trainer.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -52,7 +52,7 @@
5252
default_data_collator,
5353
get_scheduler,
5454
)
55-
from transformers.utils import get_full_repo_name
55+
from transformers.utils import get_full_repo_name, send_example_telemetry
5656
from transformers.utils.versions import require_version
5757

5858

@@ -239,6 +239,10 @@ def parse_args():
239239
def main():
240240
args = parse_args()
241241

242+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
243+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
244+
send_example_telemetry("run_clm_no_trainer", args)
245+
242246
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
243247
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
244248
# in the environment

examples/pytorch/language-modeling/run_mlm.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -47,7 +47,7 @@
4747
set_seed,
4848
)
4949
from transformers.trainer_utils import get_last_checkpoint
50-
from transformers.utils import check_min_version
50+
from transformers.utils import check_min_version, send_example_telemetry
5151
from transformers.utils.versions import require_version
5252

5353

@@ -224,6 +224,10 @@ def main():
224224
else:
225225
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
226226

227+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
228+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
229+
send_example_telemetry("run_mlm", model_args, data_args)
230+
227231
# Setup logging
228232
logging.basicConfig(
229233
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

examples/pytorch/language-modeling/run_mlm_no_trainer.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -52,7 +52,7 @@
5252
SchedulerType,
5353
get_scheduler,
5454
)
55-
from transformers.utils import get_full_repo_name
55+
from transformers.utils import get_full_repo_name, send_example_telemetry
5656
from transformers.utils.versions import require_version
5757

5858

@@ -248,6 +248,10 @@ def parse_args():
248248
def main():
249249
args = parse_args()
250250

251+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
252+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
253+
send_example_telemetry("run_mlm_no_trainer", args)
254+
251255
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
252256
# If we're using tracking, we also need to initialize it here and it will by default pick up all supported trackers
253257
# in the environment

examples/pytorch/language-modeling/run_plm.py

+5-1
Original file line number | Diff line number | Diff line change
@@ -42,7 +42,7 @@
4242
set_seed,
4343
)
4444
from transformers.trainer_utils import get_last_checkpoint
45-
from transformers.utils import check_min_version
45+
from transformers.utils import check_min_version, send_example_telemetry
4646
from transformers.utils.versions import require_version
4747

4848

@@ -220,6 +220,10 @@ def main():
220220
else:
221221
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
222222

223+
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
224+
# information sent is the one passed as arguments along with your Python/PyTorch versions.
225+
send_example_telemetry("run_plm", model_args, data_args)
226+
223227
# Setup logging
224228
logging.basicConfig(
225229
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",

0 commit comments

Comments (0)