Skip to content

Commit 64b4d25

Browse files
authored
[fsmt test] basic config test with online model + super tiny model (#7860)
* basic config test with online model
* typo
* style
* better test
1 parent 3479787 commit 64b4d25

File tree

3 files changed

+105
-5
lines changed

3 files changed

+105
-5
lines changed
+74
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
#!/usr/bin/env python
# coding: utf-8

# This script creates a super tiny model that is useful inside tests, when we just want to test that
# the machinery works, without needing to check the quality of the outcomes.
#
# This version creates a tiny vocab first, and then a tiny model - so the outcome is truly tiny -
# all files ~60KB. As compared to taking a full-size model, reducing to the minimum its layers and
# emb dimensions, but keeping the full vocab + merges files, leading to ~3MB in total for all files.
# The latter is done by `fsmt-make-tiny-model.py`.
#
# It will be used then as "stas/tiny-wmt19-en-ru"

import json
import tempfile
from pathlib import Path

from transformers import FSMTConfig, FSMTForConditionalGeneration, FSMTTokenizer
from transformers.tokenization_fsmt import VOCAB_FILES_NAMES

mname_tiny = "tiny-wmt19-en-ru"

# Build

# Minimal BPE vocab + merges, borrowed from a tokenizer test.
vocab = [
    "l", "o", "w", "e", "r", "s", "t", "i", "d", "n",
    "w</w>", "r</w>", "t</w>", "lo", "low", "er</w>",
    "low</w>", "lowest</w>", "newer</w>", "wider</w>", "<unk>",
]
vocab_tokens = dict(zip(vocab, range(len(vocab))))
merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

with tempfile.TemporaryDirectory() as tmpdirname:
    build_dir = Path(tmpdirname)
    src_vocab_file = build_dir / VOCAB_FILES_NAMES["src_vocab_file"]
    tgt_vocab_file = build_dir / VOCAB_FILES_NAMES["tgt_vocab_file"]
    merges_file = build_dir / VOCAB_FILES_NAMES["merges_file"]
    src_vocab_file.write_text(json.dumps(vocab_tokens), encoding="utf-8")
    tgt_vocab_file.write_text(json.dumps(vocab_tokens), encoding="utf-8")
    merges_file.write_text("\n".join(merges), encoding="utf-8")

    # The tokenizer must be constructed while the temp vocab/merges files still exist.
    tokenizer = FSMTTokenizer(
        langs=["en", "ru"],
        src_vocab_size=len(vocab),
        tgt_vocab_size=len(vocab),
        src_vocab_file=src_vocab_file,
        tgt_vocab_file=tgt_vocab_file,
        merges_file=merges_file,
    )

config = FSMTConfig(
    langs=["ru", "en"],
    # NOTE(review): 1000 is far larger than the actual 21-token vocab above; the extra
    # embedding rows are unused but d_model=4 keeps the files tiny anyway — confirm intentional.
    src_vocab_size=1000,
    tgt_vocab_size=1000,
    d_model=4,
    encoder_layers=1,
    decoder_layers=1,
    encoder_ffn_dim=4,
    decoder_ffn_dim=4,
    encoder_attention_heads=1,
    decoder_attention_heads=1,
)

tiny_model = FSMTForConditionalGeneration(config)
print(f"num of params {tiny_model.num_parameters()}")

# Test
batch = tokenizer.prepare_seq2seq_batch(["Making tiny model"])
outputs = tiny_model(**batch, return_dict=True)

print("test output:", len(outputs.logits[0]))

# Save
tiny_model.half()  # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)

print(f"Generated {mname_tiny}")

# Upload
# transformers-cli upload tiny-wmt19-en-ru

scripts/fsmt/fsmt-make-tiny-model.py

+18-5
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,19 @@
11
#!/usr/bin/env python
22
# coding: utf-8
33

4-
# this script creates a tiny model that is useful inside tests, when we just want to test that the machinery works,
5-
# without needing to the check the quality of the outcomes.
6-
# it will be used then as "stas/tiny-wmt19-en-de"
4+
# This script creates a super tiny model that is useful inside tests, when we just want to test that
5+
# the machinery works, without needing to check the quality of the outcomes.
6+
#
7+
# This version creates a tiny model through reduction of a normal pre-trained model, but keeping the
8+
# full vocab, merges file, and thus also resulting in a larger model due to a large vocab size.
9+
# This gives ~3MB in total for all files.
10+
#
11+
# If you want a model 50 times smaller than this, see `fsmt-make-super-tiny-model.py`, which is slightly more complicated
12+
#
13+
#
14+
# It will be used then as "stas/tiny-wmt19-en-de"
715

16+
# Build
817
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
918
mname = "facebook/wmt19-en-de"
1019
tokenizer = FSMTTokenizer.from_pretrained(mname)
@@ -18,16 +27,20 @@
1827

1928
tiny_model = FSMTForConditionalGeneration(config)
2029
print(f"num of params {tiny_model.num_parameters()}")
21-
# Test it
30+
31+
# Test
2232
batch = tokenizer.prepare_seq2seq_batch(["Making tiny model"])
2333
outputs = tiny_model(**batch, return_dict=True)
2434

25-
print(len(outputs.logits[0]))
35+
print("test output:", len(outputs.logits[0]))
36+
2637
# Save
2738
mname_tiny = "tiny-wmt19-en-de"
2839
tiny_model.half() # makes it smaller
2940
tiny_model.save_pretrained(mname_tiny)
3041
tokenizer.save_pretrained(mname_tiny)
3142

43+
print(f"Generated {mname_tiny}")
44+
3245
# Upload
3346
# transformers-cli upload tiny-wmt19-en-de

tests/test_tokenization_fsmt.py

+13
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,10 @@
2525
from .test_tokenization_common import TokenizerTesterMixin
2626

2727

28+
# using a different tiny model than the one used for default params defined in init to ensure proper testing
29+
FSMT_TINY2 = "stas/tiny-wmt19-en-ru"
30+
31+
2832
class FSMTTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
2933
tokenizer_class = FSMTTokenizer
3034

@@ -86,6 +90,15 @@ def tokenizer_ru_en(self):
8690
def tokenizer_en_ru(self):
8791
return FSMTTokenizer.from_pretrained("facebook/wmt19-en-ru")
8892

93+
def test_online_tokenizer_config(self):
    """Verify that the remote tokenizer files get fetched and loaded via their
    tokenizer_config.json. This is fast, so it runs under normal (non-slow) CI.
    """
    tok = FSMTTokenizer.from_pretrained(FSMT_TINY2)
    self.assertListEqual([tok.src_lang, tok.tgt_lang], ["en", "ru"])
    for vocab_size in (tok.src_vocab_size, tok.tgt_vocab_size):
        self.assertEqual(vocab_size, 21)
101+
89102
def test_full_tokenizer(self):
90103
""" Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt """
91104
tokenizer = FSMTTokenizer(self.langs, self.src_vocab_file, self.tgt_vocab_file, self.merges_file)

0 commit comments

Comments
 (0)