# Test libllama tokenizer == AutoTokenizer.
# Brute force random words/text generation.
#
# Sample usage:
#
#   python3 tests/test-tokenizer-random.py ./models/ggml-vocab-llama-bpe.gguf ./models/tokenizers/llama-bpe
#
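# Prerequisites (assuming a standard llama.cpp checkout; adjust paths as needed):
#
#   cmake -B build -DBUILD_SHARED_LIBS=ON && cmake --build build   # produces ./build/src/libllama.so
#   pip install cffi transformers typing_extensions
#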

from __future__ import annotations

import time
import logging
import argparse
import subprocess
import random
import unicodedata

from pathlib import Path
from typing import Any, Iterator, cast
from typing_extensions import Buffer

import cffi
from transformers import AutoTokenizer, PreTrainedTokenizer


logger = logging.getLogger("test-tokenizer-random")


class LibLlama:

    DEFAULT_PATH_LLAMA_H = "./include/llama.h"
    DEFAULT_PATH_INCLUDES = ["./ggml/include/", "./include/"]
    DEFAULT_PATH_LIBLLAMA = "./build/src/libllama.so"  # CMakeLists.txt: BUILD_SHARED_LIBS ON

    def __init__(self, path_llama_h: str | None = None, path_includes: list[str] = [], path_libllama: str | None = None):
        path_llama_h = path_llama_h or self.DEFAULT_PATH_LLAMA_H
        path_includes = path_includes or self.DEFAULT_PATH_INCLUDES
        path_libllama = path_libllama or self.DEFAULT_PATH_LIBLLAMA
        (self.ffi, self.lib) = self._load_libllama_cffi(path_llama_h, path_includes, path_libllama)
        self.lib.llama_backend_init()

    def _load_libllama_cffi(self, path_llama_h: str, path_includes: list[str], path_libllama: str) -> tuple[cffi.FFI, Any]:
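        # preprocess llama.h with gcc so that cffi's pycparser can parse it:
        # expand includes and strip compiler-specific keywords and attributes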
        cmd = ["gcc", "-O0", "-E", "-P", "-D__restrict=", "-D__attribute__(x)=", "-D__asm__(x)="]
        cmd += ["-I" + path for path in path_includes] + [path_llama_h]
        res = subprocess.run(cmd, stdout=subprocess.PIPE)
        assert res.returncode == 0, "gcc preprocessing of llama.h failed"
        source = res.stdout.decode()
        ffi = cffi.FFI()
        if True:  # workarounds for pycparser
            source = "typedef struct { } __builtin_va_list;" + "\n" + source
            source = source.replace("sizeof (int)",    str(ffi.sizeof("int")))
            source = source.replace("sizeof (void *)", str(ffi.sizeof("void*")))
            source = source.replace("sizeof (size_t)", str(ffi.sizeof("size_t")))
            source = source.replace("sizeof(int32_t)", str(ffi.sizeof("int32_t")))
        ffi.cdef(source, override=True)
        lib = ffi.dlopen(path_libllama)
        return (ffi, lib)


    def model_default_params(self, **kwargs):
        mparams = self.lib.llama_model_default_params()
        for k, v in kwargs.items():
            setattr(mparams, k, v)
        return mparams

    def context_default_params(self, **kwargs):
        cparams = self.lib.llama_context_default_params()
        for k, v in kwargs.items():
            setattr(cparams, k, v)
        return cparams


class LibLlamaModel:

    def __init__(self, libllama: LibLlama, path_model: str, mparams={}, cparams={}):
        self.lib: Any = libllama.lib
        self.ffi = libllama.ffi
        if isinstance(mparams, dict):
            mparams = libllama.model_default_params(**mparams)
        self.model = self.lib.llama_model_load_from_file(path_model.encode(), mparams)
        if not self.model:
            raise RuntimeError("error: failed to load model '%s'" % path_model)
        if isinstance(cparams, dict):
            cparams = libllama.context_default_params(**cparams)
        self.ctx = self.lib.llama_new_context_with_model(self.model, cparams)
        if not self.ctx:
            raise RuntimeError("error: failed to create context for model '%s'" % path_model)
        n_tokens_max = self.lib.llama_n_ctx(self.ctx)
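        # scratch buffers; tokenize() and detokenize() grow them on demand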
        self.token_ids = self.ffi.new("llama_token[]", n_tokens_max)
        self.text_buff = self.ffi.new("uint8_t[]", 1024)

    def free(self):
        if self.ctx:
            self.lib.llama_free(self.ctx)
        if self.model:
            self.lib.llama_model_free(self.model)
        self.ctx = None
        self.model = None
        self.lib = None

    def tokenize(self, text: str, add_special: bool = False, parse_special: bool = False) -> list[int]:
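        # llama_tokenize returns a negative token count when the output buffer
        # is too small; grow the buffer and retry, capped at 16M tokens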
        encoded_text: bytes = text.encode("utf-8")
        num = self.lib.llama_tokenize(self.model, encoded_text, len(encoded_text), self.token_ids, len(self.token_ids), add_special, parse_special)
        while num < 0 and len(self.token_ids) < (16 << 20):
            self.token_ids = self.ffi.new("llama_token[]", -2 * num)
            num = self.lib.llama_tokenize(self.model, encoded_text, len(encoded_text), self.token_ids, len(self.token_ids), add_special, parse_special)
        return list(self.token_ids[0:num])

    def detokenize(self, ids: list[int], remove_special: bool = False, unparse_special: bool = False) -> str:
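        # llama_detokenize likewise returns a negative byte count when
        # text_buff is too small; grow the buffer and retry, capped at 16MB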
        if len(self.token_ids) < len(ids):
            self.token_ids = self.ffi.new("llama_token[]", 2 * len(ids))
        for i, id in enumerate(ids):
            self.token_ids[i] = id
        num = self.lib.llama_detokenize(self.model, self.token_ids, len(ids), self.text_buff, len(self.text_buff), remove_special, unparse_special)
        while num < 0 and len(self.text_buff) < (16 << 20):
            self.text_buff = self.ffi.new("uint8_t[]", -2 * num)
            num = self.lib.llama_detokenize(self.model, self.token_ids, len(ids), self.text_buff, len(self.text_buff), remove_special, unparse_special)
        return str(cast(Buffer, self.ffi.buffer(self.text_buff, num)), encoding="utf-8", errors="replace")  # replace errors with '\uFFFD'


class Tokenizer:

    def encode(self, text: str) -> list[int]:
        raise NotImplementedError

    def decode(self, ids: list[int]) -> str:
        raise NotImplementedError


class TokenizerGroundtruth (Tokenizer):

    def __init__(self, dir_tokenizer: str):
        self.model: PreTrainedTokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
        # guess BOS and EOS
        ids = self.encode("a")
        assert 1 <= len(ids) <= 3
        add_bos_token = len(ids) > 1 and self.model.bos_token_id == ids[0]
        add_eos_token = len(ids) > 1 and self.model.eos_token_id == ids[-1]
        self.add_bos_token = getattr(self.model, "add_bos_token", add_bos_token)
        self.add_eos_token = getattr(self.model, "add_eos_token", add_eos_token)
        # build vocab
        tokens = list(self.model.get_vocab().values())
        self.vocab = self.model.batch_decode(tokens, skip_special_tokens=True)
        self.vocab = list(sorted(self.vocab))
        # tokens and lists
        self.special_tokens = list(self.model.all_special_tokens)
        self.added_tokens   = self.model.batch_decode(self.model.added_tokens_encoder.values(), skip_special_tokens=False)
        self.bos_token = self.model.bos_token
        self.eos_token = self.model.eos_token

    def encode(self, text: str) -> list[int]:
        return self.model.encode(text, add_special_tokens=True)

    def decode(self, ids: list[int]) -> str:
        return self.model.decode(ids, skip_special_tokens=False)


class TokenizerLlamaCpp (Tokenizer):

    libllama: LibLlama | None = None

    def __init__(self, vocab_file: str):
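        # cache the cffi handle on the class so libllama is loaded only once,
        # even when several TokenizerLlamaCpp instances are created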
        if not TokenizerLlamaCpp.libllama:
            TokenizerLlamaCpp.libllama = LibLlama()
        self.model = LibLlamaModel(self.libllama, vocab_file, mparams=dict(vocab_only=True), cparams=dict(n_ctx=4096))

    def encode(self, text: str) -> list[int]:
        return self.model.tokenize(text, add_special=True, parse_special=True)

    def decode(self, ids: list[int]) -> str:
        return self.model.detokenize(ids, remove_special=False, unparse_special=True)


def generator_custom_text() -> Iterator[str]:
    """General tests"""
    yield from [
        "",
        " ",
        "  ",
        "   ",
        "\t",
        "\n",
        "\n\n",
        "\n\n\n",
        "\t\n",
        "Hello world",
        " Hello world",
        "Hello World",
        " Hello World",
        " Hello World!",
        "Hello, world!",
        " Hello, world!",
        " this is 🦙.cpp",
        "w048 7tuijk dsdfhu",
        "нещо на Български",
        "កាន់តែពិសេសអាចខលចេញ",
        "🚀 (normal) 😶‍🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
        "Hello",
        " Hello",
        "  Hello",
        "   Hello",
        "    Hello",
        "    Hello\n    Hello",
        " (",
        "\n =",
        "' era",
        "Hello, y'all! How are you 😁 ?我想在apple工作1314151天～",
        "3",
        "33",
        "333",
        "3333",
        "33333",
        "333333",
        "3333333",
        "33333333",
        "333333333",
    ]


def generator_custom_text_edge_cases() -> Iterator[str]:
    """Edge cases found while debugging"""
    yield from [
        '\x1f-a',     # unicode_ranges_control, {0x00001C, 0x00001F}
        '¼-a',        # unicode_ranges_digit, 0x00BC
        '½-a',        # unicode_ranges_digit, 0x00BD
        '¾-a',        # unicode_ranges_digit, 0x00BE
        'a 〇b',      # unicode_ranges_digit, 0x3007
        'Ⅵ-a',       # unicode_ranges_digit, {0x00002150, 0x0000218F} // Number Forms
        '\uFEFF//',   # unicode_ranges_control, 0xFEFF (BOM)
        'Cửa Việt',   # llama-3, ignore_merges = true
        '<s>a',       # Phi-3 fail
        '<unk><|endoftext|><s>',  # Phi-3 fail
        'a\na',            # bert fail
        '"`',              # falcon
        ' \u2e4e',         # falcon
        '\n\x0b  ',        # falcon
        'a\xa0\xa0\x00b',  # jina-v2-es
        'one <mask>',      # jina-v2-es  <mask> lstrip=true
        'a </s> b',        # rstrip phi-3
        'a <mask> b',      # lstrip jina-v2
        '\xa0aC',          # deepseek
        '\u2029 \uA3E4',   # deepseek-llm
        "a ?",
        'a\u030A',         # mpt (a + combining ring above)
        '\U000ac517',      # utf-8 encode error, falcon
        '\U000522f4',      # utf-8 encode error, starcoder
        "<s><s><unk><s>a<s>b<s>c<unk>d<unk></s>",
        "<s> <s> <unk><s>a<s>b<s>c<unk>d<unk></s>",
    ]


def generator_vocab_words(tokenizer: TokenizerGroundtruth) -> Iterator[str]:
    """Brute force check all vocab words"""
    yield from tokenizer.vocab


def generator_ascii_lr_strip() -> Iterator[str]:
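    """Pairs of ASCII characters with whitespace padded around and between them"""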
    WHITESPACES = ["", " ", "  "]
    CHARACTERS = list(chr(i) for i in range(1, 0x80)) + [""]
    for char1 in CHARACTERS:
        for char2 in CHARACTERS:
            for lstrip in WHITESPACES:
                for rstrip in WHITESPACES:
                    yield lstrip + char1 + char2 + rstrip
                    yield lstrip + char1 + rstrip + char2
                    yield char1 + lstrip + char2 + rstrip


def generator_apostrophe() -> Iterator[str]:
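    """Apostrophes surrounded by ASCII characters and whitespace"""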
    WHITESPACES = ["", " ", "  "]
    CHARACTERS = list(chr(i) for i in range(1, 0x80)) + [""]
    for char1 in CHARACTERS:
        for char2 in CHARACTERS:
            for lstrip in WHITESPACES:
                for rstrip in WHITESPACES:
                    yield char1 + lstrip + "'" + rstrip + char2
                    yield char1 + char2 + lstrip + "'" + rstrip + "z"
                    yield "a" + lstrip + "'" + rstrip + char1 + char2


def generator_added_lr_strip(tokenizer: TokenizerGroundtruth) -> Iterator[str]:
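    """Special/added tokens with whitespace variants on both sides (lstrip/rstrip)"""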
    WHITESPACES = ["", " ", "  ", "\n", "\r\n", "\n\n", "\t", "\t\t"]
    all_tokens = list(sorted(set(tokenizer.special_tokens + tokenizer.added_tokens)))
    for token in all_tokens:
        for lstrip in WHITESPACES:
            for rstrip in WHITESPACES:
                yield lstrip + token + rstrip
                yield "a" + lstrip + token + rstrip
                yield lstrip + token + rstrip + "z"
                yield "a" + lstrip + token + rstrip + "z"


def generator_random_added_tokens(tokenizer: TokenizerGroundtruth, iterations=100) -> Iterator[str]:
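    """Random sequences of special/added tokens mixed with short separators"""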
    separations = [" ", "\n", "\t", "-", "!", "one", "1", "<s>", "</s>"]
    all_tokens  = list(sorted(set(tokenizer.special_tokens + tokenizer.added_tokens + separations)))
    rand = random.Random()
    for m in range(iterations):
        rand.seed(m)
        words = rand.choices(all_tokens, k=500)
        if words and words[0] == tokenizer.bos_token:  # skip spam warning of double BOS
            while len(words) > 1 and words[1] == tokenizer.bos_token:  # leave one starting BOS
                words.pop(0)
            if tokenizer.add_bos_token:  # drop all starting BOS
                words.pop(0)
        if words and words[-1] == tokenizer.eos_token:  # skip spam warning of double EOS
            while len(words) > 1 and words[-2] == tokenizer.eos_token:  # leave one trailing EOS
                words.pop(-1)
            if tokenizer.add_eos_token:  # drop all trailing EOS
                words.pop(-1)
        yield "".join(words)


def generator_random_chars(iterations=100) -> Iterator[str]:
    """Brute force random text with simple characters"""

    NUM_WORDS = 400
    WHITESPACES = list(" " * 20 + "\n" * 5 + "\r\n" * 5 + "\t" * 5)
    CHARS = list(sorted(set("""
        ABCDEFGHIJKLMNOPQRSTUVWXYZ
        abcdefghijklmnopqrstuvwxyz
        ÁÉÍÓÚÀÈÌÒÙÂÊÎÔÛÄËÏÖÜ
        áéíóúàèìòùâêîôûäëïöü
        .-,*/-+ª!"·$%&/()=?¿[]{}<>\\|@#~½¬~;:_
    """)))

    rand = random.Random()
    for m in range(iterations):
        rand.seed(m)
        text = []
        for _ in range(NUM_WORDS):
            k = rand.randint(1, 7)
            word = rand.choices(CHARS, k=k)
            word.append(rand.choice(WHITESPACES))
            text.append("".join(word))
        yield "".join(text)


def generator_unicodes() -> Iterator[str]:
    """Iterate unicode characters"""

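    # only scan the first three planes; codepoints at and above 0x30000 are
    # mostly unassigned (the full Unicode range would be 0x110000)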
    MAX_CODEPOINTS = 0x30000  # 0x110000

    def _valid(cpt):
        if cpt >= 0x30000:  # unassigned and supplementary
            return False
        # if cpt == 0x2029:  # deepseek-llm
        #    return False
        if unicodedata.category(chr(cpt)) in ("Cn", "Cs", "Co"):  # undefined, surrogates, private
            return False
        return True

    characters = [chr(cpt) for cpt in range(0, MAX_CODEPOINTS) if _valid(cpt)]

    yield from characters


def generator_random_unicodes(iterations=100) -> Iterator[str]:
    """Brute force random text with unicode characters"""

    NUM_WORDS = 200
    WHITESPACES = list(" " * 20 + "\n" * 5 + "\r\n" * 5 + "\t" * 5)

    characters = list(generator_unicodes())

    rand = random.Random()
    for m in range(iterations):
        rand.seed(m)
        text = []
        for _ in range(NUM_WORDS):
            k = rand.randint(1, 7)
            word = rand.choices(characters, k=k)
            word.append(rand.choice(WHITESPACES))
            text.append("".join(word))
        yield "".join(text)


def generator_random_vocab_chars(tokenizer: TokenizerGroundtruth, iterations=100) -> Iterator[str]:
    """Brute force random text with vocab characters"""

    vocab_chars = set()
    for word in tokenizer.vocab:
        vocab_chars.update(word)
    vocab_chars = list(sorted(vocab_chars))

    rand = random.Random()
    for m in range(iterations):
        rand.seed(m)
        text = rand.choices(vocab_chars, k=1024)
        yield "".join(text)


def generator_random_vocab_words(tokenizer: TokenizerGroundtruth, iterations=100) -> Iterator[str]:
    """Brute force random text from vocab words"""

    vocab = [w.strip() for w in tokenizer.vocab]
    yield from vocab

    rand = random.Random()
    for m in range(iterations):
        rand.seed(m)
        text = []
        num_words = rand.randint(300, 400)
        for i in range(num_words):
            k = rand.randint(1, 3)
            words = rand.choices(vocab, k=k)
            sep = rand.choice("     \n\r\t")
            text.append("".join(words) + sep)
        yield "".join(text)


def compare_tokenizers(tokenizer1: TokenizerGroundtruth, tokenizer2: TokenizerLlamaCpp, generator: Iterator[str]):

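    # index of the first differing element; -1 if equal, or the length of the
    # shorter sequence if one is a strict prefix of the other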
    def find_first_mismatch(ids1: list[int] | str, ids2: list[int] | str):
        for i, (a, b) in enumerate(zip(ids1, ids2)):
            if a != b:
                return i
        if len(ids1) == len(ids2):
            return -1
        return min(len(ids1), len(ids2))

    def check_detokenizer(text: str, text1: str, text2: str) -> bool:
        if text1 == text2:  # equal to TokenizerGroundtruth?
            return True
        # equal to source text?
        if tokenizer1.add_bos_token and tokenizer1.bos_token and isinstance(tokenizer1.bos_token, str):  # remove BOS
            if text2.startswith(tokenizer1.bos_token):
                text2 = text2[len(tokenizer1.bos_token):]
        if tokenizer1.add_eos_token and tokenizer1.eos_token and isinstance(tokenizer1.eos_token, str):  # remove EOS
            if text2.endswith(tokenizer1.eos_token):
                text2 = text2[:-len(tokenizer1.eos_token)]
        return text == text2

    t_encode1 = 0
    t_encode2 = 0
    t_decode1 = 0
    t_decode2 = 0
    t_start = time.perf_counter()
    encode_errors = 0
    decode_errors = 0
    MAX_ERRORS = 10

    logger.info("%s: %s" % (generator.__qualname__, "ini"))
    for text in generator:
        # print(repr(text), text.encode())
        # print(repr(text), hex(ord(text[0])), text.encode())
        t0 = time.perf_counter()
        ids1 = tokenizer1.encode(text)
        t1 = time.perf_counter()
        ids2 = tokenizer2.encode(text)
        t2 = time.perf_counter()
        text1 = tokenizer1.decode(ids1)
        t3 = time.perf_counter()
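        # decode the ground-truth ids with both detokenizers, so that
        # detokenization is checked independently of any encode mismatch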
        text2 = tokenizer2.decode(ids1)
        t4 = time.perf_counter()
        t_encode1 += t1 - t0
        t_encode2 += t2 - t1
        t_decode1 += t3 - t2
        t_decode2 += t4 - t3
        if encode_errors < MAX_ERRORS and ids1 != ids2:
            i = find_first_mismatch(ids1, ids2)
            ids1 = list(ids1)[max(0, i - 2) : i + 5 + 1]
            ids2 = list(ids2)[max(0, i - 2) : i + 5 + 1]
            logger.error(" Expected: " + str(ids1))
            logger.error("   Result: " + str(ids2))
            encode_errors += 1
            logger.error(f" {encode_errors=}")
        if decode_errors < MAX_ERRORS and not check_detokenizer(text, text1, text2):
            i = find_first_mismatch(text1, text2)
            text1 = list(text1[max(0, i - 2) : i + 5 + 1])
            text2 = list(text2[max(0, i - 2) : i + 5 + 1])
            logger.error(" Expected: " + " ".join(hex(ord(x)) for x in text1))
            logger.error("   Result: " + " ".join(hex(ord(x)) for x in text2))
            decode_errors += 1
            logger.error(f" {decode_errors=}")
        if encode_errors >= MAX_ERRORS and decode_errors >= MAX_ERRORS:
            logger.error(f" EXIT: {encode_errors=} {decode_errors=}")
            # raise Exception()
            break

    t_total = time.perf_counter() - t_start
    logger.info(f"{generator.__qualname__}: end,  {t_encode1=:.3f} {t_encode2=:.3f}  {t_decode1=:.3f} {t_decode2=:.3f}  {t_total=:.3f}")


def main(argv: list[str] | None = None):
    parser = argparse.ArgumentParser()
    parser.add_argument("vocab_file", type=str, help="path to vocab 'gguf' file")
    parser.add_argument("dir_tokenizer", type=str, help="directory containing 'tokenizer.model' file")
    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
    args = parser.parse_args(argv)

    logging.basicConfig(level = logging.DEBUG if args.verbose else logging.INFO)
    logger.info(f"VOCABFILE: '{args.vocab_file}'")

    tokenizer1 = TokenizerGroundtruth(args.dir_tokenizer)
    tokenizer2 = TokenizerLlamaCpp(args.vocab_file)

    # compare_tokenizers(tokenizer1, tokenizer2, generator_custom_text())
    # compare_tokenizers(tokenizer1, tokenizer2, generator_custom_text_edge_cases())
    compare_tokenizers(tokenizer1, tokenizer2, generator_ascii_lr_strip())
    compare_tokenizers(tokenizer1, tokenizer2, generator_apostrophe())
    compare_tokenizers(tokenizer1, tokenizer2, generator_unicodes())
    compare_tokenizers(tokenizer1, tokenizer2, generator_vocab_words(tokenizer1))
    compare_tokenizers(tokenizer1, tokenizer2, generator_added_lr_strip(tokenizer1))
    # compare_tokenizers(tokenizer1, tokenizer2, generator_random_added_tokens(tokenizer1, 10_000))
    # compare_tokenizers(tokenizer1, tokenizer2, generator_random_chars(10_000))
    # compare_tokenizers(tokenizer1, tokenizer2, generator_random_unicodes(10_000))
    # compare_tokenizers(tokenizer1, tokenizer2, generator_random_vocab_chars(tokenizer1, 10_000))
    # compare_tokenizers(tokenizer1, tokenizer2, generator_random_vocab_words(tokenizer1, 5_000))

    tokenizer2.model.free()


if __name__ == "__main__":
    # main()

    if True:  # write a detailed, timestamped log file in append mode
        logging.basicConfig(
            level    = logging.DEBUG,
            format   = "%(asctime)s.%(msecs)03d %(name)s %(levelname)s %(message)s",
            datefmt  = "%Y-%m-%d %H:%M:%S",
            filename = logger.name + ".log",
            filemode = "a"
        )
    # logging.basicConfig() is a no-op once the root logger has handlers,
    # so attach the console handler explicitly
    console = logging.StreamHandler()
    console.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
    logging.getLogger().addHandler(console)

    path_tokenizers   = Path("./models/tokenizers/")
    path_vocab_format = "./models/ggml-vocab-%s.gguf"

    tokenizers = [
        "llama-spm",      # SPM
        "phi-3",          # SPM
        "gemma",          # SPM
        "gemma-2",        # SPM
        "baichuan",       # SPM
        "bert-bge",       # WPM
        "jina-v2-en",     # WPM
        "llama-bpe",      # BPE
        "phi-2",          # BPE
        "deepseek-llm",   # BPE
        "deepseek-coder", # BPE
        "falcon",         # BPE
        "mpt",            # BPE
        "starcoder",      # BPE
        "gpt-2",          # BPE
        "stablelm2",      # BPE
        "refact",         # BPE
        "qwen2",          # BPE
        "olmo",           # BPE
        "jina-v2-es",     # BPE
        "jina-v2-de",     # BPE
        "smaug-bpe",      # BPE
        "poro-chat",      # BPE
        "jina-v2-code",   # BPE
        "viking",         # BPE
        "jais",           # BPE
    ]

    logger.info("=" * 50)
    for tokenizer in tokenizers:
        logger.info("-" * 50)
        logger.info(f"TOKENIZER: '{tokenizer}'")
        vocab_file = Path(path_vocab_format % tokenizer)
        dir_tokenizer = path_tokenizers / tokenizer
        main([str(vocab_file), str(dir_tokenizer), "--verbose"])