import time
import argparse

from transformers import AutoTokenizer

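# command-line arguments: positional tokenizer directory and a required input text file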
parser = argparse.ArgumentParser()
parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
parser.add_argument("--fname-tok", help="path to a text file to tokenize", required=True)
args = parser.parse_args()

dir_tokenizer = args.dir_tokenizer
fname_tok = args.fname_tok

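# load the tokenizer from the given directory via HuggingFace transformers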
tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)

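# tokenize the whole file in one call and write one token id per line to '<fname_tok>.tok'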
print('tokenizing file: ', fname_tok) # noqa: NP100
fname_out = fname_tok + '.tok'
with open(fname_tok, 'r', encoding='utf-8') as f:
    lines = f.readlines()
    s = ''.join(lines)
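    # time only the encode call; add_special_tokens=False keeps BOS/EOS out of the count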
    t_start = time.time()
    res = tokenizer.encode(s, add_special_tokens=False)
    t_end = time.time()
    print('\nmain : tokenized in', "{:.3f}".format(1000.0 * (t_end - t_start)), 'ms (py)') # noqa: NP100
    with open(fname_out, 'w', encoding='utf-8') as fout:
        for x in res:
            # LLaMA v3 for some reason strips the space for these tokens (and others)
            # if x == 662:
            #     fout.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 1174:
            #     fout.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 2564:
            #     fout.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 758:
            #     fout.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 949:
            #     fout.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # elif x == 5354:
            #     fout.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
            # else:
            #     fout.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
            # fout.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
            fout.write(str(x) + '\n')
        print('len(res): ', len(res)) # noqa: NP100
        print('len(lines): ', len(lines)) # noqa: NP100
print('results written to: ', fname_out) # noqa: NP100