author    Georgi Gerganov <ggerganov@gmail.com>  2024-04-29 16:58:41 +0300
committer GitHub <noreply@github.com>  2024-04-29 16:58:41 +0300
commit  f4ab2a41476600a98067a9474ea8f9e6db41bcfa (patch)
tree    4e840ec5b4243ed43906a576e396995e3d9dbc21 /tests/test-tokenizer-0-llama.py
parent  3f167476b11efa7ab08f6cacdeb8cab0935c1249 (diff)
llama : fix BPE pre-tokenization (#6920)
* merged the changes from deepseek models to main branch
* moved regex patterns to unicode.cpp and updated unicode.h
* moved header files
* resolved issues
* added and refactored unicode_regex_split and related functions
* updated/merged the deepseek coder PR
* refactored code
* added unicode regex mappings
* added unicode regex function
* added needed functionality, testing remains
* fixed issues
* fixed issue with gpt2 regex custom preprocessor
* unicode : fix? unicode_wstring_to_utf8
* lint : fix whitespaces
* tests : add tokenizer tests for numbers
* unicode : remove redundant headers
* tests : remove and rename tokenizer test scripts
* tests : add sample usage
* gguf-py : reader prints warnings on duplicate keys
* llama : towards llama3 tokenization support (wip)
* unicode : shot in the dark to fix tests on Windows
* unicode : first try custom implementations
* convert : add "tokenizer.ggml.pre" GGUF KV (wip)
* llama : use new pre-tokenizer type
* convert : fix pre-tokenizer type writing
* lint : fix
* make : add test-tokenizer-0-llama-v3
* wip
* models : add llama v3 vocab file
* llama : adapt punctuation regex + add llama 3 regex
* minor
* unicode : set bomb
* unicode : set bomb
* unicode : always use std::wregex
* unicode : support \p{N}, \p{L} and \p{P} natively
* unicode : try fix windows
* unicode : category support via std::regex
* unicode : clean-up
* unicode : simplify
* convert : add convert-hf-to-gguf-update.py ggml-ci
* lint : update
* convert : add falcon ggml-ci
* unicode : normalize signatures
* lint : fix
* lint : fix
* convert : remove unused functions
* convert : add comments
* convert : exercise contractions ggml-ci
* lint : fix
* cmake : refactor test targets
* tests : refactor vocab tests ggml-ci
* tests : add more vocabs and tests ggml-ci
* unicode : cleanup
* scripts : ignore new update script in check-requirements.sh
* models : add phi-3, mpt, gpt-2, starcoder
* tests : disable obsolete ggml-ci
* tests : use faster bpe test ggml-ci
* llama : more prominent warning for old BPE models
* tests : disable test-tokenizer-1-bpe due to slowness ggml-ci

---------

Co-authored-by: Jaggzh <jaggz.h@gmail.com>
Co-authored-by: Kazim Abrar Mahi <kazimabrarmahi135@gmail.com>
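For context, the fix centers on how BPE pre-tokenization splits text into chunks before byte-pair merges are applied. Below is a minimal Python sketch of the idea, not code from this commit: it uses the well-known GPT-2 pre-tokenizer pattern and the third-party 'regex' module (which supports \p{L}/\p{N}/\p{P} classes), whereas llama.cpp now implements the equivalent splitting natively in unicode.cpp and selects a per-model pattern via the new "tokenizer.ggml.pre" GGUF key.

import regex  # third-party module; unlike 're', it supports \p{L}, \p{N}, \p{P}

# The GPT-2 pre-tokenizer pattern, one of the per-model patterns this
# commit moves into unicode.cpp. The chunks it produces are the units
# that BPE merges operate on.
GPT2_PRE_PATTERN = r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+"

def pre_tokenize(text: str) -> list[str]:
    # Split the input into chunks; merges never cross a chunk boundary,
    # so the pattern directly shapes the final token sequence.
    return regex.findall(GPT2_PRE_PATTERN, text)

print(pre_tokenize("Hello world, it's 2024!"))
# -> ['Hello', ' world', ',', ' it', "'s", ' 2024', '!']

Because merges only apply within a chunk, running a model with the wrong pre-tokenizer pattern silently changes its tokenization; that is why BPE-based GGUF files converted before this change now trigger a prominent warning.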
Diffstat (limited to 'tests/test-tokenizer-0-llama.py')
-rw-r--r--  tests/test-tokenizer-0-llama.py  92
1 file changed, 0 insertions(+), 92 deletions(-)
diff --git a/tests/test-tokenizer-0-llama.py b/tests/test-tokenizer-0-llama.py
deleted file mode 100644
index f3d4d7e3..00000000
--- a/tests/test-tokenizer-0-llama.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# tests with SPM tokenizer
-
-import argparse
-
-from sentencepiece import SentencePieceProcessor
-
-parser = argparse.ArgumentParser()
-parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
-parser.add_argument("--fname-tok", help="path to a text file to tokenize")
-args = parser.parse_args()
-
-dir_tokenizer = args.dir_tokenizer
-
-tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')
-
-tests = [
- "",
- " ",
- " ",
- " ",
- "\t",
- "\n",
- "\t\n",
- "Hello world",
- " Hello world",
- "Hello World",
- " Hello World",
- " Hello World!",
- "Hello, world!",
- " Hello, world!",
- " this is πŸ¦™.cpp",
- "w048 7tuijk dsdfhu",
- "Π½Π΅Ρ‰ΠΎ Π½Π° Π‘ΡŠΠ»Π³Π°Ρ€ΡΠΊΠΈ",
- "αž€αžΆαž“αŸ‹αžαŸ‚αž–αž·αžŸαŸαžŸαž’αžΆαž…αžαž›αž…αŸαž‰",
- "πŸš€ (normal) πŸ˜Άβ€πŸŒ«οΈ (multiple emojis concatenated) βœ… (only emoji that has its own token)",
- "Hello",
- " Hello",
- " Hello",
- " Hello",
- " Hello",
- " Hello\n Hello",
-]
-
-
-for text in tests:
- print('text: ', text)
- print('\nwith bos:')
- print(tokenizer.encode(text, add_bos=True))
- print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
- print('\nwithout bos:')
- print(tokenizer.encode(text, add_bos=False))
- print(tokenizer.decode(tokenizer.encode(text, add_bos=False)))
-
-print("'" + tokenizer.id_to_piece(15043) + "'") # '_Hello'
-print("'" + tokenizer.id_to_piece(29871) + "'") # '_'
-print("'" + tokenizer.decode([15043]) + "'") # 'Hello'
-print("'" + tokenizer.decode([15043, 15043]) + "'") # 'Hello Hello'
-print("'" + tokenizer.decode([29871, 15043]) + "'") # ' Hello'
-print("'" + tokenizer.decode([29871, 15043, 29871, 15043]) + "'") # ' Hello Hello'
-
-print("\n\ntests for C++:\n")
-for text in tests:
- res = tokenizer.encode(text, add_bos=False)
-
- k = text.replace('\n', '\\n')
- k = k.replace('\t', '\\t')
- k = '"' + k + '"'
- print("{ %-24s, { " % k, end='')
- for x in res:
- print("%7d," % x, end='')
- print(" }, },")
-
-print(tokenizer.encode('hello'))
-print(tokenizer.encode('world'))
-print(tokenizer.encode(' world'))
-print(tokenizer.encode('hello world'))
-
-fname_tok = args.fname_tok
-if fname_tok:
- print('tokenizing file: ', fname_tok)
- fname_out = fname_tok + '.tok'
- with open(fname_tok, 'r', encoding='utf-8') as f:
- lines = f.readlines()
- s = ''.join(lines)
- res = tokenizer.encode(s, add_bos=True)
- # write to file
- with open(fname_out, 'w', encoding='utf-8') as f:
- for x in res:
- f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
- print('len(res): ', len(res))
- print('len(lines): ', len(lines))
- print('results written to: ', fname_out)
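For reference, sample usage of the removed script (the model directory below is a placeholder for any directory containing a 'tokenizer.model' file):

python3 tests/test-tokenizer-0-llama.py path/to/model-dir --fname-tok input.txt

It printed each built-in test string tokenized with and without BOS, emitted the token lists as C++ initializer rows for the corresponding C++ test, and, when --fname-tok was given, wrote one "id 'piece'" pair per token to input.txt.tok.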