author    Georgi Gerganov <ggerganov@gmail.com>    2024-04-29 16:58:41 +0300
committer GitHub <noreply@github.com>              2024-04-29 16:58:41 +0300
commit    f4ab2a41476600a98067a9474ea8f9e6db41bcfa (patch)
tree      4e840ec5b4243ed43906a576e396995e3d9dbc21 /tests/test-tokenizer-1-spm.cpp
parent    3f167476b11efa7ab08f6cacdeb8cab0935c1249 (diff)
llama : fix BPE pre-tokenization (#6920)
* merged the changes from deepseeker models to main branch
* Moved regex patterns to unicode.cpp and updated unicode.h
* Moved header files
* Resolved issues
* added and refactored unicode_regex_split and related functions
* Updated/merged the deepseek coder pr
* Refactored code
* Adding unicode regex mappings
* Adding unicode regex function
* Added needed functionality, testing remains
* Fixed issues
* Fixed issue with gpt2 regex custom preprocessor
* unicode : fix? unicode_wstring_to_utf8
* lint : fix whitespaces
* tests : add tokenizer tests for numbers
* unicode : remove redundant headers
* tests : remove and rename tokenizer test scripts
* tests : add sample usage
* gguf-py : reader prints warnings on duplicate keys
* llama : towards llama3 tokenization support (wip)
* unicode : shot in the dark to fix tests on Windows
* unicode : first try custom implementations
* convert : add "tokenizer.ggml.pre" GGUF KV (wip)
* llama : use new pre-tokenizer type
* convert : fix pre-tokenizer type writing
* lint : fix
* make : add test-tokenizer-0-llama-v3
* wip
* models : add llama v3 vocab file
* llama : adapt punctuation regex + add llama 3 regex
* minor
* unicode : set bomb
* unicode : set bomb
* unicode : always use std::wregex
* unicode : support \p{N}, \p{L} and \p{P} natively
* unicode : try fix windows
* unicode : category support via std::regex
* unicode : clean-up
* unicode : simplify
* convert : add convert-hf-to-gguf-update.py ggml-ci
* lint : update
* convert : add falcon ggml-ci
* unicode : normalize signatures
* lint : fix
* lint : fix
* convert : remove unused functions
* convert : add comments
* convert : exercise contractions ggml-ci
* lint : fix
* cmake : refactor test targets
* tests : refactor vocab tests ggml-ci
* tests : add more vocabs and tests ggml-ci
* unicode : cleanup
* scripts : ignore new update script in check-requirements.sh
* models : add phi-3, mpt, gpt-2, starcoder
* tests : disable obsolete ggml-ci
* tests : use faster bpe test ggml-ci
* llama : more prominent warning for old BPE models
* tests : disable test-tokenizer-1-bpe due to slowness ggml-ci

---------

Co-authored-by: Jaggzh <jaggz.h@gmail.com>
Co-authored-by: Kazim Abrar Mahi <kazimabrarmahi135@gmail.com>
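The test file added below is built around one invariant: detokenizing a token (or an arbitrary code point) and then tokenizing and detokenizing the result must reproduce the same string. A minimal standalone sketch of that invariant, with hypothetical tokenize/detokenize callables standing in for a real tokenizer (this is not the llama.cpp API):

#include <cstdio>
#include <functional>
#include <string>
#include <vector>

// sketch of the round-trip invariant the tokenizer tests enforce;
// `tokenize` and `detokenize` are hypothetical stand-ins
static bool round_trips(const std::string & text,
        const std::function<std::vector<int>(const std::string &)> & tokenize,
        const std::function<std::string(const std::vector<int> &)> & detokenize) {
    const std::string check = detokenize(tokenize(text));
    if (check != text) {
        fprintf(stderr, "round-trip failed: '%s' -> '%s'\n", text.c_str(), check.c_str());
        return false;
    }
    return true;
}

int main() {
    // toy byte-level tokenizer: one token per byte, so it trivially round-trips
    auto tok   = [](const std::string & s) { return std::vector<int>(s.begin(), s.end()); };
    auto detok = [](const std::vector<int> & t) { return std::string(t.begin(), t.end()); };
    return round_trips("hello world", tok, detok) ? 0 : 1;
}

A real subword tokenizer only guarantees this for strings that are themselves detokenizer output, which is exactly how the test below constructs its inputs.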
Diffstat (limited to 'tests/test-tokenizer-1-spm.cpp')
-rw-r--r--  tests/test-tokenizer-1-spm.cpp | 111
1 file changed, 111 insertions(+), 0 deletions(-)
diff --git a/tests/test-tokenizer-1-spm.cpp b/tests/test-tokenizer-1-spm.cpp
new file mode 100644
index 00000000..ac2333dd
--- /dev/null
+++ b/tests/test-tokenizer-1-spm.cpp
@@ -0,0 +1,111 @@
+#include "llama.h"
+#include "common.h"
+#include "unicode.h"
+#include "console.h"
+
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <thread>
+#include <vector>
+
+int main(int argc, char ** argv) {
+ if (argc < 2) {
+ fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
+ return 1;
+ }
+
+ const std::string fname = argv[1];
+
+ fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
+
+ llama_model * model;
+ llama_context * ctx;
+
+ llama_backend_init();
+
+ // load the vocab
+ {
+ auto mparams = llama_model_default_params();
+
+ mparams.vocab_only = true;
+
+ model = llama_load_model_from_file(fname.c_str(), mparams);
+
+ if (model == NULL) {
+ fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+ return 1;
+ }
+
+ auto cparams = llama_context_default_params();
+
+ ctx = llama_new_context_with_model(model, cparams);
+
+ if (ctx == NULL) {
+            fprintf(stderr, "%s: error: failed to create context with model '%s'\n", __func__, fname.c_str());
+ llama_free_model(model);
+ return 1;
+ }
+ }
+
+ GGML_ASSERT(llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM);
+
+#ifdef _WIN32
+ // We need this for unicode console support
+ console::init(false, false);
+ atexit([]() { console::cleanup(); });
+#endif
+
+ const int n_vocab = llama_n_vocab(model);
+
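+    // round-trip every token in the vocab: detokenize token i, tokenize the
+    // resulting string, and detokenize again; the two strings must match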
+ for (int i = 0; i < n_vocab; ++i) {
+        std::string str = llama_detokenize_spm(ctx, std::vector<llama_token>(1, i));
+ std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+ std::string check = llama_detokenize_spm(ctx, tokens);
+ if (check != str) {
+            fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu), but tokenizing that string and detokenizing again yields '%s'(%zu)\n",
+                __func__, i, str.c_str(), str.length(), check.c_str(), check.length());
+ return 2;
+ }
+ }
+
+    // unicode : brute-force round trip of every valid code point, split across threads
+ {
+        int nthread = std::thread::hardware_concurrency();
+        if (nthread < 1) {
+            nthread = 1; // hardware_concurrency() may return 0
+        }
+
+ std::vector<std::thread> threads(nthread);
+
+ for (int i = 0; i < nthread; ++i) {
+ threads[i] = std::thread([i, nthread, ctx]() {
+                for (uint32_t cp = i; cp <= 0x0010ffff; cp += nthread) {
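+                    // skip UTF-16 surrogate halves: not valid Unicode scalar
+                    // values and not encodable as UTF-8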
+ if (cp >= 0xd800 && cp <= 0xdfff) {
+ continue;
+ }
+
+ std::string str = unicode_cpt_to_utf8(cp);
+ std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
+ std::string check = llama_detokenize_spm(ctx, tokens);
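+                    // 9601 = U+2581 (LOWER ONE EIGHTH BLOCK), the SentencePiece
+                    // word-boundary marker; it is not expected to round-trip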
+ if (cp != 9601 && str != check) {
+ fprintf(stderr, "error: codepoint %x detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
+ cp, check.c_str(), check.length(), str.c_str(), str.length());
+ std::exit(3);
+ }
+ }
+ });
+ }
+
+ for (auto & t : threads) {
+ t.join();
+ }
+ }
+
+    // free the context before the model it references
+    llama_free(ctx);
+    llama_free_model(model);
+
+ llama_backend_free();
+
+ return 0;
+}
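The per-codepoint loop above depends on unicode_cpt_to_utf8 from the new unicode.h. For reference, a minimal sketch of the standard UTF-8 encoding scheme it implements (an illustration, not the actual unicode.cpp code):

#include <cstdint>
#include <stdexcept>
#include <string>

// encode a Unicode code point as UTF-8 (standard 1- to 4-byte scheme)
static std::string cpt_to_utf8(uint32_t cp) {
    std::string out;
    if (cp <= 0x7f) {            // 1 byte:  0xxxxxxx
        out.push_back(char(cp));
    } else if (cp <= 0x7ff) {    // 2 bytes: 110xxxxx 10xxxxxx
        out.push_back(char(0xc0 | (cp >> 6)));
        out.push_back(char(0x80 | (cp & 0x3f)));
    } else if (cp <= 0xffff) {   // 3 bytes: 1110xxxx 10xxxxxx 10xxxxxx
        out.push_back(char(0xe0 | (cp >> 12)));
        out.push_back(char(0x80 | ((cp >> 6) & 0x3f)));
        out.push_back(char(0x80 | (cp & 0x3f)));
    } else if (cp <= 0x10ffff) { // 4 bytes: 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
        out.push_back(char(0xf0 | (cp >> 18)));
        out.push_back(char(0x80 | ((cp >> 12) & 0x3f)));
        out.push_back(char(0x80 | ((cp >> 6) & 0x3f)));
        out.push_back(char(0x80 | (cp & 0x3f)));
    } else {
        throw std::invalid_argument("code point out of range");
    }
    return out;
}

The test itself takes the vocab file on the command line, e.g. ./test-tokenizer-1-spm models/ggml-vocab-llama-spm.gguf (the vocab path here is illustrative), and exits non-zero on the first mismatch.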