Diffstat (limited to 'tests')
-rw-r--r--  tests/CMakeLists.txt                                                            | 128
-rw-r--r--  tests/test-tokenizer-0-bpe.py (renamed from tests/test-tokenizer-0-falcon.py)   |  37
-rw-r--r--  tests/test-tokenizer-0-falcon.cpp                                               | 187
-rw-r--r--  tests/test-tokenizer-0-llama.cpp                                                | 190
-rw-r--r--  tests/test-tokenizer-0-spm.py (renamed from tests/test-tokenizer-0-llama.py)    |  22
-rw-r--r--  tests/test-tokenizer-0.cpp                                                      | 271
-rw-r--r--  tests/test-tokenizer-1-spm.cpp (renamed from tests/test-tokenizer-1-llama.cpp)  |   2
7 files changed, 422 insertions(+), 415 deletions(-)
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 89f23ca2..d23e7f77 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,10 +1,40 @@
+function(llama_test target)
+ include(CMakeParseArguments)
+ set(options)
+ set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
+ set(multiValueArgs ARGS)
+ cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
+
+ if (NOT DEFINED LLAMA_TEST_LABEL)
+ set(LLAMA_TEST_LABEL "main")
+ endif()
+ if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
+ set(LLAMA_TEST_WORKING_DIRECTORY .)
+ endif()
+ if (DEFINED LLAMA_TEST_NAME)
+ set(TEST_NAME ${LLAMA_TEST_NAME})
+ else()
+ set(TEST_NAME ${target})
+ endif()
+
+ set(TEST_TARGET ${target})
+
+ add_test(
+ NAME ${TEST_NAME}
+ WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
+ COMMAND $<TARGET_FILE:${TEST_TARGET}>
+ ${LLAMA_TEST_ARGS})
+
+ set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
+endfunction()
+
# Builds and runs a test source file.
# Optional args:
# - NAME: name of the executable & test target (defaults to the source file name without extension)
# - LABEL: label for the test (defaults to main)
# - ARGS: arguments to pass to the test executable
# - WORKING_DIRECTORY
-function(llama_test source)
+function(llama_target_and_test source)
include(CMakeParseArguments)
set(options)
set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
@@ -35,41 +65,67 @@ function(llama_test source)
set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${LLAMA_TEST_LABEL})
endfunction()
-# llama_test(test-double-float.cpp) # SLOW
-llama_test(test-quantize-fns.cpp)
-llama_test(test-quantize-perf.cpp)
-llama_test(test-sampling.cpp)
-llama_test(test-chat-template.cpp)
-
-llama_test(test-tokenizer-0-llama.cpp NAME test-tokenizer-0-llama ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
-llama_test(test-tokenizer-0-falcon.cpp NAME test-tokenizer-0-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
-
-llama_test(test-tokenizer-1-llama.cpp NAME test-tokenizer-1-llama ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
-llama_test(test-tokenizer-1-llama.cpp NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
-
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-stablelm-3b-4e1t ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-stablelm-3b-4e1t.gguf)
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
-llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-gpt2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt2.gguf)
-#llama_test(test-tokenizer-1-bpe.cpp NAME test-tokenizer-1-bloom ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bloom.gguf) # BIG
-
-llama_test(test-grammar-parser.cpp)
-llama_test(test-llama-grammar.cpp)
-llama_test(test-grammar-integration.cpp)
-llama_test(test-grad0.cpp)
-# llama_test(test-opt.cpp) # SLOW
-llama_test(test-backend-ops.cpp)
-
-llama_test(test-rope.cpp)
-
-llama_test(test-model-load-cancel.cpp LABEL "model")
-llama_test(test-autorelease.cpp LABEL "model")
-
-llama_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
+# build test-tokenizer-0 target once and add many tests
+add_executable(test-tokenizer-0 test-tokenizer-0.cpp)
+target_link_libraries(test-tokenizer-0 PRIVATE common)
+install(TARGETS test-tokenizer-0 RUNTIME)
+
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-phi-3.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-llm.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-deepseek-coder.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bert-bge.gguf)
+# TODO: enable when fixed
+#llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
+llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-2.gguf)
+
+# build test-tokenizer-1-bpe target once and add many tests
+add_executable(test-tokenizer-1-bpe test-tokenizer-1-bpe.cpp)
+target_link_libraries(test-tokenizer-1-bpe PRIVATE common)
+install(TARGETS test-tokenizer-1-bpe RUNTIME)
+
+# TODO: disabled due to slowness
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-bpe.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-mpt.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-stablelm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-stablelm.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt-neox.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-refact.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-starcoder.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt2 ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-gpt2.gguf)
+#llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-bloom ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-bloom.gguf)
+
+# build test-tokenizer-1-spm target once and add many tests
+add_executable(test-tokenizer-1-spm test-tokenizer-1-spm.cpp)
+target_link_libraries(test-tokenizer-1-spm PRIVATE common)
+install(TARGETS test-tokenizer-1-spm RUNTIME)
+
+llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama-spm.gguf)
+#llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-baichuan.gguf)
+
+# llama_target_and_test(test-double-float.cpp) # SLOW
+llama_target_and_test(test-quantize-fns.cpp)
+llama_target_and_test(test-quantize-perf.cpp)
+llama_target_and_test(test-sampling.cpp)
+llama_target_and_test(test-chat-template.cpp)
+
+llama_target_and_test(test-grammar-parser.cpp)
+llama_target_and_test(test-llama-grammar.cpp)
+llama_target_and_test(test-grammar-integration.cpp)
+llama_target_and_test(test-grad0.cpp)
+# llama_target_and_test(test-opt.cpp) # SLOW
+llama_target_and_test(test-backend-ops.cpp)
+
+llama_target_and_test(test-rope.cpp)
+
+llama_target_and_test(test-model-load-cancel.cpp LABEL "model")
+llama_target_and_test(test-autorelease.cpp LABEL "model")
+
+llama_target_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/..)
target_include_directories(test-json-schema-to-grammar PRIVATE ${CMAKE_CURRENT_SOURCE_DIR}/../examples/server)
# dummy executable - not installed
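[note] The CMake refactor above separates target creation from test registration: each tokenizer binary is built once, then registered under many test names, one per vocab file. What ctest ends up executing for each registration is just the binary with the vocab path as its only argument. A minimal sketch of the equivalent manual invocation (the build-directory layout here is an assumption, adjust to your setup):

    # run one of the registered tokenizer tests by hand, as ctest would;
    # the "build/bin" path is an assumption about the CMake build layout
    import subprocess

    subprocess.run(
        ["build/bin/test-tokenizer-0", "models/ggml-vocab-llama-spm.gguf"],
        check=True,  # a non-zero exit code (the test's failure status) raises CalledProcessError
    )
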
diff --git a/tests/test-tokenizer-0-falcon.py b/tests/test-tokenizer-0-bpe.py
index 4f06ec9b..33a27244 100644
--- a/tests/test-tokenizer-0-falcon.py
+++ b/tests/test-tokenizer-0-bpe.py
@@ -1,4 +1,11 @@
# tests with BPE tokenizer
+#
+# sample usage:
+#
+# python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/Meta-Llama-3-8B-Instruct/
+# python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/falcon-7b/
+# python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/deepseek-coder-6.7b-instruct/
+#
import argparse
@@ -20,6 +27,8 @@ tests = [
" ",
"\t",
"\n",
+ "\n\n",
+ "\n\n\n",
"\t\n",
"Hello world",
" Hello world",
@@ -39,8 +48,19 @@ tests = [
" Hello",
" Hello",
" Hello\n Hello",
+ " (",
"\n =",
"' era",
+ "Hello, y'all! How are you 😁 ?ζˆ‘ζƒ³εœ¨appleε·₯作1314151倩~",
+ "3",
+ "33",
+ "333",
+ "3333",
+ "33333",
+ "333333",
+ "3333333",
+ "33333333",
+ "333333333",
]
for text in tests:
@@ -76,7 +96,22 @@ if fname_tok:
# write to file
with open(fname_out, 'w', encoding='utf-8') as f:
for x in res:
- f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
+ # LLaMA v3 for some reason strips the space for these tokens (and others)
+ # if x == 662:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 1174:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 2564:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 758:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 949:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # elif x == 5354:
+ # f.write(str(x) + ' \' ' + tokenizer.decode(x) + '\'\n')
+ # else:
+ # f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
+ f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
print('len(res): ', len(res))
print('len(lines): ', len(lines))
print('results written to: ', fname_out)
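[note] The strings in the `tests` list above are the same inputs the unified C++ test consumes via paired `<vocab>.inp`/`<vocab>.out` files. A minimal sketch of how such a pair can be produced with a Hugging Face tokenizer — the separator string matches what `read_tests()` in tests/test-tokenizer-0.cpp parses, but the model path and output filenames here are placeholders, not the project's actual generator:

    # write a <vocab>.inp / <vocab>.out pair in the format read_tests() expects;
    # a sketch under assumptions, not the project's own tooling
    from transformers import AutoTokenizer  # assumes the transformers package

    SEP = "\n__ggml_vocab_test__\n"  # separator used by tests/test-tokenizer-0.cpp
    tests = ["", " ", "Hello world", "\n\n", "333333333"]  # abbreviated list

    tok = AutoTokenizer.from_pretrained("path/to/model")  # hypothetical path
    with open("ggml-vocab-example.gguf.inp", "w", encoding="utf-8") as f:
        f.write(SEP.join(tests))  # no trailing separator
    with open("ggml-vocab-example.gguf.out", "w", encoding="utf-8") as f:
        for text in tests:
            ids = tok.encode(text, add_special_tokens=False)
            f.write(" ".join(str(i) for i in ids) + "\n")  # one line of ids per test
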
diff --git a/tests/test-tokenizer-0-falcon.cpp b/tests/test-tokenizer-0-falcon.cpp
deleted file mode 100644
index 472b0b3a..00000000
--- a/tests/test-tokenizer-0-falcon.cpp
+++ /dev/null
@@ -1,187 +0,0 @@
-#include "llama.h"
-#include "common.h"
-#include "console.h"
-
-#include <cstdio>
-#include <string>
-#include <map>
-#include <vector>
-#include <fstream>
-
-// generate using test-tokenizer-0-falcon.py
-static const std::map<std::string, std::vector<llama_token>> & k_tests() {
- static std::map<std::string, std::vector<llama_token>> _k_tests = {
- { "" , { }, },
- { " " , { 204, }, },
- { " " , { 258, }, },
- { " " , { 466, }, },
- { "\t" , { 192, }, },
- { "\n" , { 193, }, },
- { "\t\n" , { 19125, }, },
- { "Hello world" , { 9856, 1079, }, },
- { " Hello world" , { 23090, 1079, }, },
- { "Hello World" , { 9856, 2889, }, },
- { " Hello World" , { 23090, 2889, }, },
- { " Hello World!" , { 23090, 2889, 12, }, },
- { "Hello, world!" , { 9856, 23, 1079, 12, }, },
- { " Hello, world!" , { 23090, 23, 1079, 12, }, },
- { " this is πŸ¦™.cpp" , { 414, 304, 3346, 111, 231, 25, 29247, }, },
- { "w048 7tuijk dsdfhu" , { 98, 55866, 204, 34, 16682, 7149, 36190, 6869, 11481, }, },
- { "Π½Π΅Ρ‰ΠΎ Π½Π° Π‘ΡŠΠ»Π³Π°Ρ€ΡΠΊΠΈ" , { 150, 133, 6207, 151, 215, 150, 134, 5052, 133, 6279, 5052, 223, 151, 216, 49679, 123, 53110, 47043, 7795, }, },
- { "αž€αžΆαž“αŸ‹αžαŸ‚αž–αž·αžŸαŸαžŸαž’αžΆαž…αžαž›αž…αŸαž‰" , { 38154, 206, 38154, 126, 38154, 225, 167, 237, 217, 38154, 221, 167, 237, 208, 38154, 228, 38154, 127, 38154, 237, 167, 237, 207, 38154, 237, 38154, 107, 38154, 126, 38154, 211, 38154, 207, 38154, 233, 38154, 211, 167, 237, 207, 38154, 215, }, },
- { "πŸš€ (normal) πŸ˜Άβ€πŸŒ«οΈ (multiple emojis concatenated) βœ… (only emoji that has its own token)", { 2571, 232, 206, 204, 19, 11003, 20, 8196, 126, 283, 219, 48778, 116, 13392, 204, 19, 51831, 732, 63209, 1741, 7955, 522, 20, 22438, 211, 204, 19, 7927, 53360, 325, 504, 701, 946, 10930, 20, }, },
- { "Hello" , { 9856, }, },
- { " Hello" , { 23090, }, },
- { " Hello" , { 204, 23090, }, },
- { " Hello" , { 258, 23090, }, },
- { " Hello" , { 466, 23090, }, },
- { " Hello\n Hello" , { 466, 23090, 742, 23090, }, },
- { "\n =" , { 1212, 40, }, },
- { "' era" , { 18, 4932, }, },
- };
-
- return _k_tests;
-}
-
-int main(int argc, char **argv) {
- if (argc < 2) {
- fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
- return 1;
- }
-
- const std::string fname = argv[1];
-
- std::string fname_text;
- if (argc > 2) {
- fname_text = argv[2];
- }
-
- fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
-
- llama_model * model;
- llama_context * ctx;
-
- llama_backend_init();
-
- // load the vocab
- {
- auto mparams = llama_model_default_params();
-
- mparams.vocab_only = true;
-
- model = llama_load_model_from_file(fname.c_str(), mparams);
-
- if (model == NULL) {
- fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
- return 1;
- }
-
- auto cparams = llama_context_default_params();
-
- ctx = llama_new_context_with_model(model, cparams);
-
- if (ctx == NULL) {
- fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
- llama_free_model(model);
- return 1;
- }
- }
-
- if (llama_vocab_type(model) != LLAMA_VOCAB_TYPE_BPE) {
- fprintf(stderr, "%s : error: vocab type is not BPE\n", __func__);
- llama_free_model(model);
- llama_free(ctx);
- return 2;
- }
-
-#ifdef _WIN32
- // We need this for unicode console support
- console::init(false, false);
- atexit([]() { console::cleanup(); });
-#endif
-
- bool success = true;
-
- for (const auto & test_kv : k_tests()) {
- const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, false);
-
- printf("\n");
- printf("src: '%s'\n", test_kv.first.c_str());
- printf("res: '%s'\n", llama_detokenize_bpe(ctx, res).c_str());
- printf("tok: ");
- for (const auto & tok : res) {
- printf("%d ", tok);
- }
- printf("\n");
-
- bool correct = res.size() == test_kv.second.size();
-
- for (int i = 0; i < (int) res.size() && correct; ++i) {
- if (test_kv.second[i] != res[i]) {
- correct = false;
- }
- }
-
- if (!correct) {
- fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
- fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
- llama_detokenize_bpe(ctx, res).c_str(),
- llama_detokenize_bpe(ctx, test_kv.second).c_str());
- fprintf(stderr, "%s : expected tokens: ", __func__);
- for (const auto & t : test_kv.second) {
- fprintf(stderr, "%6d, ", t);
- }
- fprintf(stderr, "\n");
- fprintf(stderr, "%s : got tokens: ", __func__);
- for (const auto & t : res) {
- fprintf(stderr, "%6d, ", t);
- }
- fprintf(stderr, "\n");
-
- success = false;
- }
- }
-
- if (!fname_text.empty()) {
- fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());
-
- std::string text;
- {
- std::ifstream ifs(fname_text);
- if (!ifs) {
- fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
- return 1;
- }
- text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
- }
-
- fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());
-
- const std::vector<llama_token> res = llama_tokenize(ctx, text, false);
-
- fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
-
- {
- const std::string fname_out = fname_text + ".tokcpp";
-
- std::ofstream ofs(fname_out);
- if (!ofs) {
- fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
- return 1;
- }
-
- for (const auto & tok : res) {
- ofs << tok << " '" << llama_detokenize_bpe(ctx, std::vector<int>{tok}) << "'" << std::endl;
- }
- }
-
- fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
- }
-
- llama_free_model(model);
- llama_free(ctx);
-
- llama_backend_free();
-
- return success ? 0 : 3;
-}
diff --git a/tests/test-tokenizer-0-llama.cpp b/tests/test-tokenizer-0-llama.cpp
deleted file mode 100644
index 0a16cd7e..00000000
--- a/tests/test-tokenizer-0-llama.cpp
+++ /dev/null
@@ -1,190 +0,0 @@
-#include "llama.h"
-#include "common.h"
-#include "console.h"
-
-#include <cstdio>
-#include <string>
-#include <map>
-#include <vector>
-#include <fstream>
-
-// generate using test-tokenizer-0-llama.py
-static const std::map<std::string, std::vector<llama_token>> & k_tests() {
- static std::map<std::string, std::vector<llama_token>> _k_tests = {
- { "" , { }, },
- { " " , { 259, }, },
- { " " , { 1678, }, },
- { " " , { 268, }, },
- { "\t" , { 29871, 12, }, },
- { "\n" , { 29871, 13, }, },
- { "\t\n" , { 29871, 12, 13, }, },
- { "Hello world" , { 15043, 3186, }, },
- { " Hello world" , { 29871, 15043, 3186, }, },
- { "Hello World" , { 15043, 2787, }, },
- { " Hello World" , { 29871, 15043, 2787, }, },
- { " Hello World!" , { 29871, 15043, 2787, 29991, }, },
- { "Hello, world!" , { 15043, 29892, 3186, 29991, }, },
- { " Hello, world!" , { 29871, 15043, 29892, 3186, 29991, }, },
- { " this is πŸ¦™.cpp" , { 29871, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
- { "w048 7tuijk dsdfhu" , { 281, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
- { "Π½Π΅Ρ‰ΠΎ Π½Π° Π‘ΡŠΠ»Π³Π°Ρ€ΡΠΊΠΈ" , { 1538, 4851, 665, 1386, 29713, 1305, }, },
- { "αž€αžΆαž“αŸ‹αžαŸ‚αž–αž·αžŸαŸαžŸαž’αžΆαž…αžαž›αž…αŸαž‰" , { 29871, 31849, 31324, 31934, 228, 162, 142, 228, 161, 146, 228, 162, 133, 228, 161, 153, 228, 161, 186, 31708, 228, 162, 132, 31708, 228, 161, 165, 31324, 228, 161, 136, 228, 161, 132, 228, 161, 158, 228, 161, 136, 228, 162, 132, 228, 161, 140, }, },
- { "πŸš€ (normal) πŸ˜Άβ€πŸŒ«οΈ (multiple emojis concatenated) βœ… (only emoji that has its own token)", { 29871, 243, 162, 157, 131, 313, 8945, 29897, 29871, 243, 162, 155, 185, 30722, 243, 162, 143, 174, 30598, 313, 20787, 953, 3848, 275, 16125, 630, 29897, 29871, 31681, 313, 6194, 953, 29877, 2397, 393, 756, 967, 1914, 5993, 29897, }, },
- { "Hello" , { 15043, }, },
- { " Hello" , { 29871, 15043, }, },
- { " Hello" , { 259, 15043, }, },
- { " Hello" , { 1678, 15043, }, },
- { " Hello" , { 268, 15043, }, },
- { " Hello\n Hello" , { 268, 15043, 13, 1678, 15043, }, },
- { " (" , { 29871, 313, }, },
- };
-
- return _k_tests;
-}
-
-int main(int argc, char **argv) {
- if (argc < 2) {
- fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
- return 1;
- }
-
- const std::string fname = argv[1];
-
- std::string fname_text;
- if (argc > 2) {
- fname_text = argv[2];
- }
-
- fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
-
- llama_model * model;
- llama_context * ctx;
-
- llama_backend_init();
-
- // load the vocab
- {
- auto mparams = llama_model_default_params();
-
- mparams.vocab_only = true;
-
- model = llama_load_model_from_file(fname.c_str(), mparams);
-
- if (model == NULL) {
- fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
- return 1;
- }
-
- auto cparams = llama_context_default_params();
-
- ctx = llama_new_context_with_model(model, cparams);
-
- if (ctx == NULL) {
- fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
- llama_free_model(model);
- return 1;
- }
- }
-
- if (llama_vocab_type(model) != LLAMA_VOCAB_TYPE_SPM) {
- fprintf(stderr, "%s : error: vocab type is not SPM\n", __func__);
- llama_free_model(model);
- llama_free(ctx);
- return 2;
- }
-
-#ifdef _WIN32
- // We need this for unicode console support
- console::init(false, false);
- atexit([]() { console::cleanup(); });
-#endif
-
- bool success = true;
-
- for (const auto & test_kv : k_tests()) {
- const std::vector<llama_token> res_bos = llama_tokenize(ctx, test_kv.first, true);
- const std::vector<llama_token> res_nobos = llama_tokenize(ctx, test_kv.first, false);
-
- printf("\n");
- printf("src: '%s'\n", test_kv.first.c_str());
- printf("res: '%s'\n", llama_detokenize_spm(ctx, res_bos).c_str());
- printf("tok: ");
- for (const auto & tok : res_bos) {
- printf("%d ", tok);
- }
- printf("\n");
-
- bool correct = res_nobos.size() == test_kv.second.size() && res_bos.size() == res_nobos.size() + 1 && res_bos[0] == 1;
-
- for (int i = 0; i < (int) res_nobos.size() && correct; ++i) {
- if (test_kv.second[i] != res_bos[i + 1]) {
- correct = false;
- }
- if (test_kv.second[i] != res_nobos[i]) {
- correct = false;
- }
- }
-
- if (!correct) {
- fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
- fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
- llama_detokenize_spm(ctx, res_nobos).c_str(),
- llama_detokenize_spm(ctx, test_kv.second).c_str());
- fprintf(stderr, "%s : expected tokens: ", __func__);
- for (const auto & t : test_kv.second) {
- fprintf(stderr, "%6d, ", t);
- }
- fprintf(stderr, "\n");
- fprintf(stderr, "%s : got tokens: ", __func__);
- for (const auto & t : res_nobos) {
- fprintf(stderr, "%6d, ", t);
- }
- fprintf(stderr, "\n");
-
- success = false;
- }
- }
-
- if (!fname_text.empty()) {
- fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());
-
- std::string text;
- {
- std::ifstream ifs(fname_text);
- if (!ifs) {
- fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
- return 1;
- }
- text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
- }
-
- fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());
-
- const std::vector<llama_token> res = llama_tokenize(ctx, text, true);
-
- fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
-
- {
- const std::string fname_out = fname_text + ".tokcpp";
-
- std::ofstream ofs(fname_out);
- if (!ofs) {
- fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
- return 1;
- }
-
- for (const auto & tok : res) {
- ofs << tok << " '" << llama_detokenize_spm(ctx, std::vector<int>{tok}) << "'" << std::endl;
- }
- }
-
- fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
- }
-
- llama_free_model(model);
- llama_free(ctx);
-
- llama_backend_free();
-
- return success ? 0 : 3;
-}
diff --git a/tests/test-tokenizer-0-llama.py b/tests/test-tokenizer-0-spm.py
index f3d4d7e3..be12a6b9 100644
--- a/tests/test-tokenizer-0-llama.py
+++ b/tests/test-tokenizer-0-spm.py
@@ -1,4 +1,11 @@
# tests with SPM tokenizer
+#
+# sample usage:
+#
+# python3 tests/test-tokenizer-0-spm.py ~/Data/huggingface/Llama-2-7b-hf/
+# python3 tests/test-tokenizer-0-spm.py ~/Data/huggingface/CodeLlama-34b-Instruct-hf/
+#
+
import argparse
@@ -20,6 +27,8 @@ tests = [
" ",
"\t",
"\n",
+ "\n\n",
+ "\n\n\n",
"\t\n",
"Hello world",
" Hello world",
@@ -39,6 +48,19 @@ tests = [
" Hello",
" Hello",
" Hello\n Hello",
+ " (",
+ "\n =",
+ "' era",
+ "Hello, y'all! How are you 😁 ?ζˆ‘ζƒ³εœ¨appleε·₯作1314151倩~",
+ "3",
+ "33",
+ "333",
+ "3333",
+ "33333",
+ "333333",
+ "3333333",
+ "33333333",
+ "333333333",
]
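[note] The runs of repeated '3' characters probe how the merge rules split long digit strings, which is where BPE and SPM vocabularies tend to diverge. A quick way to inspect the splits with a Hugging Face tokenizer (the model path is a placeholder):

    # show how digit runs get segmented; the path is a placeholder
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("path/to/Llama-2-7b-hf")
    for s in ["3", "33", "333", "3333", "33333333"]:
        print(repr(s), tok.tokenize(s))
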
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
new file mode 100644
index 00000000..5122757c
--- /dev/null
+++ b/tests/test-tokenizer-0.cpp
@@ -0,0 +1,271 @@
+#include "llama.h"
+#include "common.h"
+#include "console.h"
+
+#include <cstdio>
+#include <string>
+#include <map>
+#include <vector>
+#include <fstream>
+
+//static const std::map<std::string, std::vector<llama_token>> & k_tests() {
+// static std::map<std::string, std::vector<llama_token>> _k_tests = {
+// { "" , { }, },
+// { " " , { 220, }, },
+// { " " , { 256, }, },
+// { " " , { 262, }, },
+// { "\t" , { 197, }, },
+// { "\n" , { 198, }, },
+// { "\n\n" , { 271, }, },
+// { "\n\n\n" , { 1432, }, },
+// { "\t\n" , { 1602, }, },
+// { "Hello world" , { 9906, 1917, }, },
+// { " Hello world" , { 22691, 1917, }, },
+// { "Hello World" , { 9906, 4435, }, },
+// { " Hello World" , { 22691, 4435, }, },
+// { " Hello World!" , { 22691, 4435, 0, }, },
+// { "Hello, world!" , { 9906, 11, 1917, 0, }, },
+// { " Hello, world!" , { 22691, 11, 1917, 0, }, },
+// { " this is πŸ¦™.cpp" , { 420, 374, 11410, 99, 247, 13, 11055, }, },
+// { "w048 7tuijk dsdfhu" , { 86, 23904, 220, 22, 83, 2005, 42908, 11729, 3013, 17156, }, },
+// { "Π½Π΅Ρ‰ΠΎ Π½Π° Π‘ΡŠΠ»Π³Π°Ρ€ΡΠΊΠΈ" , { 79862, 102118, 13373, 64571, 34694, 3114, 112203, 80112, }, },
+// { "αž€αžΆαž“αŸ‹αžαŸ‚αž–αž·αžŸαŸαžŸαž’αžΆαž…αžαž›αž…αŸαž‰" , { 21549, 222, 98629, 241, 45358, 233, 21549, 237, 45358, 224, 21549, 244, 21549, 115, 21549, 253, 45358, 223, 21549, 253, 21549, 95, 98629, 227, 21549, 223, 21549, 249, 21549, 227, 45358, 223, 21549, 231, }, },
+// { "πŸš€ (normal) πŸ˜Άβ€πŸŒ«οΈ (multiple emojis concatenated) βœ… (only emoji that has its own token)", { 9468, 248, 222, 320, 8416, 8, 27623, 114, 102470, 9468, 234, 104, 31643, 320, 36773, 100166, 98634, 8, 26602, 227, 320, 3323, 43465, 430, 706, 1202, 1866, 4037, 8, }, },
+// { "Hello" , { 9906, }, },
+// { " Hello" , { 22691, }, },
+// { " Hello" , { 220, 22691, }, },
+// { " Hello" , { 256, 22691, }, },
+// { " Hello" , { 262, 22691, }, },
+// { " Hello\n Hello" , { 262, 22691, 198, 262, 22691, }, },
+// { " (" , { 320, }, },
+// { "\n =" , { 198, 284, }, },
+// { "' era" , { 6, 11639, }, },
+// { "Hello, y'all! How are you 😁 ?ζˆ‘ζƒ³εœ¨appleε·₯作1314151倩~", { 9906, 11, 379, 65948, 0, 2650, 527, 499, 27623, 223, 949, 37046, 101067, 19000, 23182, 102301, 9263, 18136, 16, 36827, 21909, }, },
+// { "3" , { 18, }, },
+// { "33" , { 1644, }, },
+// { "333" , { 8765, }, },
+// { "3333" , { 8765, 18, }, },
+// { "33333" , { 8765, 1644, }, },
+// { "333333" , { 8765, 8765, }, },
+// { "3333333" , { 8765, 8765, 18, }, },
+// { "33333333" , { 8765, 8765, 1644, }, },
+// { "333333333" , { 8765, 8765, 8765, }, },
+// };
+//
+// return _k_tests;
+//}
+
+static std::map<std::string, std::vector<llama_token>> read_tests(const std::string & fname_inp, const std::string & fname_out) {
+ std::map<std::string, std::vector<llama_token>> tests;
+
+ std::ifstream ifs_inp(fname_inp);
+ if (!ifs_inp) {
+ fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_inp.c_str());
+ return tests;
+ }
+
+ std::string sraw((std::istreambuf_iterator<char>(ifs_inp)), std::istreambuf_iterator<char>());
+
+ std::ifstream ifs_out(fname_out);
+ if (!ifs_out) {
+ fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
+ return tests;
+ }
+
+ std::vector<std::string> sout;
+ for (std::string line; std::getline(ifs_out, line);) {
+ sout.push_back(line);
+ }
+
+ const std::string sep = "\n__ggml_vocab_test__\n";
+
+ std::vector<std::string> sinp;
+
+ size_t pos = 0;
+ while (pos < sraw.size()) {
+ const size_t next = sraw.find(sep, pos);
+ if (next == std::string::npos) {
+ sinp.push_back(sraw.substr(pos));
+ break;
+ }
+ sinp.push_back(sraw.substr(pos, next - pos));
+ pos = next + sep.size();
+ }
+
+ if (sinp.size() != sout.size()) {
+ fprintf(stderr, "%s : error: input and output files have different number of tests\n", __func__);
+ return tests;
+ }
+
+ for (size_t i = 0; i < sinp.size(); ++i) {
+ const std::string & s = sinp[i];
+ const std::string & o = string_strip(sout[i]);
+
+ std::vector<llama_token> toks;
+
+ size_t pos = 0;
+ while (pos < o.size()) {
+ size_t next = o.find(' ', pos);
+ if (next == std::string::npos) {
+ next = o.size();
+ }
+ const std::string stok = o.substr(pos, next - pos);
+ toks.push_back(std::stoi(stok));
+ pos = next + 1;
+ }
+
+ tests[s] = toks;
+ }
+
+ return tests;
+}
+
+int main(int argc, char **argv) {
+ if (argc < 2) {
+ fprintf(stderr, "Usage: %s vocab-file [text-file]\n", argv[0]);
+ return 1;
+ }
+
+ const std::string fname = argv[1];
+
+ const std::string fname_inp = fname + ".inp";
+ const std::string fname_out = fname + ".out";
+
+ std::string fname_text;
+ if (argc > 2) {
+ fname_text = argv[2];
+ }
+
+ fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
+
+ llama_model * model;
+ llama_context * ctx;
+
+ llama_backend_init();
+
+ // load the vocab
+ {
+ auto mparams = llama_model_default_params();
+
+ mparams.vocab_only = true;
+
+ model = llama_load_model_from_file(fname.c_str(), mparams);
+
+ if (model == NULL) {
+ fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+ return 1;
+ }
+
+ auto cparams = llama_context_default_params();
+
+ ctx = llama_new_context_with_model(model, cparams);
+
+ if (ctx == NULL) {
+ fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+ llama_free_model(model);
+ return 1;
+ }
+ }
+
+#ifdef _WIN32
+ // We need this for unicode console support
+ console::init(false, false);
+ atexit([]() { console::cleanup(); });
+#endif
+
+ bool success = true;
+
+ const auto k_tests = read_tests(fname_inp, fname_out);
+
+ if (k_tests.empty()) {
+ fprintf(stderr, "%s : error: no tests found\n", __func__);
+ return 1;
+ }
+
+ const bool add_special = false;
+
+ for (const auto & test_kv : k_tests) {
+ const std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, add_special);
+
+ printf("\n");
+ printf("src: '%s'\n", test_kv.first.c_str());
+ printf("res: '%s'\n", llama_detokenize_bpe(ctx, res).c_str());
+ printf("tok: ");
+ for (const auto & tok : res) {
+ printf("%d ", tok);
+ }
+ printf("\n");
+
+ bool correct = res.size() == test_kv.second.size();
+ for (int i = 0; i < (int) res.size() && correct; ++i) {
+ if (test_kv.second[i] != res[i]) {
+ correct = false;
+ }
+ }
+
+ if (!correct) {
+ fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
+ fprintf(stderr, "%s : detokenized to: '%s' instead of '%s'\n", __func__,
+ llama_detokenize_bpe(ctx, res).c_str(),
+ llama_detokenize_bpe(ctx, test_kv.second).c_str());
+ fprintf(stderr, "%s : expected tokens: ", __func__);
+ for (const auto & t : test_kv.second) {
+ fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
+ }
+ fprintf(stderr, "\n");
+ fprintf(stderr, "%s : got tokens: ", __func__);
+ for (const auto & t : res) {
+ fprintf(stderr, "%6d '%s', ", t, llama_token_to_piece(ctx, t).c_str());
+ }
+ fprintf(stderr, "\n");
+
+ success = false;
+ }
+ }
+
+ if (!fname_text.empty()) {
+ fprintf(stderr, "%s : tokenizing: '%s'\n", __func__, fname_text.c_str());
+
+ std::string text;
+ {
+ std::ifstream ifs(fname_text);
+ if (!ifs) {
+ fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_text.c_str());
+ return 1;
+ }
+ text = std::string(std::istreambuf_iterator<char>(ifs), std::istreambuf_iterator<char>());
+ }
+
+ fprintf(stderr, "%s : text size: %zu\n", __func__, text.size());
+
+ const std::vector<llama_token> res = llama_tokenize(ctx, text, add_special);
+
+ fprintf(stderr, "%s : tokens: %zu\n", __func__, res.size());
+
+ {
+ const std::string fname_out = fname_text + ".tokcpp";
+
+ std::ofstream ofs(fname_out);
+ if (!ofs) {
+ fprintf(stderr, "%s : error: could not open file '%s'\n", __func__, fname_out.c_str());
+ return 1;
+ }
+
+ for (const auto & tok : res) {
+ ofs << tok << " '" << string_strip(llama_detokenize_bpe(ctx, std::vector<int>{tok})) << "'" << std::endl;
+ }
+ }
+
+ fprintf(stderr, "%s : tokens written to '%s'\n", __func__, (fname_text + ".tokcpp").c_str());
+ }
+
+ llama_free_model(model);
+ llama_free(ctx);
+
+ llama_backend_free();
+
+ printf("\n");
+ printf("Tests %s\n", success ? "passed" : "failed");
+
+ return success ? 0 : 3;
+}
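[note] For reference, the parsing protocol of `read_tests()` above in a few lines of Python: split the .inp file on the separator and pair each chunk with the corresponding line of token ids from the .out file. This is a sketch mirroring the C++ (it assumes the .inp file has no trailing separator, which matches how a `SEP.join(...)` writer produces it):

    # Python mirror of read_tests() from tests/test-tokenizer-0.cpp — a sketch
    SEP = "\n__ggml_vocab_test__\n"

    def read_tests(fname_inp: str, fname_out: str) -> dict[str, list[int]]:
        with open(fname_inp, encoding="utf-8") as f:
            sinp = f.read().split(SEP)          # one chunk per test string
        with open(fname_out, encoding="utf-8") as f:
            sout = f.read().splitlines()        # one line of token ids per test
        assert len(sinp) == len(sout), "input and output files have different number of tests"
        return {s: [int(t) for t in o.split()] for s, o in zip(sinp, sout)}
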
diff --git a/tests/test-tokenizer-1-llama.cpp b/tests/test-tokenizer-1-spm.cpp
index 8caf0b24..ac2333dd 100644
--- a/tests/test-tokenizer-1-llama.cpp
+++ b/tests/test-tokenizer-1-spm.cpp
@@ -12,7 +12,7 @@
#include <thread>
#include <vector>
-int main(int argc, char **argv) {
+int main(int argc, char ** argv) {
if (argc < 2) {
fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
return 1;