Diffstat (limited to 'tests')
 -rw-r--r--  tests/CMakeLists.txt          |  41
 -rw-r--r--  tests/test-grammar-parser.cpp |   3
 -rw-r--r--  tests/test-llama-grammar.cpp  |   6
 -rw-r--r--  tests/test-tokenizer-0.cpp    |  60
 -rw-r--r--  tests/test-tokenizer-1.cpp    | 131
 5 files changed, 209 insertions(+), 32 deletions(-)
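
Note: the CMakeLists.txt hunk below splits the old llama_add_test helper in two: llama_build_executable compiles and links a test binary once, and llama_test_executable registers an additional CTest case that reruns that same binary with a different argument (here, a vocab file). A minimal usage sketch of the pattern follows; the falcon vocab path and test name are hypothetical illustrations, not part of this change (only the llama vocab, plus a commented-out aquila variant, appears below):

    # build the tokenizer test binary once; the target name is derived from
    # the source file name (NAME_WE), so later calls can refer back to it
    llama_build_executable(test-tokenizer-1.cpp)

    # register one CTest case per vocab file, all reusing the same binary
    llama_test_executable(test-tokenizer-1.llama test-tokenizer-1.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)

    # hypothetical second vocab, for illustration only
    llama_test_executable(test-tokenizer-1.falcon test-tokenizer-1.cpp
        ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-falcon.gguf)
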
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 276f39b3..4ccefe93 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,17 +1,36 @@
-function(llama_add_test source)
+function(llama_build_executable source)
     get_filename_component(TEST_TARGET ${source} NAME_WE)
     add_executable(${TEST_TARGET} ${source})
     install(TARGETS ${TEST_TARGET} RUNTIME)
-    target_link_libraries(${TEST_TARGET} PRIVATE llama)
+    target_link_libraries(${TEST_TARGET} PRIVATE llama common)
+endfunction()
+
+function(llama_test_executable name source)
+    get_filename_component(TEST_TARGET ${source} NAME_WE)
+    # add_executable(${TEST_TARGET} ${source})
+    # install(TARGETS ${TEST_TARGET} RUNTIME)
+    # target_link_libraries(${TEST_TARGET} PRIVATE llama)
+    add_test(NAME ${name} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
+endfunction()
+
+function(llama_build_and_test_executable source)
+    get_filename_component(TEST_TARGET ${source} NAME_WE)
+    add_executable(${TEST_TARGET} ${source})
+    install(TARGETS ${TEST_TARGET} RUNTIME)
+    target_link_libraries(${TEST_TARGET} PRIVATE llama common)
     add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
 endfunction()
 
-# llama_add_test(test-double-float.cpp) # SLOW
-llama_add_test(test-quantize-fns.cpp)
-llama_add_test(test-quantize-perf.cpp)
-llama_add_test(test-sampling.cpp)
-llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
-llama_add_test(test-grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp)
-llama_add_test(test-llama-grammar.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/grammar-parser.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../llama.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../examples/common.cpp)
-llama_add_test(test-grad0.cpp) # SLOW
-# llama_add_test(test-opt.cpp) # SLOW
+# llama_build_and_test_executable(test-double-float.cpp) # SLOW
+llama_build_and_test_executable(test-quantize-fns.cpp)
+llama_build_and_test_executable(test-quantize-perf.cpp)
+llama_build_and_test_executable(test-sampling.cpp)
+llama_build_executable(test-tokenizer-0.cpp)
+llama_test_executable (test-tokenizer-0.llama test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+llama_build_executable(test-tokenizer-1.cpp)
+llama_test_executable (test-tokenizer-1.llama test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-llama.gguf)
+#llama_test_executable(test-tokenizer-1.aquila test-tokenizer-1.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab-aquila.gguf)
+llama_build_and_test_executable(test-grammar-parser.cpp)
+llama_build_and_test_executable(test-llama-grammar.cpp)
+llama_build_and_test_executable(test-grad0.cpp) # SLOW
+# llama_build_and_test_executable(test-opt.cpp) # SLOW
diff --git a/tests/test-grammar-parser.cpp b/tests/test-grammar-parser.cpp
index 7022988b..a0b5b043 100644
--- a/tests/test-grammar-parser.cpp
+++ b/tests/test-grammar-parser.cpp
@@ -3,7 +3,8 @@
 #endif
 
 #include "llama.h"
-#include "examples/grammar-parser.cpp"
+#include "grammar-parser.h"
+
 #include <cassert>
 
 int main()
diff --git a/tests/test-llama-grammar.cpp b/tests/test-llama-grammar.cpp
index 81c31e9e..73dd33dd 100644
--- a/tests/test-llama-grammar.cpp
+++ b/tests/test-llama-grammar.cpp
@@ -2,9 +2,9 @@
 #undef NDEBUG
 #endif
 
-#include "llama.cpp"
-#include "examples/common.cpp"
-#include "examples/grammar-parser.cpp"
+#include "llama.cpp" // TODO: not great
+#include "grammar-parser.h"
+
 #include <cassert>
 
 int main()
diff --git a/tests/test-tokenizer-0.cpp b/tests/test-tokenizer-0.cpp
index 87fde164..81764565 100644
--- a/tests/test-tokenizer-0.cpp
+++ b/tests/test-tokenizer-0.cpp
@@ -1,22 +1,47 @@
 #include "llama.h"
+#include "common.h"
 
 #include <cstdio>
 #include <string>
 #include <map>
 #include <vector>
 
-static const std::map<std::string, std::vector<llama_token>> & k_tests()
-{
+static std::string unescape_whitespace(llama_context* ctx, const std::vector<llama_token>& tokens) {
+    std::string result;
+    for (size_t i = 0; i < tokens.size(); ++i) {
+        result += llama_token_to_str(ctx, tokens[i]);
+    }
+    return result;
+}
+
+static const std::map<std::string, std::vector<llama_token>> & k_tests() {
     static std::map<std::string, std::vector<llama_token>> _k_tests = {
-        { "Hello World",        { 1, 10994, 2787, }, },
-        { " Hello World",       { 1, 15043, 2787, }, },
-        { " Hello World!",      { 1, 15043, 2787, 29991, }, },
-        { " this is 🦙.cpp",    { 1, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
-        { "w048 7tuijk dsdfhu", { 1, 29893, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
-        { "нещо на Български",  { 1, 821, 4851, 665, 1386, 29713, 1305, }, },
+        { " ",                  { 1, 259, }, },
+        { "\t",                 { 1, 29871, 12, }, },
+        { "\n",                 { 1, 29871, 13, }, },
+        { "\t\n",               { 1, 29871, 12, 13, }, },
+        { "Hello world",        { 1, 15043, 3186, }, },
+        { " Hello world",       { 1, 29871, 15043, 3186, }, },
+        { "Hello World",        { 1, 15043, 2787, }, },
+        { " Hello World",       { 1, 29871, 15043, 2787, }, },
+        { " Hello World!",      { 1, 29871, 15043, 2787, 29991, }, },
+        { " this is 🦙.cpp",    { 1, 29871, 445, 338, 29871, 243, 162, 169, 156, 29889, 8223, }, },
+        { "w048 7tuijk dsdfhu", { 1, 281, 29900, 29946, 29947, 29871, 29955, 9161, 13535, 18031, 2176, 6905, }, },
+        { "нещо на Български",  { 1, 1538, 4851, 665, 1386, 29713, 1305, }, },
+        { "កាន់តែពិសេសអាចខលចេញ", { 1, 29871, 31849, 31324, 31934, 228, 162, 142, 228, 161,
+                                   146, 228, 162, 133, 228, 161, 153, 228, 161, 186,
+                                   31708, 228, 162, 132, 31708, 228, 161, 165, 31324, 228,
+                                   161, 136, 228, 161, 132, 228, 161, 158, 228, 161,
+                                   136, 228, 162, 132, 228, 161, 140, }, },
+        { "🚀 (normal) 😶🌫️ (multiple emojis concatenated) ✅ (only emoji that has its own token)",
+            { 1, 29871, 243, 162, 157, 131, 313, 8945, 29897, 29871,
+              243, 162, 155, 185, 30722, 243, 162, 143, 174, 30598,
+              313, 20787, 953, 3848, 275, 16125, 630, 29897, 29871, 31681,
+              313, 6194, 953, 29877, 2397, 393, 756, 967, 1914, 5993, 29897, }, },
     };
+
     return _k_tests;
-};
+}
 
 int main(int argc, char **argv) {
     if (argc < 2) {
@@ -64,10 +89,12 @@ int main(int argc, char **argv) {
         return 2;
     }
 
+    bool success = true;
+
     for (const auto & test_kv : k_tests()) {
-        std::vector<llama_token> res(test_kv.first.size());
-        const int n = llama_tokenize(ctx, test_kv.first.c_str(), res.data(), int(res.size()), true);
-        res.resize(n);
+        std::vector<llama_token> res = llama_tokenize(ctx, test_kv.first, true);
+        fprintf(stderr, "%s : '%s' tokenized to '%s'\n",
+            __func__, test_kv.first.c_str(), unescape_whitespace(ctx, res).c_str());
 
         bool correct = res.size() == test_kv.second.size();
 
@@ -78,7 +105,8 @@ int main(int argc, char **argv) {
         }
 
         if (!correct) {
-            fprintf(stderr, "%s : failed test: '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : failed test:    '%s'\n", __func__, test_kv.first.c_str());
+            fprintf(stderr, "%s : detokenized to: '%s'\n", __func__, unescape_whitespace(ctx, test_kv.second).c_str());
             fprintf(stderr, "%s : expected tokens: ", __func__);
             for (const auto & t : test_kv.second) {
                 fprintf(stderr, "%6d, ", t);
@@ -90,9 +118,7 @@ int main(int argc, char **argv) {
             }
             fprintf(stderr, "\n");
 
-            llama_free_model(model);
-            llama_free(ctx);
-            return 3;
+            success = false;
         }
     }
 
@@ -101,5 +127,5 @@ int main(int argc, char **argv) {
 
     llama_backend_free();
 
-    return 0;
+    return success ? 0 : 3;
 }
diff --git a/tests/test-tokenizer-1.cpp b/tests/test-tokenizer-1.cpp
new file mode 100644
index 00000000..d8db7cd9
--- /dev/null
+++ b/tests/test-tokenizer-1.cpp
@@ -0,0 +1,131 @@
+#include "llama.h"
+#include "common.h"
+
+#include <cassert>
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <codecvt>
+#include <map>
+#include <vector>
+#include <locale>
+
+static std::string escape_whitespace(const std::string& text) {
+    std::string result;
+    bool escaping = false;
+    result += "\xe2\x96\x81";
+    for (size_t offs = 0; offs < text.length(); ++offs) {
+        if (text[offs] == ' ') {
+            if (!escaping) {
+                result += "\xe2\x96\x81";
+                escaping = true;
+            }
+        }
+        else {
+            escaping = false;
+            result += text[offs];
+        }
+    }
+    return result;
+}
+
+static std::string unescape_whitespace(llama_context * ctx, const std::vector<llama_token> & tokens) {
+    std::string result;
+    for (size_t i = 0; i < tokens.size(); ++i) {
+        result += llama_token_to_str(ctx, tokens[i]);
+    }
+    return result;
+}
+
+int main(int argc, char **argv) {
+    if (argc < 2) {
+        fprintf(stderr, "Usage: %s <vocab-file>\n", argv[0]);
+        return 1;
+    }
+
+    const std::string fname = argv[1];
+
+    fprintf(stderr, "%s : reading vocab from: '%s'\n", __func__, fname.c_str());
+
+    llama_model * model;
+    llama_context * ctx;
+
+    llama_backend_init(false);
+
+    // load the vocab
+    {
+        auto lparams = llama_context_default_params();
+
+        lparams.vocab_only = true;
+
+        model = llama_load_model_from_file(fname.c_str(), lparams);
+
+        if (model == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            return 1;
+        }
+
+        ctx = llama_new_context_with_model(model, lparams);
+
+        if (ctx == NULL) {
+            fprintf(stderr, "%s: error: failed to load vocab '%s'\n", __func__, fname.c_str());
+            llama_free_model(model);
+            return 1;
+        }
+    }
+
+    const int n_vocab = llama_n_vocab(ctx);
+
+    for (int i = 0; i < n_vocab; ++i) {
+        std::string forward = llama_token_to_str_bpe(ctx, i);
+        std::vector<llama_token> tokens = llama_tokenize_bpe(ctx, forward, false);
+        if (tokens.size() == 1) {
+            if (i != tokens[0]) {
+                std::string backward = llama_token_to_str(ctx, tokens[0]);
+                fprintf(stderr, "%s : error: token %d is string %s but bpe returns token %d %s\n",
+                    __func__, i, llama_token_to_str(ctx, i).c_str(), tokens[0], backward.c_str());
+                return 2;
+            }
+        } else {
+            llama_token_type type = llama_token_get_type(ctx, i);
+            if (type == LLAMA_TOKEN_TYPE_UNKNOWN || type == LLAMA_TOKEN_TYPE_CONTROL || type == LLAMA_TOKEN_TYPE_BYTE) {
+                fprintf(stderr, "%s : info: token %d is string %s and bpe returns tokens %s\n",
+                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
+            } else {
+                fprintf(stderr, "%s : error: token %d is string %s but bpe returns tokens %s\n",
+                    __func__, i, llama_token_to_str(ctx, i).c_str(), unescape_whitespace(ctx, tokens).c_str());
+                return 2;
+            }
+        }
+    }
+
+#ifdef _WIN32
+    std::wstring_convert<typename std::codecvt_utf8<char16_t>, char16_t> u16converter;
+    for (char16_t ch = 0x0000; ch < 0xffff; ++ch) {
+        std::u16string u16str(1, ch);
+        std::string str = u16converter.to_bytes(u16str);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false);
+        if (tokens.size() == 1) {
+            fprintf(stderr, "%s : info: %s tokenized to %d \n",
+                __func__, str.c_str(), tokens[0]);
+        }
+    }
+
+    std::wstring_convert<typename std::codecvt_utf8<char32_t>, char32_t> u32converter;
+    for (char32_t ch = 0x0000; ch < 0x0010ffff; ++ch) {
+        std::u32string u32str(1, ch);
+        std::string str = u32converter.to_bytes(u32str);
+        std::vector<llama_token> tokens = llama_tokenize(ctx, escape_whitespace(str).c_str(), false);
+        if (tokens.size() == 1) {
+            fprintf(stderr, "%s : info: %s tokenized to %d \n", __func__, str.c_str(), tokens[0]);
+        }
+    }
+#endif
+
+    llama_free_model(model);
+    llama_free(ctx);
+
+    llama_backend_free();
+
+    return 0;
+}