author    goerch <jhr.walter@t-online.de>    2023-09-16 13:41:33 +0200
committer GitHub <noreply@github.com>        2023-09-16 13:41:33 +0200
commit    b08e75baea294e366628b898e85c0bd359b58115 (patch)
tree      417a1a8e7589567ceedba88771056aee080c8e70 /tests/test-tokenizer-1-llama.cpp
parent    e6616cf0db2b63189fc34d0076f654af9adecdf8 (diff)
Fixing the last deviations from sentencepiece indicated by test-tokenizer-1 (#3170)
* Fix for #2721
* Reenable tokenizer test for LLaMa
* Add `console.cpp` dependency
* Fix dependency to `common`
* Fixing wrong fix.
* Make console usage platform specific. Work on compiler warnings.
* Adapting makefile
* Remove trailing whitespace
* Adapting the other parts of the makefile
* Fix typo.
* Fixing the last deviations from sentencepiece indicated by test-tokenizer-1
* Simplify logic
* Add missing change...
* Fix ugly compiler warning
* llama_tokenize should accept strings containing NUL now
* Adding huichen's test case
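Background on the "llama_tokenize should accept strings containing NUL now" item (explanatory, not part of the commit): the relevant property is that the input is passed by pointer and explicit length rather than as a NUL-terminated C string, so embedded '\0' bytes are preserved. A self-contained sketch of that difference, using an illustrative stand-in rather than the real llama.cpp API:

#include <cstdio>
#include <cstring>
#include <string>
#include <vector>

// Illustrative stand-in for a tokenizer entry point (not the llama.cpp API):
// a (data, size) interface preserves embedded NUL bytes, while a C-string
// interface would stop at the first '\0'.
static std::vector<unsigned char> tokenize_bytes(const char * data, size_t size) {
    return std::vector<unsigned char>(data, data + size);
}

int main() {
    std::string str("a\0b", 3);  // std::string may legally contain embedded NUL bytes

    // Length-aware path: all 3 bytes reach the "tokenizer".
    std::vector<unsigned char> toks = tokenize_bytes(str.data(), str.size());

    // C-string path: strlen() stops at the embedded NUL after 1 byte.
    printf("string size: %zu, strlen: %zu, bytes passed: %zu\n",
           str.size(), strlen(str.c_str()), toks.size());
    return 0;
}

(%zu is also the conversion this commit switches to for printing the size_t values returned by std::string::length().)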
Diffstat (limited to 'tests/test-tokenizer-1-llama.cpp')
-rw-r--r--  tests/test-tokenizer-1-llama.cpp  14
1 file changed, 6 insertions, 8 deletions
diff --git a/tests/test-tokenizer-1-llama.cpp b/tests/test-tokenizer-1-llama.cpp
index 804ea248..a95d462c 100644
--- a/tests/test-tokenizer-1-llama.cpp
+++ b/tests/test-tokenizer-1-llama.cpp
@@ -87,10 +87,9 @@ int main(int argc, char **argv) {
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
std::string check = llama_detokenize_spm(ctx, tokens);
if (check != str) {
- fprintf(stderr, "%s : error: token %d detokenizes to >%s<(%llu) but tokenization of this detokenizes to >%s<(%llu)\n",
+ fprintf(stderr, "%s : error: token %d detokenizes to '%s'(%zu) but tokenization of this detokenizes to '%s'(%zu)\n",
__func__, i, str.c_str(), str.length(), check.c_str(), check.length());
- if(i != 3)
- return 2;
+ return 2;
}
}
@@ -99,11 +98,10 @@ int main(int argc, char **argv) {
std::string str = codepoint_to_utf8(cp);
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
std::string check = llama_detokenize_spm(ctx, tokens);
- if (str != check) {
- fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
+ if (cp != 9601 && str != check) {
+ fprintf(stderr, "%s : error: codepoint %d detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
__func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
- if(cp != 0 && cp != 9601)
- return 3;
+ return 3;
}
}
}
@@ -112,7 +110,7 @@ int main(int argc, char **argv) {
std::vector<llama_token> tokens = llama_tokenize(ctx, str, false);
std::string check = llama_detokenize_spm(ctx, tokens);
if (str != check) {
- fprintf(stderr, "%s : error: codepoint %d detokenizes to >%s<(%llu) instead of >%s<(%llu)\n",
+ fprintf(stderr, "%s : error: codepoint %d detokenizes to '%s'(%zu) instead of '%s'(%zu)\n",
__func__, cp, check.c_str(), check.length(), str.c_str(), str.length());
return 4;
}
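Two pieces of background on the hunks above (explanatory, not taken from the commit): codepoint 9601 is U+2581 (▁), the character SentencePiece uses as its whitespace/word-boundary marker, so its UTF-8 form is not expected to survive a tokenize/detokenize round trip byte-for-byte, which is why the test now skips exactly that codepoint instead of the earlier ad-hoc exemptions; and %zu replaces %llu because std::string::length() returns size_t, for which %zu is the portable printf conversion. A small self-contained sketch of the codepoint in question (illustrative only; this is not the test's codepoint_to_utf8 helper):

#include <cstdio>
#include <string>

// Minimal UTF-8 encoder for codepoints below 0x10000, just to inspect U+2581.
static std::string encode_utf8(unsigned int cp) {
    std::string out;
    if (cp < 0x80) {
        out += (char)(cp);
    } else if (cp < 0x800) {
        out += (char)(0xC0 | (cp >> 6));
        out += (char)(0x80 | (cp & 0x3F));
    } else {
        out += (char)(0xE0 | (cp >> 12));
        out += (char)(0x80 | ((cp >> 6) & 0x3F));
        out += (char)(0x80 | (cp & 0x3F));
    }
    return out;
}

int main() {
    const unsigned int cp = 9601;  // U+2581, SentencePiece's '▁' marker
    const std::string str = encode_utf8(cp);

    // Prints the three UTF-8 bytes e2 96 81; note %zu for the size_t length.
    printf("U+%04X encodes to %zu bytes:", cp, str.length());
    for (unsigned char c : str) {
        printf(" %02x", (unsigned int) c);
    }
    printf("\n");
    return 0;
}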