author     staviq <staviq@gmail.com>       2023-10-17 17:11:01 +0200
committer  GitHub <noreply@github.com>     2023-10-17 18:11:01 +0300
commit     1a159553f921a9209fed8c714494e57b3649f232
tree       b880614b6be6541d1890db725a7292fccef93855 /common/common.cpp
parent     281ef73c258cc1eebec8a64264240432d5878c4b
tokenizer : special token handling (#3538)
* Rewrite special token handling from #1931
* shorten param name, add special-token verification by type
* use offsets instead of copying via substr
* formatting, remove copying iterator on delete
* llama : normalize code-style
* swift fix
* print prefix/suffix if verbose; main: split input into prefix, input, suffix (sketched after this message)
* don't add a space when using special tokens
* minor : comment + spacing
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
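The "split prefix, input, suffix" bullet is terse, and main.cpp itself is outside this diffstat. A plausible reading, sketched here as an assumption rather than the verbatim change: instruct-mode input is now tokenized in three parts, with the template prefix/suffix allowed to match special tokens while the raw user text is not. The wrapper signature is the one added below in common.cpp; build_instruct_input and its parameters are hypothetical names.

    #include "common.h"
    #include "llama.h"

    #include <string>
    #include <vector>

    // Hedged sketch: tokenize the instruct prefix/suffix with special = true so
    // template markers such as "[INST]" collapse into single special tokens,
    // but keep special = false for the raw user text so typed input cannot
    // smuggle special tokens into the stream.
    static std::vector<llama_token> build_instruct_input(
            llama_context * ctx,
            const std::string & prefix,     // e.g. params.input_prefix
            const std::string & user_text,  // raw input typed by the user
            const std::string & suffix) {   // e.g. params.input_suffix
        const auto pfx = llama_tokenize(ctx, prefix,    /*add_bos*/ false, /*special*/ true);
        const auto inp = llama_tokenize(ctx, user_text, /*add_bos*/ false, /*special*/ false);
        const auto sfx = llama_tokenize(ctx, suffix,    /*add_bos*/ false, /*special*/ true);

        std::vector<llama_token> out;
        out.insert(out.end(), pfx.begin(), pfx.end());
        out.insert(out.end(), inp.begin(), inp.end());
        out.insert(out.end(), sfx.begin(), sfx.end());
        return out;
    }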
Diffstat (limited to 'common/common.cpp')
-rw-r--r--  common/common.cpp | 12 +++++++-----
1 file changed, 7 insertions(+), 5 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 9c4f7df2..3e4b8a8c 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -879,21 +879,23 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
 std::vector<llama_token> llama_tokenize(
   const struct llama_context * ctx,
            const std::string & text,
-                        bool   add_bos) {
-    return llama_tokenize(llama_get_model(ctx), text, add_bos);
+                        bool   add_bos,
+                        bool   special) {
+    return llama_tokenize(llama_get_model(ctx), text, add_bos, special);
 }
 
 std::vector<llama_token> llama_tokenize(
     const struct llama_model * model,
            const std::string & text,
-                        bool   add_bos) {
+                        bool   add_bos,
+                        bool   special) {
     // upper limit for the number of tokens
     int n_tokens = text.length() + add_bos;
     std::vector<llama_token> result(n_tokens);
-    n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos);
+    n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos);
+        int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
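For context, a minimal caller-side sketch of the updated wrapper. Only the llama_tokenize signature comes from the diff above; the model path, the prompt literal, and the use of "<s>" as the special-token example are illustrative assumptions (which literals count as special depends on the model's vocab).

    #include "common.h"
    #include "llama.h"

    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
        llama_backend_init(/*numa*/ false);

        llama_model * model = llama_load_model_from_file("model.gguf", llama_model_default_params()); // hypothetical path
        llama_context * ctx = llama_new_context_with_model(model, llama_context_default_params());

        const std::string text = "<s>hello"; // contains a special-token literal

        // special = false: "<s>" is tokenized as ordinary text (several pieces);
        // special = true:  "<s>" is matched against the vocab's special tokens.
        const auto plain = llama_tokenize(ctx, text, /*add_bos*/ false, /*special*/ false);
        const auto spec  = llama_tokenize(ctx, text, /*add_bos*/ false, /*special*/ true);

        printf("plain: %zu tokens, special: %zu tokens\n", plain.size(), spec.size());
        for (llama_token t : spec) {
            printf("%6d -> '%s'\n", t, llama_token_to_piece(ctx, t).c_str());
        }

        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
        return 0;
    }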