author     Georgi Gerganov <ggerganov@gmail.com>    2024-04-21 18:36:45 +0300
committer  GitHub <noreply@github.com>              2024-04-21 18:36:45 +0300
commit     40f74e4d739e9250431cf339ae7588b28d8d0663 (patch)
tree       6008461e7a055c9b86c39df7117b943f1ecc0598 /common
parent     b9cc76d87e3d7ae5900f19d4fe8f8976d0a35888 (diff)
llama : add option to render special/control tokens (#6807)
* make : fix common dep on llama.h
* llama : add option to render special tokens
* readme : add API change notice
ggml-ci
* swift : fix build
Diffstat (limited to 'common')
-rw-r--r--  common/common.cpp  4
1 file changed, 2 insertions, 2 deletions
diff --git a/common/common.cpp b/common/common.cpp
index b6143e41..06f252ea 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2328,10 +2328,10 @@ std::vector<llama_token> llama_tokenize(
 std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
     std::vector<char> result(8, 0);
-    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
+    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), true);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size());
+        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), true);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
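
For illustration, a minimal sketch of what the patched helper amounts to, with the new trailing bool exposed to the caller instead of hard-coded to true. This is not part of the commit; the helper name token_to_piece and the exposed special parameter are assumptions, while the llama_token_to_piece signature (model, token, buffer, length, special) follows the diff above.

// Sketch only: restates the common.cpp helper with the render-special-tokens
// flag (the new trailing bool) as a caller-visible parameter.
// Assumes llama.h as of this commit; the helper name is hypothetical.
#include <string>
#include <vector>
#include "llama.h"

static std::string token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
    std::vector<char> result(8, 0);
    // First attempt with a small buffer; a negative return is the required size.
    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
    if (n_tokens < 0) {
        result.resize(-n_tokens);
        const int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
        GGML_ASSERT(check == -n_tokens);
    } else {
        result.resize(n_tokens);
    }
    return std::string(result.data(), result.size());
}

Passing true renders special/control tokens (e.g. BOS/EOS markers) as text, which is what the commit switches the common helper to; passing false would keep the previous behavior of omitting them from the detokenized output.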