From 37246b1031b1680c0dcaf20aef736d6b446203fa Mon Sep 17 00:00:00 2001
From: Kyle Mistele
Date: Wed, 24 Apr 2024 05:15:29 -0500
Subject: common : revert showing control tokens by default for server (#6860)

* fix: revert showing control tokens by default

* feat: revert changes to default behavior of llama_token_to_piece; provide overridden declaration to receive "bool special" param to toggle showing control tokens

* feat: use the overridden declaration of llama_token_to_piece from common/common.cpp to specify "false" so that control tokens are not shown in chat completion responses

* common : simplify

---------

Co-authored-by: Georgi Gerganov
---
 common/common.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index a0d1f8d5..97f55b05 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2328,12 +2328,12 @@ std::vector<llama_token> llama_tokenize(
     return result;
 }
 
-std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token) {
+std::string llama_token_to_piece(const struct llama_context * ctx, llama_token token, bool special) {
     std::vector<char> result(8, 0);
-    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), true);
+    const int n_tokens = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), true);
+        int check = llama_token_to_piece(llama_get_model(ctx), token, result.data(), result.size(), special);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
-- 
cgit v1.2.3
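
For reference, a minimal C++ usage sketch of the overloaded helper follows. It is not part of the patch: the tokens_to_response function is hypothetical, and the assumption that the companion common/common.h declaration defaults "special" to true (so existing callers keep seeing control tokens) comes from the commit message, not from this diff.

    // Sketch only: assumes the llama.cpp tree, where common/common.h declares
    // std::string llama_token_to_piece(const struct llama_context *, llama_token, bool);
    #include <string>
    #include <vector>

    #include "common.h"

    // Hypothetical helper: detokenize a generated sequence for a chat
    // completion response. Passing special = false keeps control tokens
    // (BOS/EOS and similar) out of the user-visible text, which is the
    // server behavior this patch restores.
    static std::string tokens_to_response(const struct llama_context * ctx,
                                          const std::vector<llama_token> & toks) {
        std::string out;
        for (llama_token tok : toks) {
            out += llama_token_to_piece(ctx, tok, /*special=*/false);
        }
        return out;
    }

Callers that want the raw text, control tokens included (for example, debugging output), would pass true instead; the point of the overload is that the choice is now explicit at each call site rather than hard-coded.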