| author | Xiang (Kevin) Li <kevinli020508@gmail.com> | 2023-12-09 16:29:27 -0500 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-12-09 23:29:27 +0200 |
| commit | e18f7345a300920e234f732077bda660cc6cda9c (patch) | |
| tree | 45b06956f2322b9156d59ab7df7a170576a225ae | |
| parent | fe680e3d1080a765e5d3150ffd7bab189742898d (diff) | |
grammar : revert the replacement of llama_token_to_piece with id_to_token (#4396)
-rw-r--r-- | llama.cpp | 4 |
1 file changed, 2 insertions, 2 deletions
@@ -7503,7 +7503,7 @@ void llama_sample_grammar(struct llama_context * ctx, llama_token_data_array * c
 
     for (size_t i = 0; i < candidates->size; ++i) {
         const llama_token id = candidates->data[i].id;
-        const std::string & piece = ctx->model.vocab.id_to_token[id].text;
+        const std::string piece = llama_token_to_piece(ctx, id);
         if (id == eos) {
             if (!allow_eos) {
                 candidates->data[i].logit = -INFINITY;
@@ -7715,7 +7715,7 @@ void llama_grammar_accept_token(struct llama_context * ctx, struct llama_grammar
         GGML_ASSERT(false);
     }
 
-    const std::string & piece = ctx->model.vocab.id_to_token[token].text;
+    const std::string piece = llama_token_to_piece(ctx, token);
 
     // Note terminating 0 in decoded string
     const auto decoded = decode_utf8(piece, grammar->partial_utf8);
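For context, `llama_token_to_piece()` converts a token id into its decoded text piece, whereas `vocab.id_to_token[id].text` is the raw stored vocab string; this revert restores the decoded form for grammar matching. Below is a minimal, non-authoritative sketch of how a `std::string` wrapper can be built on the public C API `llama_token_to_piece(model, token, buf, length)` from llama.h of this era. The helper name `token_to_piece_str` is illustrative, and the exact signature varies across releases (later versions add extra parameters); the internal helper called in the diff above takes a `llama_context` instead.

```cpp
// Minimal sketch (not from this commit): wrap the public C API
// llama_token_to_piece() into a std::string helper. The retry-on-negative
// convention and the (model, token, buf, length) signature are assumed to
// match the llama.h of this period.
#include <string>
#include <vector>

#include "llama.h"

static std::string token_to_piece_str(const struct llama_model * model, llama_token token) {
    std::vector<char> buf(8, '\0');
    int n = llama_token_to_piece(model, token, buf.data(), (int) buf.size());
    if (n < 0) {
        // a negative result reports the required buffer size
        buf.resize((size_t) -n);
        n = llama_token_to_piece(model, token, buf.data(), (int) buf.size());
    }
    return std::string(buf.data(), n > 0 ? (size_t) n : 0);
}
```

In the grammar code above, the decoded piece is then passed to `decode_utf8()` together with `grammar->partial_utf8`, the partial UTF-8 state carried over from the previous token.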