From 5be6c803fa5378f62a1590f3ad8c6b64c7c0c2ce Mon Sep 17 00:00:00 2001
From: Marcus Dunn <51931484+MarcusDunn@users.noreply.github.com>
Date: Mon, 23 Oct 2023 12:40:03 -0700
Subject: llama : remove token functions with `context` args in favor of
 `model` (#3720)

* added `llama_model_token_*` variants to all the `llama_token_*` functions.

* added `LLAMA_API`

* formatting

Co-authored-by: Georgi Gerganov

* removed old `llama_token` functions

* changed 3 more functions to take in model
  - `llama_token_get_text`
  - `llama_token_get_score`
  - `llama_token_get_type`

* added back docs

* fixed main.cpp

* changed token functions to use new model variants

---------

Co-authored-by: Georgi Gerganov
---
 examples/batched/batched.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'examples/batched/batched.cpp')

diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 2797329b..75856a81 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -180,7 +180,7 @@ int main(int argc, char ** argv) {
             //const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);

             // is it an end of stream? -> mark the stream as finished
-            if (new_token_id == llama_token_eos(ctx) || n_cur == n_len) {
+            if (new_token_id == llama_token_eos(model) || n_cur == n_len) {
                 i_batch[i] = -1;
                 LOG_TEE("\n");

                 if (n_parallel > 1) {
--
cgit v1.2.3
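
For context, the one-line change above reflects the API migration this commit describes: special-token getters such as `llama_token_eos` now take a `const struct llama_model *` rather than a `llama_context *`, since these values are properties of the model, not of a particular inference context. A minimal sketch of the post-#3720 call pattern follows; the helper function `is_stream_finished` is hypothetical and for illustration only, but the `llama_token_eos(model)` call matches the API as of this commit:

```cpp
#include "llama.h"

// Hypothetical helper illustrating the new model-based token API.
// After #3720, llama_token_eos takes the model, not the context,
// mirroring the check in examples/batched/batched.cpp.
static bool is_stream_finished(const llama_model * model,
                               llama_token new_token_id,
                               int n_cur, int n_len) {
    return new_token_id == llama_token_eos(model) || n_cur == n_len;
}
```

Callers that previously passed a `llama_context *` to `llama_token_eos`, `llama_token_bos`, `llama_token_get_text`, `llama_token_get_score`, or `llama_token_get_type` need only swap in the `llama_model *` (obtainable via `llama_get_model(ctx)` if only a context is in scope).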