diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2023-10-03 21:04:01 +0300 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-10-03 21:04:01 +0300 |
commit | ac2219fef34eb5b713c286c34c6e4162c39c8f3b (patch) | |
tree | 00d1bdad46d4995188ff084ae6178b9ff55f7431 /examples/server/server.cpp | |
parent | 48be797ffbd80b062f55778e09e97180eb25d2ab (diff) |
llama : fix session saving/loading (#3400)
* llama : fix session saving/loading
* llama : temp fix for clearing "future" tokens from the KV cache
* llama : fix handling of "future" tokens when loading sessions
* llama : fix comments for llama_kv_cache API
Diffstat (limited to 'examples/server/server.cpp')
-rw-r--r-- | examples/server/server.cpp | 2 |
1 file changed, 1 insertion, 1 deletion
diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 6dda5e36..921eb5da 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -448,7 +448,7 @@ struct llama_server_context n_past = common_part(embd, prompt_tokens); // since #3228 we now have to manually manage the KV cache - llama_kv_cache_seq_rm(ctx, 0, n_past, params.n_ctx); + llama_kv_cache_seq_rm(ctx, 0, n_past, -1); embd = prompt_tokens; if (n_past == num_prompt_tokens) |