author    | Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com> | 2023-10-29 11:31:40 -0600
committer | GitHub <noreply@github.com> | 2023-10-29 11:31:40 -0600
commit    | 6e08281e588bbba1a5d180290a94a43f167f3a1a (patch)
tree      | 46add394417eb2b5929793ca879c793a478fd3f8 /common
parent    | 2046eb4345e62c4575b3cdc0115a51db89f3fb70 (diff)
Extend llama_kv_cache_seq_rm to allow matching any sequence (#3843)
* Extend llama_kv_cache_seq_rm to allow matching any sequence
* Replace llama_kv_cache_tokens_rm with llama_kv_cache_clear
Use llama_kv_cache_clear for cache clearing
Change llama_kv_cache_tokens_rm calls that delete by position to use the llama_kv_cache_seq_rm functionality instead, as sketched below
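For context, a minimal sketch of how the interface reads after this change. llama_kv_cache_seq_rm and llama_kv_cache_clear are the calls the commit touches; the wrapper function and the concrete position/sequence values are illustrative assumptions, not part of the commit.

```cpp
// Sketch only: assumes llama.h from a checkout that includes this change and
// a valid llama_context obtained elsewhere; prune_kv_example and the concrete
// positions/sequence ids are hypothetical.
#include "llama.h"

void prune_kv_example(struct llama_context * ctx) {
    // Remove cached tokens at positions [100, 200) from sequence 0 only.
    llama_kv_cache_seq_rm(ctx, 0, 100, 200);

    // Extended behavior: a negative seq_id matches any sequence, so this
    // removes the same position range across all sequences -- the by-position
    // deletion previously done through llama_kv_cache_tokens_rm.
    llama_kv_cache_seq_rm(ctx, -1, 100, 200);

    // Clearing the whole cache now has a dedicated call, replacing the old
    // llama_kv_cache_tokens_rm(ctx, -1, -1) idiom.
    llama_kv_cache_clear(ctx);
}
```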
Diffstat (limited to 'common')
-rw-r--r-- | common/common.cpp | 2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/common/common.cpp b/common/common.cpp
index f81f4d35..c187128d 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -889,7 +889,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
         std::vector<llama_token> tmp = { llama_token_bos(model), llama_token_eos(model), };
         llama_decode(lctx, llama_batch_get_one(tmp.data(), std::min(tmp.size(), (size_t) params.n_batch), 0, 0));
-        llama_kv_cache_tokens_rm(lctx, -1, -1);
+        llama_kv_cache_clear(lctx);
         llama_reset_timings(lctx);
     }
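In context, this hunk is the warm-up step of llama_init_from_gpt_params: a throwaway batch is decoded, after which the KV cache is emptied so the warm-up tokens do not affect later decoding. llama_kv_cache_clear states that intent directly, where the old call relied on the -1, -1 sentinel arguments.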