summaryrefslogtreecommitdiff
path: root/examples/speculative
diff options
context:
space:
mode:
authorGeorgi Gerganov <ggerganov@gmail.com>2023-10-03 21:04:01 +0300
committerGitHub <noreply@github.com>2023-10-03 21:04:01 +0300
commitac2219fef34eb5b713c286c34c6e4162c39c8f3b (patch)
tree00d1bdad46d4995188ff084ae6178b9ff55f7431 /examples/speculative
parent48be797ffbd80b062f55778e09e97180eb25d2ab (diff)
llama : fix session saving/loading (#3400)
* llama : fix session saving/loading
* llama : temp fix for clearing "future" tokens from the KV cache
* llama : fix handling of "future" tokens when loading sessions
* llama : fix comments for llama_kv_cache API
Diffstat (limited to 'examples/speculative')
-rw-r--r--examples/speculative/speculative.cpp6
1 file changed, 3 insertions, 3 deletions
diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp
index c5e5b234..75a2e5e2 100644
--- a/examples/speculative/speculative.cpp
+++ b/examples/speculative/speculative.cpp
@@ -172,7 +172,7 @@ int main(int argc, char ** argv) {
LOG("out of drafted tokens\n");
}
- llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, n_ctx);
+ llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, -1);
llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0));
++n_past_dft;
@@ -257,7 +257,7 @@ int main(int argc, char ** argv) {
}
// evaluate the drafted token on the draft model
- llama_kv_cache_seq_rm(ctx_dft, 0, n_past_cur, n_ctx);
+ llama_kv_cache_seq_rm(ctx_dft, 0, n_past_cur, -1);
llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0));
++n_past_cur;
@@ -267,7 +267,7 @@ int main(int argc, char ** argv) {
}
// evaluate the target model on the drafted tokens
- llama_kv_cache_seq_rm(ctx_tgt, 0, n_past_tgt, n_ctx);
+ llama_kv_cache_seq_rm(ctx_tgt, 0, n_past_tgt, -1);
llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0));
++n_past_tgt;