author     slaren <slarengh@gmail.com>      2023-09-28 21:42:38 +0200
committer  GitHub <noreply@github.com>      2023-09-28 22:42:38 +0300
commit     16bc66d9479edd5ee12ec734973554d4493c5dfa (patch)
tree       4cca787ebd86dd55fd176d27112117c74e9b34c6 /examples/speculative
parent     0512d66670de3f650c579519833c085014b0f200 (diff)
llama.cpp : split llama_context_params into model and context params (#3301)
* llama.cpp : split llama_context_params into model and context params

  ggml-ci

* fix metal build
* fix freq_base/scale default to model value
* llama-bench : keep the same model between tests when possible
* move n_threads to llama_context_params, add n_threads_batch
* fix mpi build
* remove kv_size(), cuda scratch fixes
* remove low-vram option
* add n_threads_batch to system info, refactor to get_system_info()
* add documentation about --threads-batch to the READMEs
* llama-bench fix
* main : fix rope freq/scale warning
* llama.cpp : add llama_get_model
  common : add llama_tokenize from model
* remove duplicated ctx/model functions

  ggml-ci

* cuda : print total VRAM used
Diffstat (limited to 'examples/speculative')
-rw-r--r--   examples/speculative/speculative.cpp   16
1 file changed, 8 insertions(+), 8 deletions(-)
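The hunks below only adjust call sites in the speculative example. For orientation, here is a minimal caller-side sketch of the split this commit introduces, assuming the post-#3301 llama.h API (llama_model_params / llama_context_params); the file name and parameter values are placeholders and not taken from this patch.

    #include "llama.h"

    int main() {
        llama_backend_init(false /* numa */);

        // model-level settings now live in llama_model_params
        llama_model_params mparams = llama_model_default_params();
        mparams.n_gpu_layers = 32;                      // placeholder value

        // context-level settings, including thread counts, live in llama_context_params
        llama_context_params cparams = llama_context_default_params();
        cparams.n_ctx           = 2048;                 // placeholder value
        cparams.n_threads       = 8;                    // generation threads
        cparams.n_threads_batch = 8;                    // prompt/batch threads (new in this commit)

        llama_model   * model = llama_load_model_from_file("model.gguf", mparams);
        llama_context * ctx   = llama_new_context_with_model(model, cparams);

        // llama_decode() no longer takes a thread count:
        //   before: llama_decode(ctx, batch, params.n_threads);
        //   after : llama_decode(ctx, batch);

        llama_free(ctx);
        llama_free_model(model);
        llama_backend_free();
        return 0;
    }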
diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp
index 2445d78d..c5e5b234 100644
--- a/examples/speculative/speculative.cpp
+++ b/examples/speculative/speculative.cpp
@@ -70,16 +70,16 @@ int main(int argc, char ** argv) {
const auto t_enc_start = ggml_time_us();
// eval the prompt with both models
- llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1, 0, 0), params.n_threads);
- llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0), params.n_threads);
- llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input, 0, 0), params.n_threads);
+ llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1, 0, 0));
+ llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(), 1, n_input - 1, 0));
+ llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input, 0, 0));
const auto t_enc_end = ggml_time_us();
// the 2 models should have the same vocab
const int n_ctx = llama_n_ctx(ctx_tgt);
- const int n_vocab = llama_n_vocab(ctx_tgt);
- //GGML_ASSERT(n_vocab == llama_n_vocab(ctx_dft));
+ const int n_vocab = llama_n_vocab(model_tgt);
+ //GGML_ASSERT(n_vocab == llama_n_vocab(model_dft));
// how many tokens to draft each time
int n_draft = params.n_draft;
@@ -173,7 +173,7 @@ int main(int argc, char ** argv) {
}
llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, n_ctx);
- llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0), params.n_threads);
+ llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0));
++n_past_dft;
// heuristic for n_draft
@@ -258,7 +258,7 @@ int main(int argc, char ** argv) {
// evaluate the drafted token on the draft model
llama_kv_cache_seq_rm(ctx_dft, 0, n_past_cur, n_ctx);
- llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0), params.n_threads);
+ llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0));
++n_past_cur;
if (grammar_dft != NULL) {
@@ -268,7 +268,7 @@ int main(int argc, char ** argv) {
// evaluate the target model on the drafted tokens
llama_kv_cache_seq_rm(ctx_tgt, 0, n_past_tgt, n_ctx);
- llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0), params.n_threads);
+ llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0));
++n_past_tgt;
// the first token is always proposed by the target model before the speculation loop
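As the second hunk shows, the vocabulary size is now read from the model rather than the context. A caller that only holds a llama_context can reach the same information through llama_get_model(), which this commit adds; the helper below is a hypothetical illustration, not part of the patch.

    #include "llama.h"

    // Hypothetical helper: vocab size is a model-level property after this
    // change, so a context-only caller goes through llama_get_model()
    // instead of the removed llama_n_vocab(ctx) variant.
    static int vocab_size_of(const llama_context * ctx) {
        const llama_model * model = llama_get_model(ctx);
        return llama_n_vocab(model);
    }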