From 16bc66d9479edd5ee12ec734973554d4493c5dfa Mon Sep 17 00:00:00 2001
From: slaren
Date: Thu, 28 Sep 2023 21:42:38 +0200
Subject: llama.cpp : split llama_context_params into model and context params
 (#3301)

* llama.cpp : split llama_context_params into model and context params

ggml-ci

* fix metal build

* fix freq_base/scale default to model value

* llama-bench : keep the same model between tests when possible

* move n_threads to llama_context_params, add n_threads_batch

* fix mpi build

* remove kv_size(), cuda scratch fixes

* remove low-vram option

* add n_threads_batch to system info, refactor to get_system_info()

* add documentation about --threads-batch to the READMEs

* llama-bench fix

* main : fix rope freq/scale warning

* llama.cpp : add llama_get_model
  common : add llama_tokenize from model

* remove duplicated ctx/model functions

ggml-ci

* cuda : print total VRAM used
---
 examples/speculative/speculative.cpp | 16 ++++++++--------
 1 file changed, 8 insertions(+), 8 deletions(-)

(limited to 'examples/speculative')

diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp
index 2445d78d..c5e5b234 100644
--- a/examples/speculative/speculative.cpp
+++ b/examples/speculative/speculative.cpp
@@ -70,16 +70,16 @@ int main(int argc, char ** argv) {
     const auto t_enc_start = ggml_time_us();
 
     // eval the prompt with both models
-    llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1, 0,           0), params.n_threads);
-    llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(),           1, n_input - 1, 0), params.n_threads);
-    llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input,     0,           0), params.n_threads);
+    llama_decode(ctx_tgt, llama_batch_get_one( inp.data(), n_input - 1, 0,           0));
+    llama_decode(ctx_tgt, llama_batch_get_one(&inp.back(),           1, n_input - 1, 0));
+    llama_decode(ctx_dft, llama_batch_get_one( inp.data(), n_input,     0,           0));
 
     const auto t_enc_end = ggml_time_us();
 
     // the 2 models should have the same vocab
     const int n_ctx   = llama_n_ctx(ctx_tgt);
-    const int n_vocab = llama_n_vocab(ctx_tgt);
-    //GGML_ASSERT(n_vocab == llama_n_vocab(ctx_dft));
+    const int n_vocab = llama_n_vocab(model_tgt);
+    //GGML_ASSERT(n_vocab == llama_n_vocab(model_dft));
 
     // how many tokens to draft each time
     int n_draft = params.n_draft;
@@ -173,7 +173,7 @@ int main(int argc, char ** argv) {
         }
 
         llama_kv_cache_seq_rm(ctx_dft, 0, n_past_dft, n_ctx);
-        llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0), params.n_threads);
+        llama_decode(ctx_dft, llama_batch_get_one(&id, 1, n_past_dft, 0));
         ++n_past_dft;
 
         // heuristic for n_draft
@@ -258,7 +258,7 @@ int main(int argc, char ** argv) {
 
             // evaluate the drafted token on the draft model
             llama_kv_cache_seq_rm(ctx_dft, 0, n_past_cur, n_ctx);
-            llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0), params.n_threads);
+            llama_decode(ctx_dft, llama_batch_get_one(&drafted.back(), 1, n_past_cur, 0));
             ++n_past_cur;
 
             if (grammar_dft != NULL) {
@@ -268,7 +268,7 @@ int main(int argc, char ** argv) {
 
         // evaluate the target model on the drafted tokens
         llama_kv_cache_seq_rm(ctx_tgt, 0, n_past_tgt, n_ctx);
-        llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0), params.n_threads);
+        llama_decode(ctx_tgt, llama_batch_get_one(drafted.data(), drafted.size(), n_past_tgt, 0));
         ++n_past_tgt;
 
         // the first token is always proposed by the traget model before the speculation loop
--
cgit v1.2.3
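
A minimal sketch (not part of the patch above) of what the split API looks like from a caller's side after this change: model-level options move to llama_model_params, per-context options including n_threads/n_threads_batch move to llama_context_params, and llama_decode no longer takes a thread count. Function and field names follow llama.h around this commit, but treat the exact signatures as assumptions and check the header of your checkout.

// sketch-new-api.cpp -- hypothetical standalone example, not part of this commit
#include "llama.h"

#include <cstdio>

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s model.gguf\n", argv[0]);
        return 1;
    }

    llama_backend_init(/*numa =*/ false);

    // model-level options (GPU layers, mmap/mlock, ...) now live in llama_model_params
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 0;

    llama_model * model = llama_load_model_from_file(argv[1], mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // per-context options now live in llama_context_params, including the thread counts
    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx           = 2048;
    cparams.n_threads       = 8; // threads used for single-token (generation) decoding
    cparams.n_threads_batch = 8; // threads used for batch (prompt) decoding

    llama_context * ctx = llama_new_context_with_model(model, cparams);

    // vocab size is queried from the model, not the context (as in the diff above)
    const int n_vocab = llama_n_vocab(model);
    printf("n_vocab = %d\n", n_vocab);

    // llama_decode no longer takes a thread count; a real caller would tokenize a prompt
    // first (e.g. via the common helpers) -- a placeholder token just shows the call shape
    llama_token tok = 0;
    if (llama_decode(ctx, llama_batch_get_one(&tok, 1, 0, 0)) != 0) {
        fprintf(stderr, "llama_decode failed\n");
    }

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();

    return 0;
}

One practical upside, hinted at by the llama-bench item in the commit message, is that a single loaded model can now be reused across several contexts with different n_ctx and thread settings instead of being reloaded for every configuration.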