| author | slaren <slarengh@gmail.com> | 2023-09-28 21:42:38 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-09-28 22:42:38 +0300 |
| commit | 16bc66d9479edd5ee12ec734973554d4493c5dfa (patch) | |
| tree | 4cca787ebd86dd55fd176d27112117c74e9b34c6 /examples/beam-search | |
| parent | 0512d66670de3f650c579519833c085014b0f200 (diff) | |
llama.cpp : split llama_context_params into model and context params (#3301)
* llama.cpp : split llama_context_params into model and context params
ggml-ci
* fix metal build
* fix freq_base/scale default to model value
* llama-bench : keep the same model between tests when possible
* move n_threads to llama_context_params, add n_threads_batch
* fix mpi build
* remove kv_size(), cuda scratch fixes
* remove low-vram option
* add n_threads_batch to system info, refactor to get_system_info()
* add documentation about --threads-batch to the READMEs
* llama-bench fix
* main : fix rope freq/scale warning
* llama.cpp : add llama_get_model
common : add llama_tokenize from model
* remove duplicated ctx/model functions
ggml-ci
* cuda : print total VRAM used
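
For context on what the split means for callers, here is a minimal sketch of the post-split API described in the commit message above: model-level settings move into llama_model_params, while context-level settings, including the relocated n_threads and the new n_threads_batch, live in llama_context_params. This is not code from the commit; the file path and all numeric values are placeholder assumptions.

```cpp
// Minimal sketch of the model/context params split (placeholder values).
#include <cstdio>
#include "llama.h"

int main() {
    llama_backend_init(false); // numa = false

    // Model-level settings (weights, GPU offload) now live in llama_model_params.
    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 0;

    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // Context-level settings live in llama_context_params; n_threads and the
    // new n_threads_batch replace the per-call thread arguments removed below.
    llama_context_params cparams = llama_context_default_params();
    cparams.n_ctx           = 2048;
    cparams.n_threads       = 4; // threads for single-token generation
    cparams.n_threads_batch = 8; // threads for prompt/batch processing

    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        llama_free_model(model);
        return 1;
    }

    // ... tokenize, llama_decode, sampling, etc. ...

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```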
Diffstat (limited to 'examples/beam-search')
-rw-r--r-- | examples/beam-search/beam-search.cpp | 4 |
1 file changed, 2 insertions, 2 deletions
```diff
diff --git a/examples/beam-search/beam-search.cpp b/examples/beam-search/beam-search.cpp
index 63da7c3e..f078ab8a 100644
--- a/examples/beam-search/beam-search.cpp
+++ b/examples/beam-search/beam-search.cpp
@@ -160,7 +160,7 @@ int main(int argc, char ** argv)
 
     int n_past = 0;
 
-    if (llama_decode(ctx, llama_batch_get_one(tokens_list.data(), tokens_list.size(), n_past, 0), params.n_threads))
+    if (llama_decode(ctx, llama_batch_get_one(tokens_list.data(), tokens_list.size(), n_past, 0)))
     {
         fprintf(stderr, "%s : failed to eval prompt.\n" , __func__ );
         return 1;
@@ -170,7 +170,7 @@ int main(int argc, char ** argv)
     beam_search_callback_data callback_data{ctx, {}};
     size_t const beam_width = static_cast<size_t>(params.n_beams);
     int const n_predict = 256;
-    llama_beam_search(ctx, beam_search_callback, &callback_data, beam_width, n_past, n_predict, params.n_threads);
+    llama_beam_search(ctx, beam_search_callback, &callback_data, beam_width, n_past, n_predict);
     std::cout << "\n\n";
 
     for (llama_token const token_id : callback_data.response) {
```
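
On the caller side, the removed params.n_threads argument does not simply vanish: the thread counts that llama_decode and llama_beam_search used to take per call are now read from the context. Below is a hedged sketch of the setup this example relies on after the change; in the repository this typically happens in common's initialization helper, so the exact lines here are an assumption, with params.n_threads being the gpt_params field visible in the removed lines above.

```cpp
// Assumed setup code, not part of this diff: thread counts are configured once
// on the context instead of being passed to every decode/search call.
llama_context_params cparams = llama_context_default_params();
cparams.n_threads       = params.n_threads; // generation threads
cparams.n_threads_batch = params.n_threads; // prompt/batch threads

llama_context * ctx = llama_new_context_with_model(model, cparams);
// From here on, the '+' lines above apply: llama_decode() and
// llama_beam_search() take no thread argument.
```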