author | firecoperana <xuqiaowei1124@gmail.com> | 2025-06-19 02:24:53 -0500
committer | GitHub <noreply@github.com> | 2025-06-19 10:24:53 +0300
commit | 3f111ad7bbb2d4f721332f9b2b344e48b3bbf9aa (patch)
tree | a3a17ee74e0436253e17f0d322320ed554d34b0a /examples/llava/llava-cli.cpp
parent | c5368148cf3af7a3694e0eb03d24a08326c01d12 (diff)
add dry sampler (#513)
* add dry sampler
* use vocab instead of model in dry_init function
* fix compile error for build test
---------
Co-authored-by: firecoperana <firecoperana>
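
The DRY ("Don't Repeat Yourself") sampler penalizes tokens that would extend a sequence the model has already generated, with a penalty that grows exponentially in the length of the repeat. The commit message does not reproduce the implementation, so the following is only a minimal sketch of the idea; the struct name, function name, and default values here are illustrative assumptions, not the code this commit adds.

```cpp
// Minimal sketch of the DRY penalty idea; illustrative only, not the code
// added by this commit. All names and default values are assumptions.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <unordered_map>
#include <vector>

struct dry_params {
    float multiplier  = 0.8f;   // overall penalty strength (0 disables DRY)
    float base        = 1.75f;  // exponential growth per extra matched token
    int   allowed_len = 2;      // repeats up to this length are left alone
};

// For each earlier position in the history, measure how long a suffix of the
// history it repeats; the token that would extend that repeat gets its logit
// reduced by multiplier * base^(match_len - allowed_len).
static void dry_penalize(std::vector<float> & logits,
                         const std::vector<int32_t> & history,
                         const dry_params & p) {
    if (p.multiplier <= 0.0f || history.size() < 2) return;

    const size_t n = history.size();
    std::unordered_map<int32_t, size_t> max_match;  // candidate token -> longest repeat it would extend

    for (size_t pos = 0; pos + 1 < n; ++pos) {
        // length of the match between the history ending at `pos` and the
        // history ending at its last token
        size_t len = 0;
        while (len <= pos && history[pos - len] == history[n - 1 - len]) ++len;
        if ((int) len <= p.allowed_len) continue;
        const int32_t next = history[pos + 1];      // token that would continue the repeat
        max_match[next] = std::max(max_match[next], len);
    }

    for (const auto & [tok, len] : max_match) {
        if ((size_t) tok < logits.size()) {
            logits[tok] -= p.multiplier * std::pow(p.base, (float) (len - p.allowed_len));
        }
    }
}
```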
Diffstat (limited to 'examples/llava/llava-cli.cpp')
-rw-r--r-- | examples/llava/llava-cli.cpp | 2
1 file changed, 1 insertion, 1 deletion
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index 8c7dd2ae..889a6222 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -191,7 +191,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
     LOG_TEE("\n");

-    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params->sparams);
+    struct llama_sampling_context * ctx_sampling = llama_sampling_init(llama_get_model_vocab(ctx_llava->model),params->sparams);
     if (!ctx_sampling) {
         fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
         exit(1);
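
Since `llama_sampling_init` now takes the vocabulary as its first argument (presumably because the DRY sampler needs token/vocabulary information, e.g. for sequence breakers, rather than the full model), other callers of the old one-argument form need the same one-line update. A hedged sketch of such a call site; `llama_get_model_vocab` and `llama_sampling_init` are taken from the diff above, while `model` and `params` stand in for whatever the surrounding code already has in scope:

```cpp
// Illustrative call site only; `model` and `params` are placeholders.
struct llama_sampling_context * ctx_sampling =
    llama_sampling_init(llama_get_model_vocab(model), params.sparams);
if (!ctx_sampling) {
    fprintf(stderr, "%s: failed to initialize sampling subsystem\n", __func__);
    exit(1);
}
```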