summary | refs | log | tree | commit | diff
path: root/examples/llava/llava-cli.cpp
diff options
context:
space:
mode:
author: k.h.lai <adrian.k.h.lai@outlook.com>  2024-05-13 22:02:36 +0800
committer: GitHub <noreply@github.com>  2024-05-14 00:02:36 +1000
commit: 30e70334f71b3bd115024affcf98cac3d79aaa95 (patch)
tree: ae230a53a7a49598571fb41e5f93f7969cba7a24 /examples/llava/llava-cli.cpp
parent: 1c570d8beeebad95872dc738ea542a4a0022f78a (diff)
llava-cli: fix base64 prompt (#7248)
Diffstat (limited to 'examples/llava/llava-cli.cpp')
-rw-r--r--  examples/llava/llava-cli.cpp | 27
1 file changed, 21 insertions, 6 deletions
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index da60ddf2..a6d67e5d 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -300,14 +300,10 @@ int main(int argc, char ** argv) {
return 1;
}
- for (auto & image : params.image) {
+ if (prompt_contains_image(params.prompt)) {
auto ctx_llava = llava_init_context(&params, model);
- auto image_embed = load_image(ctx_llava, &params, image);
- if (!image_embed) {
- std::cerr << "error: failed to load image " << image << ". Terminating\n\n";
- return 1;
- }
+ auto image_embed = load_image(ctx_llava, &params, "");
// process the prompt
process_prompt(ctx_llava, image_embed, &params, params.prompt);
@@ -316,7 +312,26 @@ int main(int argc, char ** argv) {
llava_image_embed_free(image_embed);
ctx_llava->model = NULL;
llava_free(ctx_llava);
+ } else {
+ for (auto & image : params.image) {
+ auto ctx_llava = llava_init_context(&params, model);
+
+ auto image_embed = load_image(ctx_llava, &params, image);
+ if (!image_embed) {
+ std::cerr << "error: failed to load image " << image << ". Terminating\n\n";
+ return 1;
+ }
+
+ // process the prompt
+ process_prompt(ctx_llava, image_embed, &params, params.prompt);
+
+ llama_print_timings(ctx_llava->ctx_llama);
+ llava_image_embed_free(image_embed);
+ ctx_llava->model = NULL;
+ llava_free(ctx_llava);
+ }
}
+
llama_free_model(model);
return 0;