| author | Karthik Sethuraman <k.seth1993@gmail.com> | 2023-12-29 06:22:10 -0800 |
| --- | --- | --- |
| committer | GitHub <noreply@github.com> | 2023-12-29 16:22:10 +0200 |
| commit | b93edd22f55d3e5268263c3edcdae1818505c078 (patch) | |
| tree | d4519850dfd72170db4488ce1bb9e973130d91d5 /examples/server/server.cpp | |
| parent | 82d6eab224862a7044069fb9211dc4b29124264b (diff) | |
server : allow to generate multimodal embeddings (#4681)
Diffstat (limited to 'examples/server/server.cpp')
-rw-r--r--  examples/server/server.cpp | 12
1 file changed, 11 insertions(+), 1 deletion(-)
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index c5035e20..31b8cf33 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -3077,7 +3077,17 @@ int main(int argc, char **argv)
                 {
                     prompt = "";
                 }
-                const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0} }, false, true, -1);
+
+                json image_data;
+                if (body.count("image_data") != 0) {
+                    image_data = body["image_data"];
+                }
+                else
+                {
+                    image_data = "";
+                }
+
+                const int task_id = llama.request_completion({ {"prompt", prompt}, { "n_predict", 0}, {"image_data", image_data} }, false, true, -1);
                 task_result result = llama.next_result(task_id);
                 return res.set_content(result.result_json.dump(), "application/json; charset=utf-8");
             });
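For context, a minimal client-side sketch of how the extended /embedding endpoint could be exercised is shown below. It is written in C++ with cpp-httplib and nlohmann::json, the same libraries the server example bundles. The host and port, the placeholder base64 string, and the {"data", "id"} layout of each image_data entry are assumptions borrowed from the server's existing /completion multimodal convention; the diff above only shows that the field is read from the request body and forwarded to request_completion.

#include <iostream>

#include "httplib.h"   // cpp-httplib, bundled with the server example
#include "json.hpp"    // nlohmann::json, bundled with the server example

using json = nlohmann::json;

int main() {
    // Assumption: the server was started locally on port 8080 with a
    // multimodal (e.g. LLaVA) model and its multimodal projector loaded.
    httplib::Client cli("localhost", 8080);

    // Assumption: image_data follows the /completion convention of a list of
    // base64-encoded images, each tagged with a numeric id.
    json body = {
        {"content", "a photo of a red bicycle"},
        {"image_data", json::array({
            json{ {"data", "<base64-encoded image bytes>"}, {"id", 12} }
        })}
    };

    auto res = cli.Post("/embedding", body.dump(), "application/json");
    if (res && res->status == 200) {
        // The response JSON carries the embedding computed for the prompt
        // together with the supplied image(s).
        std::cout << res->body << std::endl;
    } else {
        std::cerr << "request failed" << std::endl;
    }
    return 0;
}

Note that when image_data is absent the handler falls back to an empty string, so existing text-only embedding requests keep working unchanged.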