From 8f43e551038af2547b5c01d0e9edd641c0e4bd29 Mon Sep 17 00:00:00 2001
From: Kawrakow <48489457+ikawrakow@users.noreply.github.com>
Date: Mon, 12 Aug 2024 15:14:32 +0200
Subject: Merge mainline - Aug 12 2024 (#17)

* Merge mainline

* Fix after merge

* Remove CI check

---------

Co-authored-by: Iwan Kawrakow
---
 examples/perplexity/perplexity.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

(limited to 'examples/perplexity/perplexity.cpp')

diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index dbe44539..372684f0 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -2018,11 +2018,11 @@ int main(int argc, char ** argv) {
     llama_backend_init();
     llama_numa_init(params.numa);
 
-    llama_model * model;
-    llama_context * ctx;
-    // load the model and apply lora adapter, if any
-    std::tie(model, ctx) = llama_init_from_gpt_params(params);
+    llama_init_result llama_init = llama_init_from_gpt_params(params);
+
+    llama_model * model = llama_init.model;
+    llama_context * ctx = llama_init.context;
 
     if (model == NULL) {
         fprintf(stderr, "%s: error: unable to load model\n", __func__);
         return 1;
-- 
cgit v1.2.3
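
The hunk above migrates the perplexity example from the old tuple-returning llama_init_from_gpt_params() (unpacked with std::tie) to the struct-returning variant that exposes .model and .context fields. Below is a minimal caller-side sketch of that pattern, not part of the patch: the header names, gpt_params_parse(), and the cleanup calls (llama_free, llama_free_model, llama_backend_free) are assumptions based on the llama.cpp API of this period.

#include "common.h"   // gpt_params, llama_init_from_gpt_params (assumed header)
#include "llama.h"    // llama_model, llama_context, backend init/free

#include <cstdio>

int main(int argc, char ** argv) {
    gpt_params params;
    if (!gpt_params_parse(argc, argv, params)) {  // assumed parser from common.h
        return 1;
    }

    llama_backend_init();
    llama_numa_init(params.numa);

    // New style: read the model and context from the result struct
    // instead of std::tie on a tuple.
    llama_init_result llama_init = llama_init_from_gpt_params(params);

    llama_model   * model = llama_init.model;
    llama_context * ctx   = llama_init.context;

    if (model == NULL) {
        fprintf(stderr, "%s: error: unable to load model\n", __func__);
        return 1;
    }

    // ... run the workload ...

    llama_free(ctx);          // assumed cleanup calls
    llama_free_model(model);
    llama_backend_free();
    return 0;
}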