author     Pierrick Hymbert <pierrick.hymbert@gmail.com>   2024-03-17 19:12:37 +0100
committer  GitHub <noreply@github.com>                     2024-03-17 19:12:37 +0100
commit     d01b3c4c32357567f3531d4e6ceffc5d23e87583 (patch)
tree       80e0a075a8b120d6b5b095a73cc36cb2a4535aed /common/common.h
parent     cd776c37c945bf58efc8fe44b370456680cb1b59 (diff)
common: llama_load_model_from_url using --model-url (#6098)
* common: llama_load_model_from_url with libcurl dependency
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'common/common.h')
-rw-r--r--   common/common.h   4
1 file changed, 4 insertions, 0 deletions
diff --git a/common/common.h b/common/common.h
index 687f3425..8dd8a3ed 100644
--- a/common/common.h
+++ b/common/common.h
@@ -89,6 +89,7 @@ struct gpt_params {
     struct llama_sampling_params sparams;
 
     std::string model       = "models/7B/ggml-model-f16.gguf"; // model path
+    std::string model_url   = "";        // model url to download
     std::string model_draft = "";        // draft model for speculative decoding
     std::string model_alias = "unknown"; // model alias
     std::string prompt      = "";
@@ -191,6 +192,9 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
 struct llama_model_params   llama_model_params_from_gpt_params (const gpt_params & params);
 struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
 
+struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model,
+                                               struct llama_model_params params);
+
 // Batch utils
 
 void llama_batch_clear(struct llama_batch & batch);
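
A minimal usage sketch of the new helper declared above, assuming the function (per the commit message) uses libcurl to fetch model_url and stores the file at path_model before loading it. The URL and local path below are placeholders, and llama_backend_init(), llama_model_default_params(), llama_free_model(), and llama_backend_free() are taken from llama.h of this era; this is an illustration, not code from the commit.

    // sketch: fetch-and-load a model via the new llama_load_model_from_url helper
    #include <cstdio>

    #include "common.h"
    #include "llama.h"

    int main() {
        llama_backend_init();

        struct llama_model_params mparams = llama_model_default_params();

        // Per this commit, the helper presumably downloads model_url with libcurl
        // to path_model (if not already present) and then loads it as usual.
        // Both arguments here are illustrative placeholders.
        struct llama_model * model = llama_load_model_from_url(
            "https://example.com/models/ggml-model-f16.gguf", // model_url (placeholder)
            "models/ggml-model-f16.gguf",                      // path_model (local file)
            mparams);

        if (model == NULL) {
            fprintf(stderr, "failed to download or load model\n");
            return 1;
        }

        // ... create a llama_context and run inference as usual ...

        llama_free_model(model);
        llama_backend_free();
        return 0;
    }

On the command line, the same path is exposed through the new --model-url flag together with the existing model path option, which is what the model_url field added to gpt_params carries.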