summaryrefslogtreecommitdiff
path: root/common/common.h
diff options
context:
space:
mode:
author Georgi Gerganov <ggerganov@gmail.com> 2024-03-22 15:33:38 +0200
committer GitHub <noreply@github.com> 2024-03-22 15:33:38 +0200
commit 80bd33bc2c4be352697dc8473339f25e1085d117 (patch)
tree aada7156008e4ad7fb0be8c6182e5d97f175b201 /common/common.h
parent e80f06d2a194be62ab5b1cd7ef7c7a5b241dd4fb (diff)
common : add HF arg helpers (#6234)
* common : add HF arg helpers
* common : remove defaults
Diffstat (limited to 'common/common.h')
-rw-r--r--common/common.h10
1 file changed, 6 insertions(+), 4 deletions(-)
diff --git a/common/common.h b/common/common.h
index 31fd401b..d827d4df 100644
--- a/common/common.h
+++ b/common/common.h
@@ -89,9 +89,11 @@ struct gpt_params {
struct llama_sampling_params sparams;
std::string model = "models/7B/ggml-model-f16.gguf"; // model path
- std::string model_url = ""; // model url to download
- std::string model_draft = ""; // draft model for speculative decoding
+ std::string model_draft = ""; // draft model for speculative decoding
std::string model_alias = "unknown"; // model alias
+ std::string model_url = ""; // model url to download
+ std::string hf_repo = ""; // HF repo
+ std::string hf_file = ""; // HF file
std::string prompt = "";
std::string prompt_file = ""; // store the external prompt file name
std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
@@ -192,8 +194,8 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
struct llama_model_params llama_model_params_from_gpt_params (const gpt_params & params);
struct llama_context_params llama_context_params_from_gpt_params(const gpt_params & params);
-struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model,
- struct llama_model_params params);
+struct llama_model * llama_load_model_from_url(const char * model_url, const char * path_model, const struct llama_model_params & params);
+struct llama_model * llama_load_model_from_hf(const char * repo, const char * file, const char * path_model, const struct llama_model_params & params);
// Batch utils