Diffstat (limited to 'common')
-rw-r--r--  common/common.cpp  17
-rw-r--r--  common/common.h     1
2 files changed, 18 insertions, 0 deletions
diff --git a/common/common.cpp b/common/common.cpp
index f0c618e0..ab936ee7 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1213,6 +1213,17 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
         }
         return true;
     }
+    if (arg == "--offload-policy" || arg == "-op") {
+        CHECK_ARG
+        auto p = string_split_pairs<int,int>(argv[i], ',');
+        if (p.empty()) {
+            fprintf(stderr, "error: Invalid offload policy argument: %s\n", argv[i]);
+            invalid_param = true;
+        } else {
+            params.offload_policy.insert(params.offload_policy.end(), p.begin(), p.end());
+        }
+        return true;
+    }
     if (arg == "--host") {
         CHECK_ARG
         params.hostname = argv[i];
@@ -2222,6 +2233,10 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         return iparams;
     }
 
+    for (auto [op, on_off] : params.offload_policy) {
+        llama_set_offload_policy(lctx, op, on_off);
+    }
+
     if (!params.control_vectors.empty()) {
         if (params.control_vector_layer_start <= 0) params.control_vector_layer_start = 1;
         if (params.control_vector_layer_end   <= 0) params.control_vector_layer_end   = llama_n_layer(model);
@@ -2418,6 +2433,8 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.type_k = kv_cache_type_from_str(params.cache_type_k);
     cparams.type_v = kv_cache_type_from_str(params.cache_type_v);
 
+    if (!params.offload_policy.empty()) cparams.offload_policy = (void *)&params.offload_policy;
+
     return cparams;
 }
diff --git a/common/common.h b/common/common.h
index b4f75236..fd83c9d3 100644
--- a/common/common.h
+++ b/common/common.h
@@ -143,6 +143,7 @@ struct gpt_params {
     std::vector<std::string> antiprompt; // strings upon which more user input is prompted (a.k.a. reverse prompts)
     std::vector<llama_model_kv_override> kv_overrides;
     std::vector<llama_model_tensor_buft_override> tensor_buft_overrides;
+    std::vector<std::pair<int,int>> offload_policy;
 
     bool lora_init_without_apply = false; // only load lora to memory, but do not apply it to ctx (user can manually apply lora later using llama_lora_adapter_apply)
     std::vector<llama_lora_adapter_info> lora_adapters; // lora adapter path with user defined scale
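
Taken together, the hunks in common.cpp wire the new option end to end: -op / --offload-policy is parsed into (op, on_off) integer pairs, each pair is applied to the context via llama_set_offload_policy, and llama_context_params_from_gpt_params additionally exposes the whole list to the library through the opaque cparams.offload_policy pointer (note this stores a raw pointer to params.offload_policy, so the gpt_params object must outlive context creation). Because each occurrence of the flag appends to the vector rather than replacing it, the flag can be given several times. A hypothetical invocation (binary name, model path, and the concrete op ids are illustrative only; the meaning of each pair, apparently an op id and a 0/1 enable flag judging from the llama_set_offload_policy(lctx, op, on_off) call, is defined outside this diff):

    # illustrative only: disable offloading for ops 26 and 27, force it on for op 2
    ./llama-cli -m model.gguf --offload-policy 26,0 -op 27,0 -op 2,1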
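
The parsing path relies on string_split_pairs<int,int>, whose definition is outside this diff. A minimal self-contained sketch of what such a helper plausibly looks like, assuming it splits the argument on the separator and pairs up consecutive numeric tokens, returning an empty vector (which triggers the error path above) on malformed input:

    #include <sstream>
    #include <string>
    #include <utility>
    #include <vector>

    // Hypothetical reconstruction of the string_split_pairs helper used above.
    // Splits "a,b,c,d" on `sep` and groups consecutive values into pairs
    // (a,b), (c,d). Returns an empty vector on an odd token count or on a
    // token that is not a valid integer, which the caller then reports as an
    // invalid offload policy argument.
    template <typename T1, typename T2>
    static std::vector<std::pair<T1, T2>> string_split_pairs(const std::string & s, char sep) {
        std::vector<std::pair<T1, T2>> result;
        std::vector<std::string> tokens;
        std::istringstream iss(s);
        std::string token;
        while (std::getline(iss, token, sep)) {
            tokens.push_back(token);
        }
        if (tokens.empty() || tokens.size() % 2 != 0) {
            return {};
        }
        try {
            for (size_t i = 0; i < tokens.size(); i += 2) {
                result.emplace_back(static_cast<T1>(std::stoi(tokens[i])),
                                    static_cast<T2>(std::stoi(tokens[i + 1])));
            }
        } catch (const std::exception &) {
            return {};  // non-numeric or out-of-range token
        }
        return result;
    }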