diff options
Diffstat (limited to 'common')
-rw-r--r-- | common/common.cpp | 8 |
-rw-r--r-- | common/common.h | 1 |
2 files changed, 9 insertions, 0 deletions
diff --git a/common/common.cpp b/common/common.cpp index 75dd78e6..95e91bc1 100644 --- a/common/common.cpp +++ b/common/common.cpp @@ -906,6 +906,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa params.use_mmap = false; return true; } + if (arg == "-rtr" || arg == "--run-time-repack") { + params.repack_tensors = true; + params.use_mmap = false; + return true; + } if (arg == "--numa") { CHECK_ARG std::string value(argv[i]); @@ -1579,6 +1584,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param if (llama_supports_mmap()) { options.push_back({ "*", " --no-mmap", "do not memory-map model (slower load but may reduce pageouts if not using mlock)" }); } + options.push_back({ "*", " --run-time-repack", "repack tensors if interleaved variant is available"}); options.push_back({ "*", " --numa TYPE", "attempt optimizations that help on some NUMA systems\n" " - distribute: spread execution evenly over all nodes\n" " - isolate: only spawn threads on CPUs on the node that execution started on\n" @@ -2204,6 +2210,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params & mparams.use_mmap = params.use_mmap; mparams.use_mlock = params.use_mlock; mparams.check_tensors = params.check_tensors; + mparams.repack_tensors = params.repack_tensors; if (params.kv_overrides.empty()) { mparams.kv_overrides = NULL; } else { @@ -3244,6 +3251,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict); fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", sparams.n_probs); fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false"); + fprintf(stream, "repack: %s # default: false\n", params.repack_tensors ? "true" : "false"); fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? 
"true" : "false"); fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type); fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride); diff --git a/common/common.h b/common/common.h index 486017ef..73d7d650 100644 --- a/common/common.h +++ b/common/common.h @@ -187,6 +187,7 @@ struct gpt_params { bool no_kv_offload = false; // disable KV offloading bool warmup = true; // warmup run bool check_tensors = false; // validate tensor data + bool repack_tensors = false; // repack tensors if interleaved variant is available std::string cache_type_k = "f16"; // KV cache data type for the K std::string cache_type_v = "f16"; // KV cache data type for the V |