author    Kawrakow <iwankawrakow@gmail.com>          2024-12-17 14:16:34 +0100
committer GitHub <noreply@github.com>                2024-12-17 14:16:34 +0100
commit    514ae086200a8cfd78af6a71b6c6ee14931ddc0e (patch)
tree      0fa47186d7c82afbf078d530f5436c7eb1ae4d79 /common/common.cpp
parent    4ade4c568c331acad22537f7b9519c740c7a06d0 (diff)
Be able to repack tensors at run time (#147)
* Be able to repack tensors at run time
* Repack: also add bf16 as repackable type
* Repack: make sure number of rows is a multiple of the packing
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
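With this change, run-time repacking can be requested from the command line. A hedged usage sketch (binary name, model path, and prompt are illustrative, not part of this diff):

    # repack tensors to an interleaved variant while loading the model
    ./llama-cli -m model.gguf -rtr -p "Hello"
    ./llama-cli -m model.gguf --run-time-repack -p "Hello"

Note that the flag also forces use_mmap off (see the first hunk below), presumably because repacking rewrites tensor data in place, which is incompatible with a read-only memory-mapped model file.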
Diffstat (limited to 'common/common.cpp')
-rw-r--r--  common/common.cpp  8 ++++++++
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 75dd78e6..95e91bc1 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -906,6 +906,11 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
params.use_mmap = false;
return true;
}
+ if (arg == "-rtr" || arg == "--run-time-repack") {
+ params.repack_tensors = true;
+ params.use_mmap = false;
+ return true;
+ }
if (arg == "--numa") {
CHECK_ARG
std::string value(argv[i]);
@@ -1579,6 +1584,7 @@ void gpt_params_print_usage(int /*argc*/, char ** argv, const gpt_params & param
if (llama_supports_mmap()) {
options.push_back({ "*", " --no-mmap", "do not memory-map model (slower load but may reduce pageouts if not using mlock)" });
}
+ options.push_back({ "*", " --run-time-repack", "repack tensors if interleaved variant is available"});
options.push_back({ "*", " --numa TYPE", "attempt optimizations that help on some NUMA systems\n"
" - distribute: spread execution evenly over all nodes\n"
" - isolate: only spawn threads on CPUs on the node that execution started on\n"
@@ -2204,6 +2210,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
mparams.use_mmap = params.use_mmap;
mparams.use_mlock = params.use_mlock;
mparams.check_tensors = params.check_tensors;
+ mparams.repack_tensors = params.repack_tensors;
if (params.kv_overrides.empty()) {
mparams.kv_overrides = NULL;
} else {
@@ -3244,6 +3251,7 @@ void yaml_dump_non_result_info(FILE * stream, const gpt_params & params, const l
fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);
fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", sparams.n_probs);
fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
+ fprintf(stream, "repack: %s # default: false\n", params.repack_tensors ? "true" : "false");
fprintf(stream, "penalize_nl: %s # default: false\n", sparams.penalize_nl ? "true" : "false");
fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);