summaryrefslogtreecommitdiff
path: root/common/common.cpp
diff options
context:
space:
mode:
authorKawrakow <iwankawrakow@gmail.com>2025-05-12 07:49:51 +0300
committerGitHub <noreply@github.com>2025-05-12 07:49:51 +0300
commitf27cd405422307e02dffa8949ac30bc56b4d2900 (patch)
tree722b742827684815ca2cc0fb6379edd4edd2f3fd /common/common.cpp
parent465569dff8b49a195450a0eb1974fd72a32fcebc (diff)
Enable faster prompt processing with mainline llama.cpp GGUFs (#409)
* Enable MLA-3 in crippled GGUFs: WIP
* Enable MLA-3 in crippled GGUFs: seems to work
* Add newly created tensors to model.tensors_by_name — else they don't get run-time repacked.
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'common/common.cpp')
-rw-r--r--common/common.cpp1
1 file changed, 1 insertion, 0 deletions
diff --git a/common/common.cpp b/common/common.cpp
index ab936ee7..0dbde58f 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2334,6 +2334,7 @@ struct llama_model_params llama_model_params_from_gpt_params(const gpt_params &
if (params.n_gpu_layers != -1) {
mparams.n_gpu_layers = params.n_gpu_layers;
}
+ mparams.mla = params.mla_attn;
mparams.rpc_servers = params.rpc_servers.c_str();
mparams.main_gpu = params.main_gpu;
mparams.split_mode = params.split_mode;