author     Georgi Gerganov <ggerganov@gmail.com>  2024-03-22 09:36:03 +0200
committer  GitHub <noreply@github.com>            2024-03-22 09:36:03 +0200
commit     95d576b48ebf582b112d1c9cf4eed7142fa4e464 (patch)
tree       1e298e73e8e8de5d7f93d78272f44630fa7198cb /llama.cpp
parent     59c17f02de8fdf7b084d6100b875b7e2bc07a83b (diff)
metal : pad n_ctx by 32 (#6177)
* metal : require ne00 >= 128 for mat-mat kernels

ggml-ci

* llama : pad n_ctx by 32

ggml-ci
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  3
1 file changed, 3 insertions(+), 0 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 1a9fe0c4..9de4a860 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
+ // this is necessary due to kv_self.n being padded later during inference
+ cparams.n_ctx = GGML_PAD(cparams.n_ctx, 32);
+
// with causal attention, the batch size is limited by the context size
cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
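
Note: GGML_PAD rounds its first argument up to the next multiple of the second, so the user-supplied n_ctx is silently bumped to a multiple of 32 before the KV cache is sized. Below is a minimal standalone sketch of that round-up behavior; round_up_pow2 is a hypothetical helper written only for illustration (the real GGML_PAD macro lives in ggml's headers) and assumes the alignment is a power of two, as 32 is here.

#include <stdio.h>

// Hypothetical helper mirroring the GGML_PAD round-up for power-of-two
// alignments: add (align - 1), then mask off the low bits.
static unsigned round_up_pow2(unsigned x, unsigned align) {
    return (x + align - 1) & ~(align - 1);
}

int main(void) {
    // Values already divisible by 32 pass through; others are bumped up.
    unsigned ctx_sizes[] = { 512, 1000, 2048, 4001 };
    for (int i = 0; i < 4; i++) {
        printf("n_ctx %4u -> padded %4u\n",
               ctx_sizes[i], round_up_pow2(ctx_sizes[i], 32));
    }
    return 0;
}

Running this prints 512 -> 512, 1000 -> 1024, 2048 -> 2048, and 4001 -> 4032. Padding n_ctx up front means the later rounding of kv_self.n during inference, which the added comment refers to, can never address past the allocated context.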