From 95d576b48ebf582b112d1c9cf4eed7142fa4e464 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Fri, 22 Mar 2024 09:36:03 +0200
Subject: metal : pad n_ctx by 32 (#6177)

* metal : require ne00 >= 128 for mat-mat kernels

ggml-ci

* llama : pad n_ctx by 32

ggml-ci
---
 llama.cpp | 3 +++
 1 file changed, 3 insertions(+)

(limited to 'llama.cpp')

diff --git a/llama.cpp b/llama.cpp
index 1a9fe0c4..9de4a860 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base   = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
     cparams.rope_freq_scale  = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
 
+    // this is necessary due to kv_self.n being padded later during inference
+    cparams.n_ctx            = GGML_PAD(cparams.n_ctx, 32);
+
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch          = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
     cparams.n_ubatch         = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
-- 
cgit v1.2.3
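
For context, GGML_PAD rounds its first argument up to the nearest multiple of
the second. The sketch below assumes the usual bitmask definition from ggml.h
(which requires the alignment to be a power of two); it only illustrates the
rounding applied to cparams.n_ctx above and is not part of the patch.

    #include <assert.h>
    #include <stdio.h>

    // Round x up to the nearest multiple of n (n must be a power of two).
    // Assumed to match the GGML_PAD macro in ggml.h.
    #define GGML_PAD(x, n) (((x) + (n) - 1) & ~((n) - 1))

    int main(void) {
        // a context size that is not a multiple of 32 is rounded up
        unsigned int n_ctx = 1000;
        printf("n_ctx %u -> padded %u\n", n_ctx, GGML_PAD(n_ctx, 32)); // 1000 -> 1024

        // values that are already aligned are left unchanged
        assert(GGML_PAD(2048u, 32) == 2048u);
        return 0;
    }

Padding n_ctx once at context creation keeps it consistent with kv_self.n,
which is padded the same way later during inference.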