author     Georgi Gerganov <ggerganov@gmail.com>    2024-03-22 09:36:03 +0200
committer  GitHub <noreply@github.com>              2024-03-22 09:36:03 +0200
commit     95d576b48ebf582b112d1c9cf4eed7142fa4e464 (patch)
tree       1e298e73e8e8de5d7f93d78272f44630fa7198cb /llama.cpp
parent     59c17f02de8fdf7b084d6100b875b7e2bc07a83b (diff)
metal : pad n_ctx by 32 (#6177)
* metal : require ne00 >= 128 for mat-mat kernels
ggml-ci
* llama : pad n_ctx by 32
ggml-ci
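For reference, GGML_PAD (from ggml.h) rounds an integer up to the next multiple of its second argument, which is what "pad n_ctx by 32" means here. A minimal C++ sketch; the macro body is restated as an assumption rather than quoted from ggml.h:

#include <cstdio>
#include <initializer_list>

// Round x up to the nearest multiple of n. Assumed to match the
// GGML_PAD macro in ggml.h; restated so this sketch is self-contained.
#define GGML_PAD(x, n) (((x) + (n) - 1) / (n) * (n))

int main() {
    // a few requested context sizes and what padding by 32 turns them into
    for (int n_ctx : { 1, 32, 33, 1000, 4096 }) {
        std::printf("requested n_ctx = %4d -> padded n_ctx = %4d\n",
                    n_ctx, GGML_PAD(n_ctx, 32));
    }
    return 0;
}

So a request for 1000 becomes 1024, while values already divisible by 32 (such as 4096) pass through unchanged.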
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  3
1 file changed, 3 insertions, 0 deletions
@@ -13044,6 +13044,9 @@ struct llama_context * llama_new_context_with_model(
     cparams.rope_freq_base  = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
     cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
 
+    // this is necessary due to kv_self.n being padded later during inference
+    cparams.n_ctx           = GGML_PAD(cparams.n_ctx, 32);
+
     // with causal attention, the batch size is limited by the context size
     cparams.n_batch         = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
     cparams.n_ubatch        = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
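A short usage sketch of the patched logic with hypothetical values (the local n_ctx/n_batch variables here stand in for the cparams fields; the GGML_PAD body is an assumption, as above): a caller asking for a 1000-token context now gets 1024, and with causal attention the batch size is clamped to that padded value.

#include <algorithm>
#include <cstdint>
#include <cstdio>

#define GGML_PAD(x, n) (((x) + (n) - 1) / (n) * (n))  // assumed ggml.h definition

int main() {
    uint32_t n_ctx       = 1000;  // hypothetical requested context size
    uint32_t n_batch     = 2048;  // hypothetical requested batch size
    bool     causal_attn = true;

    // mirrors the patched code path in llama_new_context_with_model
    n_ctx   = GGML_PAD(n_ctx, 32);                              // 1000 -> 1024
    n_batch = causal_attn ? std::min(n_ctx, n_batch) : n_batch; // clamped to 1024

    std::printf("n_ctx = %u, n_batch = %u\n", n_ctx, n_batch);
    return 0;
}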