author     Georgi Gerganov <ggerganov@gmail.com>      2024-06-17 19:40:01 +0300
committer  GitHub <noreply@github.com>                2024-06-17 19:40:01 +0300
commit     7c26775adb579e92b59c82e8084c07a1d0f75e9c (patch)
tree       a4c5f453ddb14c32812d023deec3335bd290ec7a
parent     b473e95084c286780165568cf0f385f21141d68d (diff)
llama : disable FA if KV head size do not match (#7982)
-rw-r--r--    llama.cpp    5
1 file changed, 5 insertions, 0 deletions
diff --git a/llama.cpp b/llama.cpp
index dd7020dc..61948751 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
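
Note: the new guard simply forces flash attention off (with a warning) whenever the per-head K and V embedding sizes disagree, since the FA path assumes equal K/V head sizes. Below is a minimal standalone C++ sketch of the same check; the struct names, the fprintf-based logging, and the example head sizes are illustrative stand-ins rather than the real llama.cpp types or the LLAMA_LOG_WARN macro — only the field names (flash_attn, n_embd_head_k, n_embd_head_v) mirror the diff above.

// Minimal sketch of the head-size guard added in this commit (stand-in types, not llama.cpp's).
#include <cstdint>
#include <cstdio>

struct hparams_sketch {
    uint32_t n_embd_head_k; // per-head embedding size of the K tensor
    uint32_t n_embd_head_v; // per-head embedding size of the V tensor
};

struct context_params_sketch {
    bool flash_attn;        // user-requested flash attention
};

// Mirrors the added check: FA is downgraded to off when K/V head sizes differ.
static void apply_flash_attn_guard(context_params_sketch & params, const hparams_sketch & hparams) {
    if (params.flash_attn && hparams.n_embd_head_k != hparams.n_embd_head_v) {
        fprintf(stderr, "%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
        params.flash_attn = false;
    }
}

int main() {
    context_params_sketch params  = { /*flash_attn      =*/ true };
    hparams_sketch        hparams = { /*n_embd_head_k   =*/ 192,
                                      /*n_embd_head_v   =*/ 128 }; // example values with mismatched K/V head sizes
    apply_flash_attn_guard(params, hparams);
    printf("flash_attn after guard: %s\n", params.flash_attn ? "on" : "off");
    return 0;
}

The design choice matches the surrounding checks in llama_new_context_with_model visible in the hunk: an incompatible flash_attn request is downgraded with a warning, whereas the following V-cache quantization check treats the incompatibility as a hard error and returns nullptr.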