author      Georgi Gerganov <ggerganov@gmail.com>    2024-06-17 19:40:01 +0300
committer   GitHub <noreply@github.com>              2024-06-17 19:40:01 +0300
commit      7c26775adb579e92b59c82e8084c07a1d0f75e9c
tree        a4c5f453ddb14c32812d023deec3335bd290ec7a
parent      b473e95084c286780165568cf0f385f21141d68d
llama : disable FA if KV head size do not match (#7982)
-rw-r--r--   llama.cpp   5
1 file changed, 5 insertions, 0 deletions
@@ -16260,6 +16260,11 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
+    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+        params.flash_attn = false;
+    }
+
     if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
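
For reference, a minimal standalone sketch of the same guard. The hparams_sketch struct, the 192/128 head sizes, and the main driver are hypothetical stand-ins for model->hparams.n_embd_head_k / n_embd_head_v; the sketch only mirrors the condition shown in the diff, it is not llama.cpp API.

// Hedged sketch: struct and values below are illustrative, not part of llama.cpp.
#include <cstdint>
#include <cstdio>

struct hparams_sketch {
    uint32_t n_embd_head_k;
    uint32_t n_embd_head_v;
};

int main() {
    // Hypothetical model whose K and V head sizes differ.
    hparams_sketch hparams = { /*n_embd_head_k=*/192, /*n_embd_head_v=*/128 };
    bool flash_attn = true; // the caller requested flash attention

    // Same condition as the diff: FA is only kept when K and V head sizes match.
    if (flash_attn && hparams.n_embd_head_k != hparams.n_embd_head_v) {
        fprintf(stderr, "flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n");
        flash_attn = false;
    }

    printf("flash_attn = %s\n", flash_attn ? "true" : "false");
    return 0;
}

Note the design choice visible in the diff: a head-size mismatch only downgrades the request with a warning (LLAMA_LOG_WARN) so the model can still run with the regular attention path, whereas the V-cache quantization check right below it is a hard error (LLAMA_LOG_ERROR and return nullptr), since per its message a quantized V cache requires flash_attn.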