Diffstat (limited to 'src/llama.cpp')
-rw-r--r--  src/llama.cpp  |  8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/src/llama.cpp b/src/llama.cpp
index b2553802..0817c53c 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -17768,10 +17768,10 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }
 
-    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
-        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
-        params.flash_attn = false;
-    }
+    //if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+    //    LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+    //    params.flash_attn = false;
+    //}
 
     if (params.type_v != GGML_TYPE_F16 && params.type_v != GGML_TYPE_BF16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
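
Note (not part of the commit): the change above comments out the guard that forced flash attention off whenever a model's per-head K size differs from its per-head V size. The following is a minimal standalone sketch of that guard's old behavior; the hparams_t struct is a hypothetical stand-in for the relevant llama_hparams fields, and the 192/128 head sizes are illustrative values of the kind used by some MLA-style models.

#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for the relevant fields of llama_hparams.
struct hparams_t {
    uint32_t n_embd_head_k; // per-head dimension of K
    uint32_t n_embd_head_v; // per-head dimension of V
};

int main() {
    // Illustrative values: a model whose K head is larger than its
    // V head (e.g. 192 vs 128) would trip the guard below.
    hparams_t hparams = {192, 128};
    bool flash_attn = true;

    // The guard disabled by the diff above: before this change,
    // unequal head sizes forced flash attention off.
    if (flash_attn && hparams.n_embd_head_k != hparams.n_embd_head_v) {
        fprintf(stderr, "flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n");
        flash_attn = false;
    }

    printf("flash_attn = %s\n", flash_attn ? "on" : "off");
    return 0;
}

With the guard compiled in, the sketch prints "flash_attn = off" for these head sizes; with it commented out, as in the diff, flash_attn stays on and the later V-cache quantization check can still be satisfied.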