From 3c98bfb33d149a0d9d3bb91604dd12709721e3cf Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Tue, 11 Feb 2025 14:46:30 +0200
Subject: DeepSeek FA support (CPU only) (#200)

* Adding support for K head size != V head size

This is relevant for DeepSeek models. At this point ggml CPU FA works.
Now I need to go and change iqk FA to make it work with Dk != Dv.

* iqk support for K head size != V head size

To not have compilation time explode, just Dk = 192, Dv = 128 for now
(DeepSeek)

* FA: very slightly faster for nq = 1 (TG)

---------

Co-authored-by: Iwan Kawrakow
---
 src/llama.cpp | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index b2553802..0817c53c 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -17768,10 +17768,10 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }

-    if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
-        LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
-        params.flash_attn = false;
-    }
+    //if (params.flash_attn && model->hparams.n_embd_head_k != model->hparams.n_embd_head_v) {
+    //    LLAMA_LOG_WARN("%s: flash_attn requires n_embd_head_k == n_embd_head_v - forcing off\n", __func__);
+    //    params.flash_attn = false;
+    //}

     if (params.type_v != GGML_TYPE_F16 && params.type_v != GGML_TYPE_BF16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
--
cgit v1.2.3
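
Note (not part of the patch): the check commented out above existed because a fused flash-attention kernel with K head size != V head size must carry two head dimensions through the computation. Below is a minimal, self-contained C++ sketch of the nq = 1 (token-generation) case, assuming nothing beyond the standard library; the helper attend_one_query and the toy inputs are hypothetical and only illustrate that the dot products reduce over Dk (192 for DeepSeek) while the output inherits Dv (128).

// Illustrative sketch, not part of the patch above: naive single-query
// attention where the K head size (Dk) differs from the V head size (Dv),
// as in DeepSeek (Dk = 192, Dv = 128). Scores are computed over Dk, the
// weighted sum runs over V rows of length Dv, so the output has Dv elements.
// The helper name and toy values are hypothetical.
#include <algorithm>
#include <cmath>
#include <cstdio>
#include <vector>

static std::vector<float> attend_one_query(
        const std::vector<float> & q,   // query,  Dk elements
        const std::vector<float> & K,   // keys,   n_kv x Dk, row-major
        const std::vector<float> & V,   // values, n_kv x Dv, row-major
        int n_kv, int Dk, int Dv) {
    // scaled dot-product scores: the reduction dimension is Dk
    std::vector<float> s(n_kv);
    const float scale = 1.0f / std::sqrt((float)Dk);
    float smax = -INFINITY;
    for (int j = 0; j < n_kv; ++j) {
        float dot = 0.0f;
        for (int d = 0; d < Dk; ++d) dot += q[d] * K[j*Dk + d];
        s[j] = dot * scale;
        smax = std::max(smax, s[j]);
    }
    // softmax over the n_kv scores
    float sum = 0.0f;
    for (int j = 0; j < n_kv; ++j) { s[j] = std::exp(s[j] - smax); sum += s[j]; }
    // weighted sum of V rows: the output dimension is Dv, not Dk
    std::vector<float> out(Dv, 0.0f);
    for (int j = 0; j < n_kv; ++j) {
        const float w = s[j] / sum;
        for (int d = 0; d < Dv; ++d) out[d] += w * V[j*Dv + d];
    }
    return out;
}

int main() {
    const int n_kv = 4, Dk = 192, Dv = 128;  // DeepSeek-style head sizes
    std::vector<float> q(Dk, 0.01f), K(n_kv*Dk, 0.02f), V(n_kv*Dv, 0.03f);
    std::vector<float> out = attend_one_query(q, K, V, n_kv, Dk, Dv);
    std::printf("output size: %zu (== Dv)\n", out.size());
    return 0;
}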