diff options
-rw-r--r-- | ggml/src/iqk/iqk_mul_mat.cpp | 16 | ||||
-rw-r--r-- | src/llama.cpp | 2 |
2 files changed, 15 insertions, 3 deletions
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp index 8b6d6b1c..4d29e2f0 100644 --- a/ggml/src/iqk/iqk_mul_mat.cpp +++ b/ggml/src/iqk/iqk_mul_mat.cpp @@ -17265,13 +17265,25 @@ template <int step_k, typename KHelper, typename VHelper> inline void iqk_deepseek_helper(KHelper& kh, VHelper& vh, int nq1, int nk1, int stride_q, int stride_m, int stride_qkv, const float * q, const char * mask, float scale, float softcap, float * qkv, float * M, float * S) { - if (nq1 % 8 == 0) { + if (nq1 >= 8) { FlashAttn<576, 512, 8, step_k> fa(scale, softcap); fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S); - } else { + } + else if (nq1 >= 4) { + FlashAttn<576, 512, 4, step_k> fa(scale, softcap); + fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S); + } + else { FlashAttn<576, 512, 1, step_k> fa(scale, softcap); fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S); } + //if (nq1 % 8 == 0) { + // FlashAttn<576, 512, 8, step_k> fa(scale, softcap); + // fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S); + //} else { + // FlashAttn<576, 512, 1, step_k> fa(scale, softcap); + // fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S); + //} } template <int step_k> diff --git a/src/llama.cpp b/src/llama.cpp index 395b2879..24737265 100644 --- a/src/llama.cpp +++ b/src/llama.cpp @@ -13896,7 +13896,7 @@ struct llm_build_context { // whether to use n_tokens as the matrix dimension during multiplication or n_head // n_tokens is higher during prompt processing, this allows to optimize for this case - bool pp_opt = n_tokens > n_head; + bool pp_opt = n_tokens >= 128; // Is it a fixed constant or is it somehow related to n_head? original: n_tokens > n_head; for (int il = 0; il < n_layer; ++il) { struct ggml_tensor * inpSA = inpL; |