summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorKawrakow <iwankawrakow@gmail.com>2025-03-23 17:10:52 +0100
committerGitHub <noreply@github.com>2025-03-23 17:10:52 +0100
commitf9307d79071c2a1e8efe10ecb1e1304bf77c021a (patch)
treeaea9c988eafe137b9c172ef1a56b54bbf272d4cf
parent5a4855e61c05b0c54ecad3f4155074d8f344b6f6 (diff)
Improve DeepSeek batched processing speed (#282)
* Improve DeepSeek batched processing speed * Revert the commented out section in iqk_mul_mat.cpp It does have some benefit at long contexts. --------- Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
-rw-r--r--ggml/src/iqk/iqk_mul_mat.cpp16
-rw-r--r--src/llama.cpp2
2 files changed, 15 insertions, 3 deletions
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 8b6d6b1c..4d29e2f0 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -17265,13 +17265,25 @@ template <int step_k, typename KHelper, typename VHelper>
inline void iqk_deepseek_helper(KHelper& kh, VHelper& vh,
int nq1, int nk1, int stride_q, int stride_m, int stride_qkv,
const float * q, const char * mask, float scale, float softcap, float * qkv, float * M, float * S) {
- if (nq1 % 8 == 0) {
+ if (nq1 >= 8) {
FlashAttn<576, 512, 8, step_k> fa(scale, softcap);
fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
- } else {
+ }
+ else if (nq1 >= 4) {
+ FlashAttn<576, 512, 4, step_k> fa(scale, softcap);
+ fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
+ }
+ else {
FlashAttn<576, 512, 1, step_k> fa(scale, softcap);
fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
}
+ //if (nq1 % 8 == 0) {
+ // FlashAttn<576, 512, 8, step_k> fa(scale, softcap);
+ // fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
+ //} else {
+ // FlashAttn<576, 512, 1, step_k> fa(scale, softcap);
+ // fa.compute(kh, vh, nq1, nk1, stride_q, stride_m, stride_qkv, q, mask, qkv, M, S);
+ //}
}
template <int step_k>
diff --git a/src/llama.cpp b/src/llama.cpp
index 395b2879..24737265 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -13896,7 +13896,7 @@ struct llm_build_context {
// whether to use n_tokens as the matrix dimension during multiplication or n_head
// n_tokens is higher during prompt processing, this allows to optimize for this case
- bool pp_opt = n_tokens > n_head;
+    bool pp_opt = n_tokens >= 128; // Is it a fixed constant or is it somehow related to n_head? original: n_tokens > n_head;
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;