From f9307d79071c2a1e8efe10ecb1e1304bf77c021a Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Sun, 23 Mar 2025 17:10:52 +0100
Subject: Improve DeepSeek batched processing speed (#282)

* Improve DeepSeek batched processing speed

* Revert the commented out section in iqk_mul_mat.cpp

It does have some benefit at long contexts.

---------

Co-authored-by: Iwan Kawrakow
---
 src/llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/llama.cpp b/src/llama.cpp
index 395b2879..24737265 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -13896,7 +13896,7 @@ struct llm_build_context {
 
         // whether to use n_tokens as the matrix dimension during multiplication or n_head
         // n_tokens is higher during prompt processing, this allows to optimize for this case
-        bool pp_opt = n_tokens > n_head;
+        bool pp_opt = n_tokens >= 128; // Is it a fixed constant or is it somehow related to n_head? original: n_tokens > n_head;
 
         for (int il = 0; il < n_layer; ++il) {
             struct ggml_tensor * inpSA = inpL;
-- 
cgit v1.2.3
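
For readers skimming the hunk, below is a minimal, illustrative C++ sketch of the two batch-size heuristics the changed line toggles between. Only n_tokens and n_head are taken from the patch (the llama.cpp batch size in tokens and the number of attention heads); the helper functions, the demo values, and the standalone program framing are assumptions made for illustration, not code from the repository.

#include <cstdint>
#include <cstdio>

// Old rule: use n_tokens as the matrix dimension whenever the batch is
// larger than the head count.
static bool pp_opt_old(int64_t n_tokens, int64_t n_head) {
    return n_tokens > n_head;
}

// Rule after this commit: switch to the prompt-processing layout only for
// batches of at least 128 tokens; n_head is no longer consulted. The patch
// author's own comment questions whether 128 is best read as a fixed
// constant or as something tied to n_head.
static bool pp_opt_new(int64_t n_tokens) {
    return n_tokens >= 128;
}

int main() {
    // Hypothetical head count, chosen only to show where the rules diverge:
    // with n_head = 16 the old rule flips at 17 tokens, the new one at 128.
    const int64_t n_head = 16;
    for (int64_t n_tokens : {1, 32, 64, 128, 512}) {
        std::printf("n_tokens=%3lld  old=%d  new=%d\n",
                    (long long) n_tokens,
                    pp_opt_old(n_tokens, n_head),
                    pp_opt_new(n_tokens));
    }
    return 0;
}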