author     Kawrakow <iwankawrakow@gmail.com>  2025-03-23 17:10:52 +0100
committer  GitHub <noreply@github.com>        2025-03-23 17:10:52 +0100
commit     f9307d79071c2a1e8efe10ecb1e1304bf77c021a (patch)
tree       aea9c988eafe137b9c172ef1a56b54bbf272d4cf /src
parent     5a4855e61c05b0c54ecad3f4155074d8f344b6f6 (diff)
Improve DeepSeek batched processing speed (#282)
* Improve DeepSeek batched processing speed

* Revert the commented out section in iqk_mul_mat.cpp

  It does have some benefit at long contexts.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'src')
-rw-r--r--  src/llama.cpp  2
1 file changed, 1 insertion, 1 deletion
diff --git a/src/llama.cpp b/src/llama.cpp
index 395b2879..24737265 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -13896,7 +13896,7 @@ struct llm_build_context {
// whether to use n_tokens as the matrix dimension during multiplication or n_head
// n_tokens is higher during prompt processing, this allows to optimize for this case
- bool pp_opt = n_tokens > n_head;
+ bool pp_opt = n_tokens >= 128; // Is this a fixed constant, or is it somehow related to n_head? original: n_tokens > n_head;
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
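The one-line change above swaps the old n_tokens > n_head test for a fixed 128-token cutoff when deciding whether to shape the attention matmul around the batch size (large during prompt processing) or around the head count (better for small decode batches). Below is a minimal, self-contained C++ sketch of just that heuristic, not the surrounding llama.cpp build code: the helper choose_pp_opt, the main() driver, and the example head count of 128 are all hypothetical and only illustrate how the threshold behaves across batch sizes.

    #include <cstdint>
    #include <cstdio>

    // Hypothetical helper mirroring the changed line.
    // Old rule: n_tokens > n_head. New rule (this commit): fixed cutoff of 128 tokens.
    static bool choose_pp_opt(int64_t n_tokens, int64_t n_head) {
        (void) n_head;           // no longer consulted after this commit
        return n_tokens >= 128;  // was: n_tokens > n_head
    }

    int main() {
        const int64_t n_head = 128;  // example head count, for illustration only
        const int64_t test_tokens[] = {1, 32, 128, 512};
        for (int64_t n_tokens : test_tokens) {
            std::printf("n_tokens=%lld -> pp_opt=%d\n",
                        (long long) n_tokens,
                        choose_pp_opt(n_tokens, n_head) ? 1 : 0);
        }
        return 0;
    }

With these example values the old and new rules disagree only for batches between the head count and 128 tokens; the commit message reports that the fixed cutoff gives better batched (prompt-processing) throughput for DeepSeek models.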