path: root/llama.cpp
author    Georgi Gerganov <ggerganov@gmail.com>  2023-09-05 10:46:39 +0300
committer Georgi Gerganov <ggerganov@gmail.com>  2023-09-05 10:46:39 +0300
commit    35938ee3b0c16f1fbbf240dae21e0228864b938c (patch)
tree      2f51edec4d1d1c22dc8d793a84933865d99651a7 /llama.cpp
parent    921772104ba2219bfdc2b2980d05ebc0aa0c92a4 (diff)
llama : update logic for number of threads when using BLAS
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index edf3b4ea..3413288f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -2942,7 +2942,12 @@ static bool llama_eval_internal(
// for big prompts, if BLAS is enabled, it is better to use only one thread
// otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
- n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas() ? 1 : n_threads;
+ // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
+ // we still need some threads to process all non-mul_mat ops, but not too many to avoid interfering
+ // with the BLAS calls. need a better solution
+ if (N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
+ n_threads = std::min(4, n_threads);
+ }
struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
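
For reference, a minimal standalone sketch of the new heuristic. The helper stubs cpu_has_blas() and cpu_has_gpublas() are hypothetical stand-ins for ggml_cpu_has_blas() / ggml_cpu_has_gpublas() so the snippet compiles on its own; the real checks live in ggml.

    #include <algorithm>
    #include <cstdio>

    // Hypothetical stand-ins for ggml_cpu_has_blas() / ggml_cpu_has_gpublas().
    static bool cpu_has_blas()    { return true;  }  // e.g. Accelerate on Apple Silicon
    static bool cpu_has_gpublas() { return false; }  // e.g. cuBLAS / CLBlast

    // Pick the thread count for evaluating a batch of N tokens.
    static int pick_n_threads(int N, int n_threads) {
        // Big prompt + CPU BLAS: keep a few threads for the non-mul_mat ops,
        // but cap them at 4 so they do not spin-wait on the BLAS calls and
        // degrade performance.
        if (N >= 32 && cpu_has_blas() && !cpu_has_gpublas()) {
            return std::min(4, n_threads);
        }
        return n_threads;
    }

    int main() {
        printf("N = 64, 8 threads -> %d\n", pick_n_threads(64, 8));  // prints 4
        printf("N =  8, 8 threads -> %d\n", pick_n_threads( 8, 8));  // prints 8
        return 0;
    }

The previous behavior dropped to a single thread in this case; the cap of 4 keeps some parallelism for the non-mul_mat ops, which per the commit comment matters most on Apple Silicon where CBLAS still performs very well.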