author    slaren <2141330+slaren@users.noreply.github.com>  2023-04-19 11:22:45 +0200
committer GitHub <noreply@github.com>  2023-04-19 11:22:45 +0200
commit    8944a1329648c57bb7d66851170938230587a52c (patch)
tree      a4b759e0a1ee93fa542dc708fc938b660ff13eac /llama.cpp
parent    66674012389f9537140044290f8517bc4a5e0b74 (diff)
Add NVIDIA cuBLAS support (#1044)
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index f14324fc..3ff5dc1e 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1069,7 +1069,7 @@ static bool llama_eval_internal(
// for big prompts, if BLAS is enabled, it is better to use only one thread
// otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
ggml_cgraph gf = {};
- gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads;
+ gf.n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_cublas() ? 1 : n_threads;
struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
memcpy(embd->data, tokens, N*ggml_element_size(embd));
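
For readers skimming the change: the single-thread fallback for large prompts now applies only when a CPU BLAS backend is in use, since with cuBLAS the extra CPU threads are not left spin-waiting on BLAS calls. Below is a minimal standalone sketch of that decision; choose_n_threads is an illustrative helper name, not part of ggml or llama.cpp, and the two capability flags stand in for the real ggml_cpu_has_blas() / ggml_cpu_has_cublas() calls.

// Sketch of the thread-count decision made in llama_eval_internal above.
// For big prompts (N >= 32) with CPU BLAS, extra threads would only
// spin-lock while BLAS runs, so a single thread is used; with cuBLAS the
// CPU threads remain useful, so the configured count is kept.
static int choose_n_threads(int N, int n_threads, bool has_blas, bool has_cublas) {
    if (N >= 32 && has_blas && !has_cublas) {
        return 1;
    }
    return n_threads;
}

// Usage sketch, passing in the real capability queries:
//   gf.n_threads = choose_n_threads(N, n_threads, ggml_cpu_has_blas(), ggml_cpu_has_cublas());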