| author | slaren <2141330+slaren@users.noreply.github.com> | 2023-04-19 11:22:45 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-04-19 11:22:45 +0200 |
| commit | 8944a1329648c57bb7d66851170938230587a52c (patch) | |
| tree | a4b759e0a1ee93fa542dc708fc938b660ff13eac /llama.cpp | |
| parent | 66674012389f9537140044290f8517bc4a5e0b74 (diff) | |
Add NVIDIA cuBLAS support (#1044)
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
@@ -1069,7 +1069,7 @@ static bool llama_eval_internal(
     // for big prompts, if BLAS is enabled, it is better to use only one thread
     // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
     ggml_cgraph gf = {};
-    gf.n_threads = N >= 32 && ggml_cpu_has_blas() ? 1 : n_threads;
+    gf.n_threads = N >= 32 && ggml_cpu_has_blas() && !ggml_cpu_has_cublas() ? 1 : n_threads;

     struct ggml_tensor * embd = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, N);
     memcpy(embd->data, tokens, N*ggml_element_size(embd));