diff options
author | Johannes Gäßler <johannesg@5d6.de> | 2023-09-21 10:43:53 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-09-21 11:43:53 +0300 |
commit | 8185710a80531e9ee0c0cb99d3a9c9af1019ab67 (patch) | |
tree | 1486dc344c960bf215581b111bd66cee69a1396b | |
parent | 7eb41179edc56083ef4eb2df7967ac9ff38b34fb (diff) |
CUDA: use only 1 thread if fully offloaded (#2915)
-rw-r--r-- | llama.cpp | 9 |
1 file changed, 9 insertions(+), 0 deletions(-)
@@ -3765,6 +3765,15 @@ static bool llama_eval_internal(
         n_threads = std::min(4, n_threads);
     }

+    // If all tensors can be run on the GPU then using more than 1 thread is detrimental.
+    const bool full_offload_supported = model.arch == LLM_ARCH_LLAMA ||
+        model.arch == LLM_ARCH_BAICHUAN ||
+        model.arch == LLM_ARCH_FALCON;
+    const bool fully_offloaded = model.n_gpu_layers >= (int) hparams.n_layer + 3;
+    if (ggml_cpu_has_cublas() && full_offload_supported && fully_offloaded) {
+        n_threads = 1;
+    }
+
     struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
     struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];