author    Georgi Gerganov <ggerganov@gmail.com>  2023-10-27 17:01:23 +0300
committer GitHub <noreply@github.com>            2023-10-27 17:01:23 +0300
commit    2f9ec7e271220a78fe27c9e6ccbcc0dda31cda0f (patch)
tree      e36dcd292a133c5106376c6ddd90c68f446ddae6 /llama.cpp
parent    34b2a5e1ee4fe6295fb4420eb91131d743694c65 (diff)
cuda : improve text-generation and batched decoding performance (#3776)
* cuda : prints wip
* cuda : new cublas gemm branch for multi-batch quantized src0
* cuda : add F32 sgemm branch
* cuda : fine-tune >= VOLTA params + use MMQ only for small batches
* cuda : remove duplicated cuBLAS GEMM code
* cuda : add CUDA_USE_TENSOR_CORES and GGML_CUDA_FORCE_MMQ macros
* build : add compile option to force use of MMQ kernels
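The performance change described above hinges on how matrix multiplications for quantized weights are dispatched: the MMQ (quantized mul-mat) kernels are kept for small batches, while larger batches go through a cuBLAS GEMM path (using tensor cores on Volta and newer when CUDA_USE_TENSOR_CORES is enabled), and GGML_CUDA_FORCE_MMQ forces the MMQ path at build time. Below is a minimal sketch of such a dispatch heuristic; the function name, the batch-size cutoff, and the compute-capability constant are illustrative assumptions, not the actual identifiers in ggml-cuda.

    #include <cstdint>

    // Hypothetical constants; the real cutoff and CC values live in ggml-cuda.
    static const int64_t MMQ_MAX_BATCH_SIZE = 32;  // assumed small-batch cutoff
    static const int     CC_VOLTA           = 700; // compute capability 7.0

    // Decide between the quantized MMQ kernels and dequantize + cuBLAS GEMM.
    static bool should_use_mmq(int compute_cap, int64_t batch_size) {
    #ifdef GGML_CUDA_FORCE_MMQ
        (void) compute_cap; (void) batch_size;
        return true;  // build option forces the MMQ kernels unconditionally
    #else
        // On Volta and newer, tensor-core GEMM wins for multi-token batches,
        // so keep MMQ only for small batches (e.g. single-token decoding).
        if (compute_cap >= CC_VOLTA) {
            return batch_size <= MMQ_MAX_BATCH_SIZE;
        }
        return true;  // pre-Volta GPUs: MMQ remains the faster path
    #endif
    }

The compile option mentioned in the last bullet would plausibly be wired through the build as something like -DLLAMA_CUDA_FORCE_MMQ=ON (flag name assumed from the macro above), defining GGML_CUDA_FORCE_MMQ for the CUDA sources.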
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  |  2 --
1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 61f30c39..cc8669b0 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5959,8 +5959,6 @@ static int llama_decode_internal(
}
}
- ggml_cuda_set_mul_mat_q(cparams.mul_mat_q);
-
// HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
if (!lctx.embedding.empty()) {
embeddings->backend = GGML_BACKEND_CPU;
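The two lines removed here are consistent with the commit message: once the MMQ-vs-cuBLAS choice is made by the batch-size heuristic and the GGML_CUDA_FORCE_MMQ build option, llama_decode_internal no longer needs to toggle it at runtime via ggml_cuda_set_mul_mat_q.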