Diffstat (limited to 'llama.cpp'):
 llama.cpp | 2 --
 1 file changed, 0 insertions(+), 2 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 61f30c39..cc8669b0 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5959,8 +5959,6 @@ static int llama_decode_internal(
}
}
- ggml_cuda_set_mul_mat_q(cparams.mul_mat_q);
-
// HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
if (!lctx.embedding.empty()) {
embeddings->backend = GGML_BACKEND_CPU;