summaryrefslogtreecommitdiff
path: root/llama.h
diff options
context:
space:
mode:
authorPierrick Hymbert <pierrick.hymbert@gmail.com>2024-03-01 12:39:06 +0100
committerGitHub <noreply@github.com>2024-03-01 13:39:06 +0200
commit3ab8b3a92ede46df88bc5a2dfca3777de4a2b2b6 (patch)
treeb0232b770527ead35c5a59971a0802ded16e8c40 /llama.h
parent9600d59e010c18f5872580a21734ea1bf1968d04 (diff)
llama : cleanup unused mmq flags (#5772)
* cleanup unused --no-mul-mat-q,-nommq, -mmq, --mul-mat-q, mul_mat_q

* remove: mul_mat_q in compare llama bench and usage

* update llama-bench

---------

Co-authored-by: slaren <slarengh@gmail.com>
Diffstat (limited to 'llama.h')
-rw-r--r--llama.h1
1 files changed, 0 insertions, 1 deletions
diff --git a/llama.h b/llama.h
index 4d0ebe37..ed51f478 100644
--- a/llama.h
+++ b/llama.h
@@ -255,7 +255,6 @@ extern "C" {
enum ggml_type type_v; // data type for V cache
// Keep the booleans together to avoid misalignment during copy-by-value.
- bool mul_mat_q; // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
bool logits_all; // the llama_eval() call computes all logits, not just the last one (DEPRECATED - set llama_batch.logits instead)
bool embedding; // embedding mode only
bool offload_kqv; // whether to offload the KQV ops (including the KV cache) to GPU