path: root/llama.h
author    Georgi Gerganov <ggerganov@gmail.com>  2023-10-27 17:01:23 +0300
committer GitHub <noreply@github.com>            2023-10-27 17:01:23 +0300
commit    2f9ec7e271220a78fe27c9e6ccbcc0dda31cda0f (patch)
tree      e36dcd292a133c5106376c6ddd90c68f446ddae6 /llama.h
parent    34b2a5e1ee4fe6295fb4420eb91131d743694c65 (diff)
cuda : improve text-generation and batched decoding performance (#3776)
* cuda : prints wip
* cuda : new cublas gemm branch for multi-batch quantized src0
* cuda : add F32 sgemm branch
* cuda : fine-tune >= VOLTA params + use MMQ only for small batches
* cuda : remove duplicated cuBLAS GEMM code
* cuda : add CUDA_USE_TENSOR_CORES and GGML_CUDA_FORCE_MMQ macros
* build : add compile option to force use of MMQ kernels
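A hedged sketch of the dispatch policy the commit message describes: quantized mul_mat goes through the MMQ kernels only for small batches and falls back to the cuBLAS GEMM branch otherwise, unless MMQ is forced at build time via the GGML_CUDA_FORCE_MMQ macro named above. The helper name, the batch-size cutoff, and the surrounding logic are illustrative assumptions, not the actual ggml-cuda code.

    #include <stdbool.h>

    #define MMQ_MAX_BATCH_SIZE 32               /* assumed "small batch" cutoff */

    static bool use_mmq_for_mul_mat(int batch_size) {
    #if defined(GGML_CUDA_FORCE_MMQ)
        (void) batch_size;
        return true;                            /* build option forces the MMQ kernels */
    #else
        return batch_size < MMQ_MAX_BATCH_SIZE; /* MMQ for small batches,
                                                   cuBLAS GEMM branch otherwise */
    #endif
    }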
Diffstat (limited to 'llama.h')
-rw-r--r--  llama.h  2
1 file changed, 1 insertion, 1 deletion
diff --git a/llama.h b/llama.h
index 2f2fee0e..beac9a0c 100644
--- a/llama.h
+++ b/llama.h
@@ -178,7 +178,7 @@ extern "C" {
float rope_freq_scale; // RoPE frequency scaling factor, 0 = from model
// Keep the booleans together to avoid misalignment during copy-by-value.
- bool mul_mat_q; // if true, use experimental mul_mat_q kernels
+ bool mul_mat_q; // if true, use experimental mul_mat_q kernels (DEPRECATED - always true)
bool f16_kv; // use fp16 for KV cache, fp32 otherwise
bool logits_all; // the llama_eval() call computes all logits, not just the last one
bool embedding; // embedding mode only
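A minimal usage sketch of the field touched by this hunk, assuming the llama_context_params / llama_context_default_params() API of llama.h at this point in time: after this change the mul_mat_q flag is kept only for layout compatibility, and setting it has no effect since the backend treats it as always true.

    #include "llama.h"

    static struct llama_context_params make_params(void) {
        struct llama_context_params params = llama_context_default_params();
        params.mul_mat_q = false;  /* ignored after this commit: always treated as true */
        params.f16_kv    = true;   /* neighbouring flags keep their meaning */
        return params;
    }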