summaryrefslogtreecommitdiff
path: root/llama.h
diff options
context:
space:
mode:
authorGeorgi Gerganov <ggerganov@gmail.com>2023-04-18 23:54:57 +0300
committerGitHub <noreply@github.com>2023-04-18 23:54:57 +0300
commit77a73403ca8eaced2590559d0f9cebd2b3649d32 (patch)
tree7b95e7565ce86b81d8dd620117564da901ce3ce7 /llama.h
parent50a8a2af97cb92e53e7a3195aa201c3d87da5415 (diff)
ggml : add new Q4_2 quantization (ARM only) (#1046)
* ggml : Q4_2 ARM
* ggml : add ggml_is_quantized()
* llama : update llama_type_name() with Q4_2 entry
* ggml : speed-up q4_2
  - 4 threads: ~100ms -> ~90ms
  - 8 threads: ~55ms -> ~50ms
* ggml : optimize q4_2 using vmlaq_n_f32 + vmulq_n_f32
Diffstat (limited to 'llama.h')
-rw-r--r--llama.h1
1 file changed, 1 insertion, 0 deletions
diff --git a/llama.h b/llama.h
index c35193a8..208b03d1 100644
--- a/llama.h
+++ b/llama.h
@@ -72,6 +72,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_Q4_0 = 2, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1 = 3, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q4_1_SOME_F16 = 4, // tok_embeddings.weight and output.weight are F16
+ LLAMA_FTYPE_MOSTLY_Q4_2 = 5, // except 1d tensors
};
LLAMA_API struct llama_context_params llama_context_default_params();