From ccec00939a30aa7762a232ac4dcadba985ef9ee4 Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Tue, 3 Dec 2024 06:15:29 +0100
Subject: Q8_0_R4 (#120)

* Adding q8_0_r4

We get PP-512(LLaMA-3.1-8B) = 268 t/s on a Ryzen-7950X compared to
175.6 t/s for Q8_0.

* q8_0_r4: NEON

We get PP-512(LLaMA-3.1-8B) = 112.6 t/s on M2-Max.

* q8_0_r4: Zen4 matrix-vector specialization

---------

Co-authored-by: Iwan Kawrakow
---
 ggml/include/ggml.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'ggml/include/ggml.h')

diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 1a46881a..2358fb76 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -408,6 +408,7 @@ extern "C" {
         GGML_TYPE_IQ4_KSS = 146,
         GGML_TYPE_Q4_0_R4 = 202,
+        GGML_TYPE_Q8_0_R4 = 208,
         GGML_TYPE_IQ4_NL_X4 = 220,
         GGML_TYPE_COUNT,
     };
@@ -469,6 +470,7 @@ extern "C" {
         GGML_FTYPE_MOSTLY_IQ4_KSS = 139, // except 1d tensors
         //
         GGML_FTYPE_MOSTLY_Q4_0_R4 = 202, // except 1d tensors
+        GGML_FTYPE_MOSTLY_Q8_0_R4 = 207, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ4_NL_X4 = 219, // except 1d tensors
     };
--
cgit v1.2.3
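
For context, a minimal sketch (not taken from this patch, which only adds the enum entries above) of what a "4 interleaved rows" repacking of Q8_0 could look like. The struct and function names below (block_q8_0_r4, repack_q8_0_rows_sketch) are illustrative assumptions; the actual q8_0_r4 layout in ik_llama.cpp may interleave at a finer granularity to suit the AVX512/NEON GEMM kernels.

#include <stdint.h>
#include <string.h>

#define QK8_0 32                     /* Q8_0 block size in ggml */

typedef uint16_t ggml_half;          /* stand-in typedef for ggml's fp16 storage */

/* Standard Q8_0 block: one fp16 scale plus 32 signed 8-bit quants. */
typedef struct {
    ggml_half d;
    int8_t    qs[QK8_0];
} block_q8_0;

/* Hypothetical interleaved block: the same block index from 4 consecutive
 * rows packed side by side, so a matrix-multiplication kernel can produce
 * 4 output rows per pass from contiguous loads. */
typedef struct {
    ggml_half d[4];                  /* one scale per interleaved row */
    int8_t    qs[4*QK8_0];           /* quants of 4 rows at the same block index */
} block_q8_0_r4;

/* Illustrative repacking of 4 Q8_0 rows (n_blocks blocks each) into the
 * interleaved layout sketched above. */
static void repack_q8_0_rows_sketch(const block_q8_0 * const rows[4],
                                    block_q8_0_r4 * dst, int n_blocks) {
    for (int ib = 0; ib < n_blocks; ++ib) {
        for (int r = 0; r < 4; ++r) {
            dst[ib].d[r] = rows[r][ib].d;
            memcpy(dst[ib].qs + r*QK8_0, rows[r][ib].qs, QK8_0);
        }
    }
}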