From 3d92d7f802b332927669f01bfa51ebbb56e868ba Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Thu, 15 May 2025 16:02:39 +0300
Subject: Adding IQ5_KS - 5.25 bpw quants (#422)

* iq5_ks: basics

* iq5_ks: quantize

* iq5_ks: CUDA dequantize works

* iq5_ks: dot product works on CUDA

* iq5_ks: MMQ works

* iq5_ks: Zen4

* iq5_ks: AVX2
  But it is not quite right, just like iq4_k, iq5_k, iq6_k, iq4_ks.
  All of these need fixing on AVX2.

* iq5_ks: NEON

* iq5_ks: Metal dequantize

* iq5_ks: Metal dot product

---------

Co-authored-by: Iwan Kawrakow
---
 ggml/include/ggml.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'ggml/include')

diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index beeb3c09..b6f461ed 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -425,6 +425,7 @@ extern "C" {
         GGML_TYPE_Q8_KR8   = 149,
         GGML_TYPE_Q8_K128  = 150,
         GGML_TYPE_Q8_KV    = 151,
+        GGML_TYPE_IQ5_KS   = 152,

         GGML_TYPE_Q4_0_R8  = 202,
         GGML_TYPE_Q5_0_R4  = 206,
@@ -512,6 +513,7 @@ extern "C" {
         GGML_FTYPE_MOSTLY_IQ2_KS   = 138, // except 1d tensors
         GGML_FTYPE_MOSTLY_IQ4_KSS  = 139, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q8_KV    = 140, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ5_KS   = 141, // except 1d tensors
         //
         GGML_FTYPE_MOSTLY_Q4_0_R8  = 202, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q8_0_R8  = 207, // except 1d tensors
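Note on the "5.25 bpw" figure in the subject line: a minimal sketch of the arithmetic, assuming IQ5_KS packs 5-bit weights in super-blocks of 256 with 8 bytes of per-block scale/metadata. The struct below is purely illustrative; the real block_iq5_ks layout lives in ggml's quantization sources, which are outside the ggml/include portion shown in this commit.

    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical layout, NOT the actual block_iq5_ks definition. */
    #define QK_K 256                      /* weights per super-block (ggml's usual QK_K) */

    typedef struct {
        uint8_t meta[8];                  /* assumed: per-block scales/extra bits, 8 bytes */
        uint8_t qs[QK_K * 5 / 8];         /* 5 bits per weight, packed: 160 bytes */
    } example_block_5p25bpw;              /* 168 bytes total for 256 weights */

    int main(void) {
        double bpw = (double)sizeof(example_block_5p25bpw) * 8.0 / QK_K;
        printf("%.2f bpw\n", bpw);        /* prints 5.25 under the assumptions above */
        return 0;
    }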