From 3d92d7f802b332927669f01bfa51ebbb56e868ba Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Thu, 15 May 2025 16:02:39 +0300
Subject: Adding IQ5_KS - 5.25 bpw quants (#422)

* iq5_ks: basics

* iq5_ks: quantize

* iq5_ks: CUDA dequantize works

* iq5_ks: dot product works on CUDA

* iq5_ks: MMQ works

* iq5_ks: Zen4

* iq5_ks: AVX2

  But it is not quite right, just like iq4_k, iq5_k, iq6_k, iq4_ks.
  All these need fixing on AVX2.

* iq5_ks: NEON

* iq5_ks: Metal dequantize

* iq5_ks: Metal dot product

---------

Co-authored-by: Iwan Kawrakow
---
 examples/quantize/quantize.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 60cf260c..1b388a73 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -68,6 +68,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
     { "IQ4_KS",   LLAMA_FTYPE_MOSTLY_IQ4_KS,   " 4.25 bpw non-linear quantization",   },
     { "IQ4_KS_R4",LLAMA_FTYPE_MOSTLY_IQ4_KS_R4,"IQ4_KS repacked", },
     { "IQ4_KSS",  LLAMA_FTYPE_MOSTLY_IQ4_KSS,  " 4.0 bpw non-linear quantization",   },
+    { "IQ5_KS",   LLAMA_FTYPE_MOSTLY_IQ5_KS,   " 5.25 bpw non-linear quantization",   },
     { "IQ2_K",    LLAMA_FTYPE_MOSTLY_IQ2_K,    " 2.375 bpw non-linear quantization",},
     { "IQ2_K_R4", LLAMA_FTYPE_MOSTLY_IQ2_K_R4, "IQ2_K repacked",},
     { "IQ2_KS",   LLAMA_FTYPE_MOSTLY_IQ2_KS,   " 2.1875 bpw non-linear quantization",},
--
cgit v1.2.3
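
Note on the 5.25 bpw figure: quant types in this family pair fixed-width codes with a
small amount of per-super-block metadata, so the headline bits-per-weight number is
simple arithmetic. The sketch below shows how 5.25 bpw can arise, assuming a 256-weight
super-block (QK_K) of 5-bit codes plus 8 bytes of scale metadata; this is an
illustrative assumption, not the actual block_iq5_ks layout defined in ggml.

#include <cstdio>

// Illustrative bits-per-weight arithmetic for a 5.25 bpw layout.
// Assumed (hypothetical) layout: a 256-weight super-block storing
// 5-bit non-linear codes plus 8 bytes of per-block scale metadata.
int main() {
    const int block_size = 256;                  // weights per super-block (QK_K)
    const int code_bits  = block_size * 5;       // 1280 bits of 5-bit codes
    const int meta_bits  = 8 * 8;                // 64 bits of assumed scale metadata
    const double bpw = double(code_bits + meta_bits) / block_size;
    std::printf("%.2f bpw\n", bpw);              // prints 5.25
    return 0;
}

Under these assumptions the overhead works out to exactly 0.25 bpw, consistent with the
pattern visible in the table above, where IQ4_KS is listed at 4.25 bpw (4 + 0.25) and
IQ5_KS at 5.25 bpw (5 + 0.25).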