From f989fb03bd12752ad6e93717ca4bd298d5001d99 Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Sun, 20 Jul 2025 10:05:23 +0200
Subject: Adding IQ1_KT - 1.75 bpw SOTA quants (#616)

* iq1_kt: basics

* iq1_kt: CUDA dequantize

  Testing with LlaMA-3.1-8B-Instruct, we get almost the same PPL as
  iq2_xxs, so about 0.2 bpw fewer bits for the same quality.

* iq1_kt: CUDA MMQ

* iq1_kt: CUDA MMVQ

* iq1_kt: AVX2 GEMM/GEMV

* iq1_kt: convert/repack to q8_0_r8 (AVX2)

* iq1_kt: slightly faster GEMV

  18.6 t/s -> 19.4 t/s

* iq1_kt: NEON GEMM/GEMV

  Pathetic as usual

* iq1_kt: slightly faster NEON - still pathetic

* iq1_kt: tiny bit better GEMV on NEON

* iq1_kt: convert/repack to q8_0_r8 (NEON)

* iq1_kt: very slightly faster convert/repack to q8_0_r8 on NEON

* Adding forgotten file

* iq1_kt: add to constants.py

---------

Co-authored-by: Iwan Kawrakow
---
 gguf-py/gguf/constants.py | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'gguf-py/gguf/constants.py')

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 767637c5..32a667e2 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -1322,6 +1322,7 @@ class GGMLQuantizationType(IntEnum):
     IQ4_KT  = 155
     IQ3_KS  = 156
     IQ2_KL  = 157
+    IQ1_KT  = 158
     Q4_0_R8 = 202
     Q5_0_R4 = 206
     Q8_0_R8 = 208
@@ -1539,6 +1540,7 @@ GGML_QUANT_SIZES: dict[GGMLQuantizationType, tuple[int, int]] = {
    GGMLQuantizationType.IQ4_KT : ( 256, 128),
    GGMLQuantizationType.IQ3_KS : ( 256, 102),
    GGMLQuantizationType.IQ2_KL : ( 256, 86),
+   GGMLQuantizationType.IQ1_KT : ( 256, 56),
    GGMLQuantizationType.Q4_0_R8 : ( 32, 18),
    GGMLQuantizationType.Q5_0_R4 : ( 32, 22),
    GGMLQuantizationType.Q8_0_R8 : ( 32, 34),
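
A quick way to relate the new GGML_QUANT_SIZES entry to the "1.75 bpw" in the subject line: each IQ1_KT block encodes 256 weights in 56 bytes, i.e. 56 * 8 / 256 = 1.75 bits per weight. Below is a minimal standalone sketch of that arithmetic; it is not part of the patch, and bits_per_weight is a made-up helper, not gguf-py API.

    # Illustrative only: check (block_size, type_size) pairs from the diff above.
    def bits_per_weight(block_size: int, type_size: int) -> float:
        # type_size bytes encode block_size quantized weights
        return type_size * 8 / block_size

    if __name__ == "__main__":
        print(bits_per_weight(256, 56))   # IQ1_KT -> 1.75
        print(bits_per_weight(256, 128))  # IQ4_KT -> 4.0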