path: root/ggml/src/ggml.c
author    Kawrakow <iwankawrakow@gmail.com>    2025-06-18 16:20:54 +0300
committer GitHub <noreply@github.com>          2025-06-18 16:20:54 +0300
commit    d85c64428e0598f2617a352aec9960242af45652 (patch)
tree      66bd9be9d2ea315917b36adf1ac4cd24b9ba7a97 /ggml/src/ggml.c
parent    c410cc72bbfcbdef9ce552b425ab7abbeb250dff (diff)
New IQ2_KT, IQ3_KT and IQ4_KT, V2 (#529)
* New iq4_kt trellis. The new trellis generates int8_t values via sum_as_uint8_t[(ka * idx + kb) & 0x3f3f3f3f] - 126 (see the C sketch after this list). CUDA dequantize works. The AVX2 case Ny > 32 works, and we get 273 t/s for L3-8B. PPL is on par with, or even slightly lower than, the original QTIP trellis.
* Something is not working with the AVX2 dot product.
* New iq4_kt: CUDA MMVQ.
* New iq4_kt: CUDA MMQ.
* For now, only iq4_kt uses the new trellis.
* Fix iq2_kt, which got broken along the way.
* New iq4_kt: the AVX2 dot product finally works (one way to do the 4-byte sums on AVX2 is sketched after the diffstat below). We get 13.6 t/s vs 8.4 t/s with the f16 trellis and f32 arithmetic. Still somewhat slower than other quants, but no longer pathetic.
* New iq4_kt: fix vanilla AVX2.
* New iq4_kt: NEON implementation. We get a very respectable PP-512 = 120 t/s. TG-128 is pathetic at 5.3 t/s, 20+% slower than the f16 variant.
* New iq4_kt: slightly faster NEON.
* New iq4_kt: slightly faster NEON.
* New iq4_kt: faster NEON. We are now at 9.4 t/s, up from 6.6 t/s for the f16 trellis.
* Minor.
* New iq4_kt trellis: not-yet-working Metal implementation.
* Remove the extra 4 bytes of row meta data that are no longer used.
* Cleanup.
* Add forgotten file.
* Switch iq2_kt to the new trellis: CUDA MMQ.
* New iq2_kt: CUDA GEMV.
* New iq2_kt: AVX2 dequantize.
* New iq2_kt: AVX2 GEMM/GEMV.
* Add forgotten file.
* New iq2_kt: NEON GEMM/GEMV.
* New iq2_kt: slightly faster NEON GEMM.
* New iq2_kt: Metal. Very slow: it seems Apple Silicon cannot quickly add 4 8-bit ints, or I don't know how to do it; I didn't find anything in the Metal Shading Language Specification. So performance is quite a bit worse than with the original trellis.
* Add missing break.
* Try @louiehelm's multiplier.
* CPU.
* iq3_kt: use the integer trellis + CUDA dequantize and MMVQ.
* iq3_kt: MMQ.
* iq3_kt: AVX2 GEMM.
* iq3_kt: AVX2 GEMV.
* The trellis quants now need super-blocks of 256, so we need a check.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
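For reference, a minimal scalar sketch in C of the integer trellis described in the first bullet above. KA and KB are placeholder values (an assumption, not the constants this commit settles on); the actual multipliers live in the ik_llama.cpp sources, and the commit itself also experiments with @louiehelm's multiplier.

#include <stdint.h>

// Placeholder constants (assumption): the real ka/kb values are defined
// in the ik_llama.cpp sources and are not shown on this page.
#define KA 0xCBAC1FEDu
#define KB 1u

// sum_as_uint8_t[(ka * idx + kb) & 0x3f3f3f3f] - 126:
// mask every byte of the 32-bit state to 6 bits, add the four bytes
// (each in 0..63, so the sum is in 0..252), and subtract 126 so the
// result lands in -126..126, i.e. it fits an int8_t.
static inline int8_t trellis_int8(uint32_t idx) {
    uint32_t x = (KA * idx + KB) & 0x3f3f3f3fu;
    uint32_t sum = (x & 0xff) + ((x >> 8) & 0xff)
                 + ((x >> 16) & 0xff) + ((x >> 24) & 0xff);
    return (int8_t)(sum - 126);
}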
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r--  ggml/src/ggml.c  25
1 file changed, 15 insertions(+), 10 deletions(-)
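As referenced in the commit message, here is one common AVX2 idiom for the 4-byte sums. This is only a sketch of how four masked bytes per 32-bit lane can be added quickly, not necessarily the instruction sequence this commit uses: maddubs folds byte pairs into 16-bit sums, then madd folds those into one 32-bit sum per lane. Because the bytes are masked to 6 bits, the pair sums (at most 126) cannot overflow 16 bits.

#include <immintrin.h>

// Sum the four bytes of every 32-bit lane (sketch, not the commit's code).
static inline __m256i hsum4_bytes_avx2(__m256i x) {
    const __m256i ones8  = _mm256_set1_epi8(1);
    const __m256i ones16 = _mm256_set1_epi16(1);
    __m256i pairs = _mm256_maddubs_epi16(x, ones8); // 16 x 16-bit pair sums
    return _mm256_madd_epi16(pairs, ones16);        // 8 x 32-bit lane sums
}

Subtracting _mm256_set1_epi32(126) from the result then yields the int8 trellis values for eight lanes at once.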
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 69b1b46d..cc056f89 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1596,10 +1596,10 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.from_float = quantize_row_iq2_kt,
.from_float_ref = (ggml_from_float_t)quantize_row_iq2_kt_ref,
.vec_dot = vec_dot_iq2_kt_q8_k,
-#ifdef __ARM_NEON
- .vec_dot_type = GGML_TYPE_F16,
+#if defined __AVX2__
+ .vec_dot_type = GGML_TYPE_Q8_2_X4,
#else
- .vec_dot_type = GGML_TYPE_F32,
+ .vec_dot_type = GGML_TYPE_Q8_0_X4,
#endif
.nrows = 1,
.row_meta_size = 4,
@@ -1613,11 +1613,16 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.from_float = quantize_row_iq3_kt,
.from_float_ref = (ggml_from_float_t)quantize_row_iq3_kt_ref,
.vec_dot = vec_dot_iq3_kt_q8_k,
-#ifdef __ARM_NEON
- .vec_dot_type = GGML_TYPE_F16,
+#if defined __AVX2__
+ .vec_dot_type = GGML_TYPE_Q8_2_X4,
#else
- .vec_dot_type = GGML_TYPE_F32,
+ .vec_dot_type = GGML_TYPE_Q8_0_X4,
#endif
+//#ifdef __ARM_NEON
+// .vec_dot_type = GGML_TYPE_F16,
+//#else
+// .vec_dot_type = GGML_TYPE_F32,
+//#endif
.nrows = 1,
.row_meta_size = 4,
},
@@ -1630,13 +1635,13 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.from_float = quantize_row_iq4_kt,
.from_float_ref = (ggml_from_float_t)quantize_row_iq4_kt_ref,
.vec_dot = vec_dot_iq4_kt_q8_k,
-#ifdef __ARM_NEON
- .vec_dot_type = GGML_TYPE_F16,
+#if defined __AVX2__
+ .vec_dot_type = GGML_TYPE_Q8_2_X4,
#else
- .vec_dot_type = GGML_TYPE_F32,
+ .vec_dot_type = GGML_TYPE_Q8_0_X4,
#endif
.nrows = 1,
- .row_meta_size = 8,
+ .row_meta_size = 4,
},
[GGML_TYPE_IQ3_K] = {
.type_name = "iq3_k",