path: root/ggml/src/ggml.c
author    Kawrakow <iwankawrakow@gmail.com>    2025-07-20 10:05:23 +0200
committer GitHub <noreply@github.com>          2025-07-20 10:05:23 +0200
commit    f989fb03bd12752ad6e93717ca4bd298d5001d99
tree      7a127aba5c05667904b7e28a46d07c2d295ef619 /ggml/src/ggml.c
parent    07673c6c33753487dd054dcff37f19d93d6c56d3
Adding IQ1_KT - 1.75 bpw SOTA quants (#616)
* iq1_kt: basics
* iq1_kt: CUDA dequantize
  Testing with LLaMA-3.1-8B-Instruct, we get almost the same PPL as iq2_xxs, so about 0.2 bpw fewer bits for the same quality.
* iq1_kt: CUDA MMQ
* iq1_kt: CUDA MMVQ
* iq1_kt: AVX2 GEMM/GEMV
* iq1_kt: convert/repack to q8_0_r8 (AVX2)
* iq1_kt: slightly faster GEMV
  18.6 t/s -> 19.4 t/s
* iq1_kt: NEON GEMM/GEMV
  Pathetic as usual
* iq1_kt: slightly faster NEON - still pathetic
* iq1_kt: tiny bit better GEMV on NEON
* iq1_kt: convert/repack to q8_0_r8 (NEON)
* iq1_kt: very slightly faster convert/repack to q8_0_r8 on NEON
* Adding forgotten file
* iq1_kt: add to constants.py

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
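The struct behind the new type is not part of this diff (block_iq1_kt is defined elsewhere in the tree, alongside the other KT quants in ggml-common.h). As rough orientation only, a layout consistent with the numbers registered below (blck_size = QK_K = 256 weights per block, 1.75 bpw, row_meta_size = 4) might look like this hypothetical sketch; the real definition may differ:

/* Hypothetical sketch -- NOT the actual block_iq1_kt from this commit.
 * The traits below pin down the arithmetic: 256 weights per block at
 * 1.75 bpw gives 1.75 * 256 / 8 = 56 bytes of packed data per block,
 * and row_meta_size = 4 suggests 4 bytes of per-row metadata (e.g. a
 * single float scale) stored ahead of the blocks in each row. */
typedef struct {
    uint8_t qs[56];   /* assumed: 448 bits of packed trellis indices */
} block_iq1_kt;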
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r--  ggml/src/ggml.c | 26 ++++++++++++++++++++++++++
1 file changed, 26 insertions(+), 0 deletions(-)
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index b3982538..5aec6b0d 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1587,6 +1587,23 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 2,
},
+ [GGML_TYPE_IQ1_KT] = {
+ .type_name = "iq1_kt",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq1_kt),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq1_kt,
+ .from_float = quantize_row_iq1_kt,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq1_kt_ref,
+ .vec_dot = vec_dot_iq1_kt_q8_k,
+#if defined __AVX2__
+ .vec_dot_type = GGML_TYPE_Q8_2_X4,
+#else
+ .vec_dot_type = GGML_TYPE_Q8_0_X4,
+#endif
+ .nrows = 1,
+ .row_meta_size = 4,
+ },
[GGML_TYPE_IQ2_KT] = {
.type_name = "iq2_kt",
.blck_size = QK_K,
@@ -4600,6 +4617,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ2_K: wtype = GGML_TYPE_IQ2_K; break;
case GGML_FTYPE_MOSTLY_IQ2_K_R4: wtype = GGML_TYPE_IQ2_K_R4; break;
case GGML_FTYPE_MOSTLY_IQ2_KS: wtype = GGML_TYPE_IQ2_KS; break;
+ case GGML_FTYPE_MOSTLY_IQ1_KT: wtype = GGML_TYPE_IQ1_KT; break;
case GGML_FTYPE_MOSTLY_IQ2_KT: wtype = GGML_TYPE_IQ2_KT; break;
case GGML_FTYPE_MOSTLY_IQ3_KT: wtype = GGML_TYPE_IQ3_KT; break;
case GGML_FTYPE_MOSTLY_IQ4_KT: wtype = GGML_TYPE_IQ4_KT; break;
@@ -11379,6 +11397,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ1_KT:
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
@@ -11858,6 +11877,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ1_KT:
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
@@ -12034,6 +12054,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ1_KT:
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
@@ -15537,6 +15558,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ1_KT:
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
@@ -15953,6 +15975,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ1_KT:
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
@@ -16275,6 +16298,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ1_KT:
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
@@ -16914,6 +16938,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ1_KT:
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
@@ -23989,6 +24014,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ2_K: result = quantize_iq2_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_K_R4:result = quantize_iq2_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_KS: result = quantize_iq2_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ1_KT: result = quantize_iq1_kt (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_KT: result = quantize_iq2_kt (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ3_KT: result = quantize_iq3_kt (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_KT: result = quantize_iq4_kt (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
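
The new ggml_quantize_chunk case above is what exposes IQ1_KT through ggml's public quantization entry point. A minimal caller sketch, assuming the standard ggml_quantize_chunk() and ggml_row_size() declarations from ggml.h; the helper name and buffer handling are illustrative, not from this commit:

#include "ggml.h"
#include <stdlib.h>

/* Quantize nrows rows of f32 weights to IQ1_KT via the dispatch added
 * above. Returns the number of bytes written and stores the allocated
 * destination buffer in *out. imatrix may be NULL, though like the
 * other KT quants, IQ1_KT likely benefits from importance-matrix data. */
size_t quantize_to_iq1_kt(const float * src, int64_t nrows, int64_t n_per_row,
                          const float * imatrix, void ** out) {
    /* bytes per quantized row, including the 4-byte row metadata */
    const size_t row_size = ggml_row_size(GGML_TYPE_IQ1_KT, n_per_row);
    *out = malloc(nrows * row_size);
    return ggml_quantize_chunk(GGML_TYPE_IQ1_KT, src, *out,
                               /*start =*/ 0, nrows, n_per_row, imatrix);
}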