| author | Kawrakow <iwankawrakow@gmail.com> | 2025-07-14 18:55:08 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2025-07-14 18:55:08 +0200 |
| commit | 45fae1a14444622478774f9a417e1d417af1ca46 (patch) | |
| tree | 2609ef06be5640749834d4fc691446771ab29f42 /ggml/src/ggml.c | |
| parent | f5353047ef461e6fc9d527e09a06c9802c699929 (diff) | |
Adding IQ2_KL (#602)
* Experiments for 2.6875 bpw quants
At least according to RMSE, this is significantly better than
q2_K while using only 1/16 of a bit more per weight (storage arithmetic is sketched after the commit list).
* iq2_kl: basics
* iq2_kl: CUDA dequantize
* iq2_kl: small improvement in PPL
Also check the two neighbouring values of the block scale
and use the one that minimizes RMSE (the idea is sketched after the commit list).
* iq2_kl: MMQ
Quite good: PP-512(L3-8B) = 8472 t/s.
* iq2_kl: MMVQ
We get PP-128(L3-8B) = 162 t/s, which is not quite as good as it
should be: q2_K, at (almost) the same bpw, reaches 170 t/s.
* iq2_kl: Zen4 GEMM/GEMV
Not particularly fast. I may need to think about rearranging the bits.
* iq2_kl: better Zen4
* iq2_kl: convert/repack to q8_k_r8 (AVX2)
* iq2_kl: AVX2 GEMM/GEMV
* iq2_kl: WIP NEON
The compiler started crashing!!!
* iq2_kl: NEON
Had to work around a compiler crash with vzip2q_u8 by using
vqtbl2q_u8 instead (see the NEON sketch after the commit list).
* iq2_kl: convert/repack to q8_k_r8 (NEON)
* iq2_kl: Metal dequantize
* iq2_kl: Metal GEMV - pretty slow
* iq2_kl: Metal GEMV - slightly better (40 t/s -> 44.5 t/s)
* iq2_kl: Metal GEMV - slightly better (44.5 t/s -> 46.5 t/s)
* iq2_kl: Metal GEMV - slightly better (46.5 t/s -> 47.2 t/s)
* iq2_kl: slightly better Metal dequantize
PP-512 goes to 476 t/s, up from 466 t/s.
* iq2_kl: slightly better Metal dequantize
PP-512 goes to 492 t/s, up from 476 t/s.
* Add iq2_kl to constants.py
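
A back-of-the-envelope check of the storage claim above (not part of the commit): at 2.6875 bpw a 256-weight super-block takes 86 bytes, versus 84 bytes for q2_K at 2.625 bpw, i.e. exactly 1/16 of a bit more per weight. Only the 2.6875 bpw figure comes from the commit itself; QK_K = 256 and the q2_K number are the usual k-quant values and are assumptions here.

```c
/* Storage arithmetic sketch; QK_K = 256 and the q2_K figure of 2.625 bpw are
 * assumptions based on the usual k-quant layout, not taken from this diff. */
#include <stdio.h>

int main(void) {
    const int    qk_k       = 256;     // weights per super-block
    const double bpw_iq2_kl = 2.6875;  // from the commit message
    const double bpw_q2_k   = 2.625;   // 84 bytes per 256-weight block
    printf("iq2_kl: %.1f bytes per %d-weight block\n", bpw_iq2_kl * qk_k / 8.0, qk_k); // 86.0
    printf("q2_K  : %.1f bytes per %d-weight block\n", bpw_q2_k   * qk_k / 8.0, qk_k); // 84.0
    printf("extra : %.4f bits per weight\n", bpw_iq2_kl - bpw_q2_k);                   // 0.0625 = 1/16
    return 0;
}
```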
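The block-scale refinement from the "small improvement in PPL" item is, roughly, a local search: after picking an integer block scale, also evaluate its two neighbouring values and keep whichever gives the lowest block RMSE. A minimal sketch of that idea follows; the helper names and the quantize callback are hypothetical, not the actual quantize_row_iq2_kl code.

```c
/* Sketch of the block-scale refinement: try {is-1, is, is+1} and keep the
 * scale index with the smallest RMSE. 'quant' stands in for quantize+dequantize
 * of one value at a given scale index; it is a placeholder, not a real ggml API. */
#include <math.h>

static float block_rmse(const float *x, int n, int is, float (*quant)(float, int)) {
    double sum = 0.0;
    for (int i = 0; i < n; ++i) {
        float q = quant(x[i], is);            // round-trip one weight at scale index 'is'
        sum += (x[i] - q) * (x[i] - q);
    }
    return (float)sqrt(sum / n);
}

static int refine_block_scale(const float *x, int n, int is, int is_min, int is_max,
                              float (*quant)(float, int)) {
    int   best_is   = is;
    float best_rmse = block_rmse(x, n, is, quant);
    for (int d = -1; d <= 1; d += 2) {        // the two neighbouring scale values
        int cand = is + d;
        if (cand < is_min || cand > is_max) continue;
        float e = block_rmse(x, n, cand, quant);
        if (e < best_rmse) { best_rmse = e; best_is = cand; }
    }
    return best_is;
}
```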
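The NEON workaround is a table-lookup substitute: vzip2q_u8(a, b) interleaves the upper halves of two byte vectors, and the same permutation can be expressed with vqtbl2q_u8 and a constant index vector, which side-steps the compiler crash. A sketch assuming AArch64 NEON; the wrapper name is made up, and the real kernels live in the CPU GEMM/GEMV code rather than in ggml.c.

```c
#if defined(__aarch64__)
#include <arm_neon.h>

// Emulate vzip2q_u8(a, b) with a table lookup: vqtbl2q_u8 indexes a 32-byte
// table where bytes 0..15 come from a and 16..31 from b, so interleaving the
// upper halves needs indices 8,24, 9,25, ..., 15,31.
static inline uint8x16_t zip2_u8_via_tbl(uint8x16_t a, uint8x16_t b) {
    static const uint8_t k_idx[16] = {
         8, 24,  9, 25, 10, 26, 11, 27,
        12, 28, 13, 29, 14, 30, 15, 31,
    };
    uint8x16x2_t tbl;
    tbl.val[0] = a;
    tbl.val[1] = b;
    return vqtbl2q_u8(tbl, vld1q_u8(k_idx));
}
#endif
```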
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r-- | ggml/src/ggml.c | 22 |
1 file changed, 22 insertions, 0 deletions
```diff
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 2e6983df..dbb080f8 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1669,6 +1669,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
         .nrows = 1,
         .row_meta_size = 2,
     },
+    [GGML_TYPE_IQ2_KL] = {
+        .type_name = "iq2_kl",
+        .blck_size = QK_K,
+        .type_size = sizeof(block_iq2_kl),
+        .is_quantized = true,
+        .to_float = (ggml_to_float_t) dequantize_row_iq2_kl,
+        .from_float = quantize_row_iq2_kl,
+        .from_float_ref = (ggml_from_float_t)quantize_row_iq2_kl_ref,
+        .vec_dot = vec_dot_iq2_kl_q8_k,
+        .vec_dot_type = GGML_TYPE_Q8_K,
+        .nrows = 1,
+        .row_meta_size = 2,
+    },
     [GGML_TYPE_IQ4_K] = {
         .type_name = "iq4_k",
         .blck_size = QK_K,
@@ -4592,6 +4605,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
         case GGML_FTYPE_MOSTLY_IQ4_KT:   wtype = GGML_TYPE_IQ4_KT;   break;
         case GGML_FTYPE_MOSTLY_IQ3_K:    wtype = GGML_TYPE_IQ3_K;    break;
         case GGML_FTYPE_MOSTLY_IQ3_KS:   wtype = GGML_TYPE_IQ3_KS;   break;
+        case GGML_FTYPE_MOSTLY_IQ2_KL:   wtype = GGML_TYPE_IQ2_KL;   break;
         case GGML_FTYPE_MOSTLY_IQ4_K:    wtype = GGML_TYPE_IQ4_K;    break;
         case GGML_FTYPE_MOSTLY_IQ3_K_R4: wtype = GGML_TYPE_IQ3_K_R4; break;
         case GGML_FTYPE_MOSTLY_IQ4_K_R4: wtype = GGML_TYPE_IQ4_K_R4; break;
@@ -11362,6 +11376,7 @@ static void ggml_compute_forward_add(
         case GGML_TYPE_IQ4_KT:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ3_KS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
@@ -11840,6 +11855,7 @@ static void ggml_compute_forward_add1(
         case GGML_TYPE_IQ4_KT:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ3_KS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
@@ -12015,6 +12031,7 @@ static void ggml_compute_forward_acc(
         case GGML_TYPE_IQ4_KT:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ3_KS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
@@ -15517,6 +15534,7 @@ static void ggml_compute_forward_out_prod(
         case GGML_TYPE_IQ4_KT:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ3_KS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
@@ -15932,6 +15950,7 @@ static void ggml_compute_forward_set(
         case GGML_TYPE_IQ4_KT:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ3_KS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
@@ -16253,6 +16272,7 @@ static void ggml_compute_forward_get_rows(
         case GGML_TYPE_IQ4_KT:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ3_KS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
@@ -16891,6 +16911,7 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_IQ4_KT:
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ3_KS:
+        case GGML_TYPE_IQ2_KL:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ3_K_R4:
         case GGML_TYPE_IQ4_K_R4:
@@ -23965,6 +23986,7 @@ size_t ggml_quantize_chunk(
         case GGML_TYPE_IQ4_KT:   result = quantize_iq4_kt   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ3_K:    result = quantize_iq3_k    (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ3_KS:   result = quantize_iq3_ks   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_IQ2_KL:   result = quantize_iq2_kl   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_K:    result = quantize_iq4_k    (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ3_K_R4: result = quantize_iq3_k_r4 (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_K_R4: result = quantize_iq4_k_r4 (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
```
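
For orientation, the new type_traits entry is what the rest of ggml uses to size and dispatch iq2_kl tensors: blck_size = QK_K weights per block_iq2_kl, a 2-byte per-row metadata area (row_meta_size), activations quantized to Q8_K before vec_dot_iq2_kl_q8_k, and quantize_iq2_kl invoked from ggml_quantize_chunk. Below is a hedged sketch of the resulting row-size arithmetic; the actual formula lives in the fork's row-size helper, so treat this as an assumption rather than the implementation.

```c
/* Row-size sketch based on the traits registered above. Assumes the per-row
 * metadata (row_meta_size = 2 bytes) is simply added on top of the per-block
 * payload; sizeof(block_iq2_kl) would be 86 bytes if the quoted 2.6875 bpw
 * refers to the block payload alone (256 * 2.6875 / 8 = 86). */
#include <stddef.h>

static size_t iq2_kl_row_bytes(size_t n_per_row, size_t block_iq2_kl_size) {
    const size_t blck_size     = 256; // QK_K
    const size_t row_meta_size = 2;   // as registered in type_traits
    return row_meta_size + (n_per_row / blck_size) * block_iq2_kl_size;
}
```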