author    Kawrakow <iwankawrakow@gmail.com>  2025-06-18 07:29:33 +0300
committer GitHub <noreply@github.com>        2025-06-18 07:29:33 +0300
commit    dc96820ddb45c639ea4e149e4bbfcb0b67fbcc2b (patch)
tree      2ac3011164d541f5899db1afdad375cc59bfc142 /ggml/src/iqk/iqk_mul_mat.cpp
parent    8b3002bba2ea64b1de9ca2ff87207d8c37b0f08e (diff)
Much faster CPU prompt processing (part 2) (#533)
* iq4_ks: 203 t/s -> 357 t/s. iq4_ks_r4 is 242 t/s.
* iq4_k: 175 t/s -> 353 t/s. iq4_k_r4 is 208 t/s. PPL is actually lower!
* iq5_ks: 180 t/s -> 359 t/s. iq5_ks_r4 is 210 t/s. PPL is actually lower - 7.4160 vs 7.4494 for LLaMA-3.1-8B-Instruct.
* iq5_k: accuracy loss looked too big at first - there was a bug with the shifts, and that is why PPL was so high. It is also high on main; this fixes it.
* iq6_k: 148 t/s -> 350 t/s. There is no iq6_k_r4. PPL is actually lower because there is a bug in the existing implementation!
* iq3_k: 169 t/s -> 363 t/s. iq3_k_r4 is at 200 t/s.
* iq2_k: 190 t/s -> 364 t/s. iq2_k_r4 is at 232 t/s.
* iq2_ks: 200 t/s -> 367 t/s. There is no iq2_ks_r4.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
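As the diff below shows, the speedups come from repacking these IQK quant types to the row-interleaved Q8_K_R8 layout once the batch is large enough (32 or more right-hand-side columns), so the shared 8-bit GEMM kernels can be used instead of the per-type ones. A minimal sketch of that dispatch pattern follows; the enum and function names here are stand-ins for illustration, not the actual ggml/iqk API:

```cpp
#include <cstdio>

// Illustrative stand-in for ggml_type; only the members used below.
enum class WeightType { IQ2_KS, IQ2_K, IQ3_K, IQ4_KS, IQ4_K, IQ5_KS, IQ5_K, IQ6_K, Q8_K_R8, Other };

// Sketch of the selection added in the first hunk: small batches keep the
// original quant type, large batches repack to the row-interleaved 8-bit type.
static WeightType repack_type_for(WeightType type, int nrc_y) {
    constexpr int kThreshold = 32;             // same threshold as in the diff
    switch (type) {
        case WeightType::IQ2_KS: case WeightType::IQ2_K:
        case WeightType::IQ3_K:  case WeightType::IQ4_KS:
        case WeightType::IQ4_K:  case WeightType::IQ5_KS:
        case WeightType::IQ5_K:  case WeightType::IQ6_K:
            return nrc_y >= kThreshold ? WeightType::Q8_K_R8 : type;
        default:
            return type;                       // all other types are unaffected
    }
}

int main() {
    // Token generation (nrc_y == 1) keeps iq4_k; prompt processing (nrc_y == 512) repacks.
    std::printf("nrc_y=1   -> %s\n", repack_type_for(WeightType::IQ4_K, 1)   == WeightType::Q8_K_R8 ? "Q8_K_R8" : "original");
    std::printf("nrc_y=512 -> %s\n", repack_type_for(WeightType::IQ4_K, 512) == WeightType::Q8_K_R8 ? "Q8_K_R8" : "original");
}
```

The threshold reflects that the one-time conversion cost only pays off when it is amortized over many right-hand-side columns, i.e. during prompt processing; single-token generation keeps the original per-type kernels.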
Diffstat (limited to 'ggml/src/iqk/iqk_mul_mat.cpp')
-rw-r--r--  ggml/src/iqk/iqk_mul_mat.cpp | 28
1 file changed, 18 insertions(+), 10 deletions(-)
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 0b29a572..81b5841d 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -250,6 +250,14 @@ struct MulMat {
case GGML_TYPE_Q4_K : return nrc_y >= 32 ? GGML_TYPE_Q8_1 : type;
case GGML_TYPE_Q5_K : return nrc_y >= 32 ? GGML_TYPE_Q8_1 : type;
case GGML_TYPE_Q6_K : return nrc_y >= 64 ? GGML_TYPE_Q8_0_R8 : type;
+ case GGML_TYPE_IQ2_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ2_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ3_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ4_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ4_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ5_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ5_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ6_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
default: break;
}
#else
@@ -375,22 +383,22 @@ bool iqk_convert_repack(int typeA, int n, const void * vx, size_t bx, void * vy,
case GGML_TYPE_IQ3_XXS_R4:
case GGML_TYPE_IQ3_S_R4:
return iqk_convert_iquants_q80_r8(typeA, n, vx, bx, vy, nrc_x);
- //case GGML_TYPE_IQ4_KS:
- //case GGML_TYPE_IQ5_KS:
- //case GGML_TYPE_IQ4_KSS:
- //case GGML_TYPE_IQ2_K:
- //case GGML_TYPE_IQ2_KS:
- //case GGML_TYPE_IQ3_K:
- //case GGML_TYPE_IQ4_K:
- //case GGML_TYPE_IQ5_K:
- //case GGML_TYPE_IQ6_K:
+ case GGML_TYPE_IQ2_KS:
+ case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ4_KS:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_KS:
+ case GGML_TYPE_IQ5_K:
+ case GGML_TYPE_IQ6_K:
//case GGML_TYPE_IQ2_K_R4:
//case GGML_TYPE_IQ3_K_R4:
//case GGML_TYPE_IQ4_K_R4:
//case GGML_TYPE_IQ5_K_R4:
//case GGML_TYPE_IQ4_KS_R4:
//case GGML_TYPE_IQ5_KS_R4:
- // return iqk_set_kernels_iqk_quants(ne00, typeA, typeB, mm.funcs, mm.func16);
+ return iqk_convert_iqk_quants_q80_r8(typeA, n, vx, bx, vy, nrc_x);
case GGML_TYPE_IQ2_KT:
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT: