diff options
author | Kawrakow <iwankawrakow@gmail.com> | 2025-06-17 07:12:48 +0300 |
---|---|---|
committer | GitHub <noreply@github.com> | 2025-06-17 07:12:48 +0300 |
commit | 0f8f8b32e2d0c7e3ac8bbafee6965dcd1305d002 (patch) | |
tree | debb40f79053c891ff1cfa9839a7cca8505c1a04 /ggml/src/ggml.c | |
parent | 6fc5bbb657525bb1ef20b682e1cc4ab5fd44aba6 (diff) |
Much faster CPU prompt processing (part 1) (#531)
* q6_K dequantizing GEMM
* Much easier: just use different vec_dot types!
* WIP
* Finally q6_K x q8_2_x4 dot product works
* Very slightly better
* We don't need the changes in ggml.c
* Fix AVX2
* iq2_xs
* Fix AVX2
* iq2_s
* q3_K
* Fix q8_k_r8 on Zen4
* q3_K: repack to q8_k_r8 instead of q8_0_r8
With that we hit 360 t/s for LLaMA-3.1-8B on a Ryzen-7950X.
q8_k_r8 is 386 t/s, so for a batch size of 512 repacking costs
~7% of the time taken by the actual GEMM.
* q3_K: don't scale when all quants in a block are <= 127 when repacking
* iq2_s: repack to q8_k_r8 instead of q8_0_r8
* iq2_xs: repack to q8_k_r8
* WIP
* iq2_xs: repack to q8_k_r8
* iq3_xxs: repack to q8_k_r8
* iq3_s: use q8_k_r8
* iq1_s: repack to q8_k_r8
* iq1_m: repack to q8_k_r8
* iq1_m: slightly faster
* Slightly faster
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r-- | ggml/src/ggml.c | 23 |
1 file changed, 6 insertions, 17 deletions
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index 069533ae..a6260136 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -1036,7 +1036,12 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .from_float = quantize_row_q6_K, .from_float_ref = (ggml_from_float_t) quantize_row_q6_K_ref, .vec_dot = ggml_vec_dot_q6_K_q8_K, +#ifdef __AVX2__ + .vec_dot_type = GGML_TYPE_Q8_2_X4, +#else .vec_dot_type = GGML_TYPE_Q8_K, +#endif +// .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, .row_meta_size = 0, }, @@ -1062,7 +1067,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .from_float = quantize_row_q8_k_r8, .from_float_ref = (ggml_from_float_t) quantize_row_q8_k_r8_ref, .vec_dot = vec_dot_q8_k_r8_q8_k, - .vec_dot_type = GGML_TYPE_Q8_KR8, + .vec_dot_type = GGML_TYPE_Q8_K, .nrows = 1, .row_meta_size = 0, }, @@ -1075,11 +1080,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .from_float = quantize_row_iq2_xxs, .from_float_ref = (ggml_from_float_t)quantize_row_iq2_xxs_ref, .vec_dot = ggml_vec_dot_iq2_xxs_q8_K, -#ifdef __AVX2__ - .vec_dot_type = GGML_TYPE_Q8_2_X4, -#else .vec_dot_type = GGML_TYPE_Q8_K, -#endif .nrows = 1, .row_meta_size = 0, }, @@ -1131,11 +1132,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .from_float = quantize_row_iq3_xxs, .from_float_ref = (ggml_from_float_t)quantize_row_iq3_xxs_ref, .vec_dot = ggml_vec_dot_iq3_xxs_q8_K, -#ifdef __AVX2__ - .vec_dot_type = GGML_TYPE_Q8_2_X4, -#else .vec_dot_type = GGML_TYPE_Q8_K, -#endif .nrows = 1, .row_meta_size = 0, }, @@ -1161,11 +1158,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .from_float = quantize_row_iq3_s, .from_float_ref = (ggml_from_float_t)quantize_row_iq3_s_ref, .vec_dot = ggml_vec_dot_iq3_s_q8_K, -#ifdef __AVX2__ - .vec_dot_type = GGML_TYPE_Q8_2_X4, -#else .vec_dot_type = GGML_TYPE_Q8_K, -#endif .nrows = 1, .row_meta_size = 0, }, @@ -1217,11 +1210,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = 
{ .from_float = quantize_row_iq1_s, .from_float_ref = (ggml_from_float_t)quantize_row_iq1_s_ref, .vec_dot = ggml_vec_dot_iq1_s_q8_K, -#ifdef __AVX2__ - .vec_dot_type = GGML_TYPE_Q8_2_X4, -#else .vec_dot_type = GGML_TYPE_Q8_K, -#endif .nrows = 1, .row_meta_size = 0, }, |