author    Kawrakow <iwankawrakow@gmail.com>       2024-12-03 12:59:22 +0100
committer GitHub <noreply@github.com>             2024-12-03 12:59:22 +0100
commit    c5bf589367cd609f4c0ff73a6534bbde7902abe8 (patch)
tree      fa17f82c717d535222c1843fc9fca2d66f4d6ea7 /ggml/src/ggml.c
parent    ccec00939a30aa7762a232ac4dcadba985ef9ee4 (diff)
Q5_0_R4 (#121)
* Adding q5_0_r4
We get PP-512(LLaMA-3.1-8B) = 256.7 t/s on a Ryzen-7950X.
We even get TG-128 improvement to 11.7 t/s from 11.1 t/s.
* q5_0_r4: NEON
We get PP-512(LLaMA-3.1-8B) = 99.6 t/s on M2-Max,
up from 71.0 t/s for Q5_0. The difference to mainline llama.cpp
is no longer funny: they get 26.5 t/s for Q5_0.
For TG, we are not able to fully saturate memory bandwidth
and arrive at 22.1 t/s @ 8 threads. Mainline llama.cpp gets
20.6 t/s for Q5_0.
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
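
For context: the `_r4` suffix in this repo denotes a row-interleaved variant of a quant type, where blocks from four consecutive rows are stored interleaved so the GEMM kernels can process four rows with contiguous loads. Below is a minimal block-level sketch of that idea, not the actual ik_llama.cpp implementation: the real quantize_row_q5_0_r4 also shuffles the quants within each group of four blocks, and the helper name repack_q5_0_r4_blocks is made up for illustration.

#include <stdint.h>

#define QK5_0 32
typedef uint16_t ggml_half;   // fp16 storage, as in ggml

typedef struct {
    ggml_half d;              // per-block scale
    uint8_t   qh[4];          // high (5th) bits of the 32 quants
    uint8_t   qs[QK5_0 / 2];  // low 4 bits, two quants per byte
} block_q5_0;

// Interleave the blocks of 4 consecutive rows; the output order becomes
//   row0.blk0, row1.blk0, row2.blk0, row3.blk0, row0.blk1, ...
// so a kernel working on 4 rows at once reads one contiguous span per block column.
static void repack_q5_0_r4_blocks(block_q5_0 * dst, const block_q5_0 * src,
                                  int nrows, int nblk) { // nrows: multiple of 4
    for (int r = 0; r < nrows; r += 4) {
        for (int b = 0; b < nblk; ++b) {
            for (int k = 0; k < 4; ++k) {
                dst[r * nblk + 4 * b + k] = src[(r + k) * nblk + b];
            }
        }
    }
}

The repacking only reorders bytes, it does not add any; that is why the diff below registers the type with the plain Q5_0 sizes (.blck_size = QK5_0, .type_size = sizeof(block_q5_0)).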
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r-- | ggml/src/ggml.c | 26
1 file changed, 26 insertions, 0 deletions
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index fd65ae67..0eb76a07 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1296,6 +1296,23 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
         .nrows                    = 1,
         .row_meta_size            = 0,
     },
+    [GGML_TYPE_Q5_0_R4] = {
+        .type_name                = "q5_0_r4",
+        .blck_size                = QK5_0,
+        .type_size                = sizeof(block_q5_0),
+        .is_quantized             = true,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q5_0_r4,
+        .from_float               = quantize_row_q5_0_r4,
+        .from_float_ref           = (ggml_from_float_t)quantize_row_q5_0_r4_ref,
+        .vec_dot                  = vec_dot_q5_0_r4_q8_0,
+#if GGML_USE_IQK_MULMAT && defined __AVX2__
+        .vec_dot_type             = GGML_TYPE_Q8_1,
+#else
+        .vec_dot_type             = GGML_TYPE_Q8_0,
+#endif
+        .nrows                    = 1,
+        .row_meta_size            = 0,
+    },
 };
 
 // For internal test use
@@ -3956,6 +3973,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
         case GGML_FTYPE_MOSTLY_IQ4_NL:    wtype = GGML_TYPE_IQ4_NL;   break;
         case GGML_FTYPE_MOSTLY_IQ4_NL_X4: wtype = GGML_TYPE_IQ4_NL_X4;break;
         case GGML_FTYPE_MOSTLY_Q4_0_R4:   wtype = GGML_TYPE_Q4_0_R4;  break;
+        case GGML_FTYPE_MOSTLY_Q5_0_R4:   wtype = GGML_TYPE_Q5_0_R4;  break;
         case GGML_FTYPE_MOSTLY_Q8_0_R4:   wtype = GGML_TYPE_Q8_0_R4;  break;
         case GGML_FTYPE_MOSTLY_IQ4_XS:    wtype = GGML_TYPE_IQ4_XS;   break;
         case GGML_FTYPE_MOSTLY_IQ4_KS:    wtype = GGML_TYPE_IQ4_KS;   break;
@@ -10482,6 +10500,7 @@ static void ggml_compute_forward_add(
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
+        case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -10927,6 +10946,7 @@ static void ggml_compute_forward_add1(
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
+        case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -11069,6 +11089,7 @@ static void ggml_compute_forward_acc(
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
+        case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -14257,6 +14278,7 @@ static void ggml_compute_forward_out_prod(
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
+        case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -14639,6 +14661,7 @@ static void ggml_compute_forward_set(
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
+        case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -14915,6 +14938,7 @@ static void ggml_compute_forward_get_rows(
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
+        case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -15518,6 +15542,7 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_IQ4_NL:
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
+        case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -22347,6 +22372,7 @@ size_t ggml_quantize_chunk(
         case GGML_TYPE_IQ4_NL:    result = quantize_iq4_nl   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_NL_X4: result = quantize_iq4_nl_x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q4_0_R4:   result = quantize_q4_0_r4  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_Q5_0_R4:   result = quantize_q5_0_r4  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q8_0_R4:   result = quantize_q8_0_r4  (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_XS:    result = quantize_iq4_xs   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_KS:    result = quantize_iq4_ks   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
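
As a usage note, the type_traits entry added above is what ggml's generic matmul path consults: the f32 activations are first quantized to vec_dot_type (GGML_TYPE_Q8_1 when built with IQK_MULMAT on AVX2, GGML_TYPE_Q8_0 otherwise), and the registered vec_dot kernel then contracts them against the Q5_0_R4 weights. A simplified sketch of that flow, with threading, tiling, and the iqk fast path omitted, and q8_row/dst_row/src0_data as illustrative names rather than actual ggml variables:

// Sketch only — loosely follows ggml_compute_forward_mul_mat, assuming the
// type_traits table from the diff above is in scope.
const ggml_type_traits_t traits = type_traits[GGML_TYPE_Q5_0_R4];

// 1. Quantize one f32 activation row of length n to the companion type
//    (Q8_1 or Q8_0, depending on the build).
enum ggml_type vdt = traits.vec_dot_type;
type_traits[vdt].from_float(src1_f32, q8_row, n);

// 2. One dot product per weight row. In reality the r4 kernel consumes four
//    interleaved rows at once; per-row addressing is a simplification here.
for (int64_t ir = 0; ir < nrows0; ++ir) {
    const char * w_row = (const char *) src0_data + ir * ggml_row_size(GGML_TYPE_Q5_0_R4, n);
    traits.vec_dot(n, &dst_row[ir], 0, w_row, 0, q8_row, 0, 1);
}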