author     Kawrakow <iwankawrakow@gmail.com>         2024-12-03 14:48:26 +0100
committer  GitHub <noreply@github.com>               2024-12-03 14:48:26 +0100
commit     f1f4eb988fe5ee969100cd0d3782fd7460d13949 (patch)
tree       97bb1a75ba7189f05e82835de6b2b65661a1ce7a  /ggml/src/ggml.c
parent     c5bf589367cd609f4c0ff73a6534bbde7902abe8 (diff)
Q6_0_R4 (#122)
* Adding q6_0_r4
We get PP-512(LLaMA-3.1-8B) = 257 t/s on a Ryzen-7950X.
* q6_0_r4: NEON
We get PP-512(LLaMA-3.1-8B) = 95 t/s on M2-Max.
In terms of operations, q6_0_r4 is identical to q5_0_r4,
except that the high bits are loaded with vld1q_u8_x2
instead of vld1q_u8 (see the sketch below). It is strange
that this alone makes a ~5% difference in performance,
especially considering that the load is amortized (re-used)
over 8 columns in the right matrix. Or am I running out of vector registers?
* Fix AVX2
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
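
Below is a minimal sketch of the load difference described in the NEON bullet above. It is not the actual ik_llama.cpp kernel: the helper names and the assumption that an interleaved q6_0_r4 block packs twice as many high-bit bytes as q5_0_r4 (two high bits per weight instead of one) are illustrative only.

// Minimal sketch of the load difference described in the commit message.
// NOT the real ik_llama.cpp kernel; pointer names and the byte counts for
// the packed high bits are assumptions made only to illustrate the intrinsics.
#include <arm_neon.h>
#include <stdint.h>

// q5_0_r4: one high bit per weight -> a single 16-byte load covers the
// packed high bits of the 4 interleaved rows.
static inline uint8x16_t load_high_bits_q5_0_r4(const uint8_t * qh) {
    return vld1q_u8(qh);
}

// q6_0_r4: two high bits per weight -> twice the bytes, loaded as a pair
// of 16-byte registers in one vld1q_u8_x2 call.
static inline uint8x16x2_t load_high_bits_q6_0_r4(const uint8_t * qh) {
    return vld1q_u8_x2(qh);
}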
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r--  ggml/src/ggml.c  26
1 file changed, 26 insertions, 0 deletions
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 0eb76a07..203b1b57 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1313,6 +1313,23 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
         .nrows          = 1,
         .row_meta_size  = 0,
     },
+    [GGML_TYPE_Q6_0_R4] = {
+        .type_name      = "q6_0_r4",
+        .blck_size      = QK6_0,
+        .type_size      = sizeof(block_q6_0),
+        .is_quantized   = true,
+        .to_float       = (ggml_to_float_t) dequantize_row_q6_0_r4,
+        .from_float     = quantize_row_q6_0_r4,
+        .from_float_ref = (ggml_from_float_t)quantize_row_q6_0_r4_ref,
+        .vec_dot        = vec_dot_q6_0_r4_q8_0,
+#if GGML_USE_IQK_MULMAT && defined __AVX2__
+        .vec_dot_type   = GGML_TYPE_Q8_1,
+#else
+        .vec_dot_type   = GGML_TYPE_Q8_0,
+#endif
+        .nrows          = 1,
+        .row_meta_size  = 0,
+    },
 };

 // For internal test use
@@ -3974,6 +3991,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
         case GGML_FTYPE_MOSTLY_IQ4_NL_X4: wtype = GGML_TYPE_IQ4_NL_X4; break;
         case GGML_FTYPE_MOSTLY_Q4_0_R4:   wtype = GGML_TYPE_Q4_0_R4;   break;
         case GGML_FTYPE_MOSTLY_Q5_0_R4:   wtype = GGML_TYPE_Q5_0_R4;   break;
+        case GGML_FTYPE_MOSTLY_Q6_0_R4:   wtype = GGML_TYPE_Q6_0_R4;   break;
         case GGML_FTYPE_MOSTLY_Q8_0_R4:   wtype = GGML_TYPE_Q8_0_R4;   break;
         case GGML_FTYPE_MOSTLY_IQ4_XS:    wtype = GGML_TYPE_IQ4_XS;    break;
         case GGML_FTYPE_MOSTLY_IQ4_KS:    wtype = GGML_TYPE_IQ4_KS;    break;
@@ -10501,6 +10519,7 @@ static void ggml_compute_forward_add(
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
+        case GGML_TYPE_Q6_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -10947,6 +10966,7 @@ static void ggml_compute_forward_add1(
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
+        case GGML_TYPE_Q6_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -11090,6 +11110,7 @@ static void ggml_compute_forward_acc(
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
+        case GGML_TYPE_Q6_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -14279,6 +14300,7 @@ static void ggml_compute_forward_out_prod(
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
+        case GGML_TYPE_Q6_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -14662,6 +14684,7 @@ static void ggml_compute_forward_set(
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
+        case GGML_TYPE_Q6_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -14939,6 +14962,7 @@ static void ggml_compute_forward_get_rows(
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
+        case GGML_TYPE_Q6_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -15543,6 +15567,7 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_IQ4_NL_X4:
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
+        case GGML_TYPE_Q6_0_R4:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -22373,6 +22398,7 @@ size_t ggml_quantize_chunk(
         case GGML_TYPE_IQ4_NL_X4: result = quantize_iq4_nl_x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q4_0_R4:   result = quantize_q4_0_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q5_0_R4:   result = quantize_q5_0_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_Q6_0_R4:   result = quantize_q6_0_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q8_0_R4:   result = quantize_q8_0_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_XS:    result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ4_KS:    result = quantize_iq4_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
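
As a usage sketch of the new registration (hedged: it assumes this fork keeps mainline ggml's ggml_internal_get_type_traits() accessor, which this patch does not touch), the q6_0_r4 entry becomes queryable like any other type in the type_traits table:

// Hedged sketch, not part of the patch: assumes the fork still exposes
// mainline ggml's ggml_internal_get_type_traits() accessor for type_traits[].
#include <stdio.h>
#include "ggml.h"

int main(void) {
    // Look up the traits registered for the new interleaved type.
    ggml_type_traits_t tt = ggml_internal_get_type_traits(GGML_TYPE_Q6_0_R4);
    printf("%s: block size %d, %zu bytes per block, quantized: %d\n",
           tt.type_name, (int) tt.blck_size, tt.type_size, tt.is_quantized ? 1 : 0);
    return 0;
}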