author    | Kawrakow <iwankawrakow@gmail.com> | 2024-12-09 16:59:18 +0100
committer | GitHub <noreply@github.com>       | 2024-12-09 16:59:18 +0100
commit    | 3ec193b4856df8e5827b83a8c7686e8498c5e5b8 (patch)
tree      | 149666dbffdf1d443bb9ff8f2564ed9bb1959201 /ggml/src/ggml.c
parent    | 43e65a672a98d931998559785b58f1e980e87f54 (diff)
Q4_K_R4 (#129)
* Something is still wrong
* Simply don't see what is wrong
* q4_k_r4: finally works on Zen4
I had forgotten to prevent token_embd.weight from being quantized
with q4_k_r4! (See the illustrative fallback sketch after the commit message.)
* q4_k_r4: AVX2
We get PP-512(LLaMA-3.1-8B) = 267 t/s on a Ryzen-5975WX.
This is ~30% better than Q4_K_S.
* q4_k_r4: NEON
We get PP-512(LLaMA-3.1-8B) = 110 t/s.
Not quite as good as q4_0_r4, but still a massive
improvement compared to the 69 t/s for q4_K.
* q4_k_r4: slightly better AVX2
PP-512 goes from 267 t/s to 282 t/s on Ryzen-5975WX
* Minor
* Minor
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
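
The token_embd.weight fix mentioned above reflects a general constraint of the row-interleaved _r4 types: tensors that are fetched one row at a time (the token embedding table is read via get_rows) need to stay in a plain, per-row format. The snippet below is only a hypothetical illustration of such a name-based fallback; the helper name is invented, and the actual check lives in the quantization driver rather than in the ggml.c changes shown further down.

/* Hypothetical sketch (not from this commit): fall back from the
 * row-interleaved q4_k_r4 type to plain Q4_K for tensors that are
 * accessed row by row, such as token_embd.weight. */
#include <string.h>
#include "ggml.h"

static enum ggml_type pick_tensor_quant_type(const char * name, enum ggml_type wanted) {
    if (wanted == GGML_TYPE_Q4_K_R4 && strcmp(name, "token_embd.weight") == 0) {
        return GGML_TYPE_Q4_K;   // keep the embedding table in the non-interleaved format
    }
    return wanted;
}

As far as the commit message indicates, the intent is simply that the embedding table keeps a format whose rows can be gathered independently.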
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r-- | ggml/src/ggml.c | 26
1 file changed, 24 insertions, 2 deletions
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 974e42b2..fadda3e3 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -888,6 +888,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
         .nrows                    = 1,
         .row_meta_size            = 0,
     },
+    [GGML_TYPE_Q4_K_R4] = {
+        .type_name                = "q4_k_r4",
+        .blck_size                = QK_K,
+        .type_size                = sizeof(block_q4_K),
+        .is_quantized             = true,
+        .to_float                 = (ggml_to_float_t) dequantize_row_q4_k_r4,
+        .from_float               = quantize_row_q4_k_r4,
+        .from_float_ref           = (ggml_from_float_t) quantize_row_q4_k_r4_ref,
+        .vec_dot                  = vec_dot_q4_k_r4_q8_k,
+        .vec_dot_type             = GGML_TYPE_Q8_K32,
+        .nrows                    = 1,
+        .row_meta_size            = 0,
+    },
     [GGML_TYPE_Q5_K] = {
         .type_name                = "q5_K",
         .blck_size                = QK_K,
@@ -4020,8 +4033,9 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
         case GGML_FTYPE_MOSTLY_Q2_K:          wtype = GGML_TYPE_Q2_K;    break;
         case GGML_FTYPE_MOSTLY_Q3_K:          wtype = GGML_TYPE_Q3_K;    break;
         case GGML_FTYPE_MOSTLY_Q4_K:          wtype = GGML_TYPE_Q4_K;    break;
-        case GGML_FTYPE_MOSTLY_Q5_K:          wtype = GGML_TYPE_Q5_K;    break;
-        case GGML_FTYPE_MOSTLY_Q6_K:          wtype = GGML_TYPE_Q6_K;    break;
+        case GGML_FTYPE_MOSTLY_Q4_K_R4:       wtype = GGML_TYPE_Q4_K_R4; break;
+        case GGML_FTYPE_MOSTLY_Q5_K:          wtype = GGML_TYPE_Q5_K;    break;
+        case GGML_FTYPE_MOSTLY_Q6_K:          wtype = GGML_TYPE_Q6_K;    break;
         case GGML_FTYPE_MOSTLY_IQ2_XXS:       wtype = GGML_TYPE_IQ2_XXS; break;
         case GGML_FTYPE_MOSTLY_IQ2_XS:        wtype = GGML_TYPE_IQ2_XS;  break;
         case GGML_FTYPE_MOSTLY_IQ3_XXS:       wtype = GGML_TYPE_IQ3_XXS; break;
@@ -10550,6 +10564,7 @@ static void ggml_compute_forward_add(
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q4_K_R4:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
         case GGML_TYPE_IQ2_XXS:
@@ -10999,6 +11014,7 @@ static void ggml_compute_forward_add1(
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q4_K_R4:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
         case GGML_TYPE_IQ2_XXS:
@@ -11145,6 +11161,7 @@ static void ggml_compute_forward_acc(
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q4_K_R4:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
         case GGML_TYPE_IQ2_XXS:
@@ -14337,6 +14354,7 @@ static void ggml_compute_forward_out_prod(
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q4_K_R4:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
         case GGML_TYPE_IQ2_XXS:
@@ -14723,6 +14741,7 @@ static void ggml_compute_forward_set(
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q4_K_R4:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
         case GGML_TYPE_IQ2_XXS:
@@ -15003,6 +15022,7 @@ static void ggml_compute_forward_get_rows(
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q4_K_R4:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
         case GGML_TYPE_IQ2_XXS:
@@ -15610,6 +15630,7 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_Q2_K:
         case GGML_TYPE_Q3_K:
         case GGML_TYPE_Q4_K:
+        case GGML_TYPE_Q4_K_R4:
         case GGML_TYPE_Q5_K:
         case GGML_TYPE_Q6_K:
         case GGML_TYPE_IQ2_XXS:
@@ -22445,6 +22466,7 @@ size_t ggml_quantize_chunk(
         case GGML_TYPE_Q2_K:    result = quantize_q2_K   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q3_K:    result = quantize_q3_K   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q4_K:    result = quantize_q4_K   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+        case GGML_TYPE_Q4_K_R4: result = quantize_q4_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q5_K:    result = quantize_q5_K   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_Q6_K:    result = quantize_q6_K   (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
         case GGML_TYPE_IQ2_XXS: result = quantize_iq2_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
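
For orientation, the type_traits entry and the ggml_ftype_to_ggml_type case added above are what let the rest of ggml handle the new type generically. The following is a minimal sketch, not part of the commit, that queries the registered metadata through the public ggml accessors; it assumes ggml_type_name, ggml_blck_size, ggml_type_size and ggml_row_size are available, as they are in ggml builds of this vintage.

/* Minimal sketch: inspect the newly registered q4_k_r4 type via the
 * public ggml helpers, which read the type_traits entry added above. */
#include <stdio.h>
#include "ggml.h"

int main(void) {
    // The new switch case maps the file type to the tensor type.
    enum ggml_type t = ggml_ftype_to_ggml_type(GGML_FTYPE_MOSTLY_Q4_K_R4);

    printf("%s: block size = %d, bytes per block = %zu, bytes per 4096-wide row = %zu\n",
           ggml_type_name(t),
           (int) ggml_blck_size(t),
           ggml_type_size(t),
           ggml_row_size(t, 4096));
    return 0;
}

Note that the traits entry reuses sizeof(block_q4_K) and QK_K, so a q4_k_r4 row occupies the same number of bytes as a Q4_K row; the difference is how groups of four rows are interleaved in memory for the SIMD kernels.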