diff options
author | Kawrakow <iwankawrakow@gmail.com> | 2024-12-10 12:26:40 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-12-10 12:26:40 +0100 |
commit | 361174ee6aee8792b4fbb227b9bc328bf9bd6eb9 (patch) | |
tree | 887c0cb982664604ef977cdac27657d7c61c88a4 /ggml/src/ggml.c | |
parent | 3ec193b4856df8e5827b83a8c7686e8498c5e5b8 (diff) |
Q6_K_R4 (#130)
* Adding q6_k_r4
* q6_k_r4: 1st functional AVX2 version
* q6_k_r4: AVX2 and simple Zen4
"Simple" as in processing 4 instead of 8 rows at once.
On Zen4 we get PP-512(LLaMA-3.1-8B) = 238.3 t/s vs
195.2 t/s for Q6_K. TG-128 @ 1 thread is 7.94 t/s
vs 5.38 t/s for Q6_K.
* q6_k_r4: 1st NEON version
PP-512(LLaMA-3.1-8B) = 78 t/s vs 57.6 t/s for q6_K.
TG-128 is slightly lower than q6_K for low number of threads,
becomes very slightly better at 8 threads.
* q6_k_r4: slightly faster NEON
PP-512(LLaMA-3.1-8B) = 83.25 t/s
* q6_k_r4: slightly faster Zen4
238.3 t/s -> 243.2 t/s
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r-- | ggml/src/ggml.c | 22 |
1 files changed, 22 insertions, 0 deletions
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c index fadda3e3..b92c2352 100644 --- a/ggml/src/ggml.c +++ b/ggml/src/ggml.c @@ -927,6 +927,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = { .nrows = 1, .row_meta_size = 0, }, + [GGML_TYPE_Q6_K_R4] = { + .type_name = "q6_k_r4", + .blck_size = QK_K, + .type_size = sizeof(block_q6_K), + .is_quantized = true, + .to_float = (ggml_to_float_t) dequantize_row_q6_k_r4, + .from_float = quantize_row_q6_k_r4, + .from_float_ref = (ggml_from_float_t) quantize_row_q6_k_r4_ref, + .vec_dot = vec_dot_q6_k_r4_q8_k, + .vec_dot_type = GGML_TYPE_Q8_K, + .nrows = 1, + .row_meta_size = 0, + }, [GGML_TYPE_IQ2_XXS] = { .type_name = "iq2_xxs", .blck_size = QK_K, @@ -4036,6 +4049,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) { case GGML_FTYPE_MOSTLY_Q4_K_R4: wtype = GGML_TYPE_Q4_K_R4; break; case GGML_FTYPE_MOSTLY_Q5_K: wtype = GGML_TYPE_Q5_K; break; case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break; + case GGML_FTYPE_MOSTLY_Q6_K_R4: wtype = GGML_TYPE_Q6_K_R4; break; case GGML_FTYPE_MOSTLY_IQ2_XXS: wtype = GGML_TYPE_IQ2_XXS; break; case GGML_FTYPE_MOSTLY_IQ2_XS: wtype = GGML_TYPE_IQ2_XS; break; case GGML_FTYPE_MOSTLY_IQ3_XXS: wtype = GGML_TYPE_IQ3_XXS; break; @@ -10567,6 +10581,7 @@ static void ggml_compute_forward_add( case GGML_TYPE_Q4_K_R4: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_Q6_K_R4: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: @@ -11017,6 +11032,7 @@ static void ggml_compute_forward_add1( case GGML_TYPE_Q4_K_R4: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_Q6_K_R4: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: @@ -11164,6 +11180,7 @@ static void ggml_compute_forward_acc( case GGML_TYPE_Q4_K_R4: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_Q6_K_R4: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: @@ -14357,6 +14374,7 @@ static void ggml_compute_forward_out_prod( case 
GGML_TYPE_Q4_K_R4: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_Q6_K_R4: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: @@ -14744,6 +14762,7 @@ static void ggml_compute_forward_set( case GGML_TYPE_Q4_K_R4: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_Q6_K_R4: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: @@ -15025,6 +15044,7 @@ static void ggml_compute_forward_get_rows( case GGML_TYPE_Q4_K_R4: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_Q6_K_R4: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: @@ -15633,6 +15653,7 @@ static void ggml_compute_forward_clamp( case GGML_TYPE_Q4_K_R4: case GGML_TYPE_Q5_K: case GGML_TYPE_Q6_K: + case GGML_TYPE_Q6_K_R4: case GGML_TYPE_IQ2_XXS: case GGML_TYPE_IQ2_XS: case GGML_TYPE_IQ3_XXS: @@ -22469,6 +22490,7 @@ size_t ggml_quantize_chunk( case GGML_TYPE_Q4_K_R4: result = quantize_q4_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_Q5_K: result = quantize_q5_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_Q6_K: result = quantize_q6_K(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; + case GGML_TYPE_Q6_K_R4: result = quantize_q6_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_IQ2_XXS: result = quantize_iq2_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_IQ2_XS: result = quantize_iq2_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; case GGML_TYPE_IQ3_XXS: result = quantize_iq3_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break; |