author    Kawrakow <iwankawrakow@gmail.com>  2024-10-09 12:54:40 +0300
committer GitHub <noreply@github.com>        2024-10-09 12:54:40 +0300
commit    b30c9e10d8710a49b2d2ab98d086b9f11bfaa228 (patch)
tree      d2d0feb6ca78d3393a88acf81459e2f31d17c93a /ggml/src/ggml.c
parent    c0ddc644bbb53d1fac10cac454756657b5f1ba32 (diff)
New SOTA quantization: 4.25 bpw IQ4_KS (#83)
* iq4_k_xxs: basics
* WIP + adding iq3_kl quantization mix
* iq4_xxs: this looks very viable compared to iq4_xs.
  At the same 4.25 bpw, PPL is always better, for some models significantly better. I'll rename it to iq4_ks and keep it.
* iq4_xxs: CUDA dot product.
  We get TG-128 = 126 t/s for LLaMA-3.1-8B, compared to 123 t/s for q4_0.
* iq4_xxs: scalar CPU dot product.
  Also fix the breakage I caused in the dedicated work-buffer quantization when the multiplication is not done via iqk_mul_mat.
* iq4_xxs: Zen4.
  I noticed that iq4_xs is wrong on Zen4 (and possibly AVX2): the same mistake again of packing int32_t back into int16_t, which overflows occasionally (just occasionally, which is why the result doesn't look completely wrong and I didn't notice).
* Fix iq4_xs (Zen4)
* iq4_xxs: AVX2
* iq4_xxs: ARM_NEON
* iq4_xxs: Metal
* iq4_xxs: slightly faster TG on Metal
* iq4_xxs: rename to iq4_ks.
  After all, it is a smaller variant of iq4_k.
* iq3_kl: use iq4_ks instead of iq4_k/iq4_xs
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
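The Zen4/AVX2 overflow described in the commit message is easy to reproduce in scalar form. A minimal illustration (not code from this commit): accumulating int8 products in 32 bits is safe, but narrowing the partial sums to int16 silently wraps for large inputs, while typical small values stay in range, which is why the bug only shows occasionally.

#include <stdint.h>
#include <stdio.h>

int main(void) {
    int8_t q[32], a[32];
    for (int i = 0; i < 32; ++i) { q[i] = 113; a[i] = 127; } // near worst case

    int32_t sum32 = 0;
    for (int i = 0; i < 32; ++i) sum32 += (int32_t)q[i] * a[i];

    int16_t sum16 = (int16_t)sum32; // the buggy narrowing step
    printf("int32 sum = %d, after int16 truncation = %d\n", sum32, (int)sum16);
    // 459232 does not fit in int16_t, so the truncated value is wrong;
    // for typical small sums the value fits and the bug stays hidden.
    return 0;
}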
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r--  ggml/src/ggml.c  29
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 7ad666eb..97fa81b1 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1087,6 +1087,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 0,
},
+ [GGML_TYPE_IQ4_KS] = {
+ .type_name = "iq4_ks",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq4_ks),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq4_ks,
+ .from_float = quantize_row_iq4_ks,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq4_ks_ref,
+ .vec_dot = vec_dot_iq4_ks_q8_k,
+ .vec_dot_type = GGML_TYPE_Q8_K,
+ .nrows = 1,
+ .row_meta_size = 4,
+ },
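The entry above wires IQ4_KS into ggml's type dispatch: super-blocks of QK_K values, activations quantized to Q8_K for the dot product, and, new for this type, 4 bytes of per-row metadata via row_meta_size. A back-of-the-envelope check of the 4.25 bpw figure; the 136-byte block size (128 bytes of 4-bit quants plus 8 scale bytes) is an assumption of this sketch, not something visible in the diff.

#include <stdio.h>

int main(void) {
    const int QK_K        = 256;  // block size, per the entry above
    const int block_bytes = 136;  // assumed sizeof(block_iq4_ks)
    const int row_meta    = 4;    // row_meta_size, per the entry above
    const int n_per_row   = 4096; // a typical LLaMA hidden size

    double row_bytes = (double)(n_per_row / QK_K) * block_bytes + row_meta;
    printf("bits per weight: %.3f\n", 8.0 * row_bytes / n_per_row); // ~4.258
    return 0;
}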
[GGML_TYPE_Q8_K] = {
.type_name = "q8_K",
.blck_size = QK_K,
@@ -3891,6 +3904,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ1_TN: wtype = GGML_TYPE_IQ1_TN; break;
case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
case GGML_FTYPE_MOSTLY_IQ4_XS: wtype = GGML_TYPE_IQ4_XS; break;
+ case GGML_FTYPE_MOSTLY_IQ4_KS: wtype = GGML_TYPE_IQ4_KS; break;
case GGML_FTYPE_MOSTLY_IQ2_K: wtype = GGML_TYPE_IQ2_K; break;
case GGML_FTYPE_MOSTLY_IQ3_K: wtype = GGML_TYPE_IQ3_K; break;
case GGML_FTYPE_MOSTLY_IQ4_K: wtype = GGML_TYPE_IQ4_K; break;
@@ -10390,6 +10404,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
@@ -10778,6 +10793,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
@@ -10916,6 +10932,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
@@ -13262,7 +13279,7 @@ static void ggml_compute_forward_mul_mat_one_chunk(
return;
}
- const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
+ const void * wdata = (src1->type == vec_dot_type) ? src1->data : (char *)params->wdata + params->wsize - params->qsize + GGML_MAX_NAME;
const size_t row_size = ggml_row_size(vec_dot_type, ne10);
assert(ne12 % ne02 == 0);
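The new wdata expression moves the quantized copy of src1 from the start of the work buffer to a dedicated qsize-byte region at its end, entered GGML_MAX_NAME bytes in. A minimal sketch of the arithmetic, assuming qsize is the tail region reserved for quantized activations; what occupies the GGML_MAX_NAME-byte gap is not shown in this hunk (plausibly a small header or padding).

#include <stddef.h>

#define GGML_MAX_NAME 64 // ggml's value for this constant

// Assumed layout:
//   wdata                                               wdata + wsize
//   [ ... other scratch ... | gap | quantized src1 rows ........... ]
//                           |<-------------- qsize --------------->|
//                           |<--->| GGML_MAX_NAME bytes
static const void * quantized_src1(const void * wdata, size_t wsize, size_t qsize) {
    return (const char *)wdata + wsize - qsize + GGML_MAX_NAME;
}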
@@ -13517,6 +13534,11 @@ IQK_MulMat_Not_Available2:;
UseGgmlGemm2:;
#endif
+ if (ith == 0) {
+ atomic_store(&params->shared->current_chunk, nth);
+ }
+ ggml_barrier(params->shared);
+
// This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers)
const int64_t nr0 = ne0;
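The reset above prepares the chunked ggml gemm fallback: thread 0 stores nth into the shared counter, the barrier makes it visible, and each thread then starts on chunk ith and claims further chunks by atomically bumping the counter. A sketch of that claiming loop, following upstream ggml's chunking pattern (the worker function here is a stand-in):

#include <stdatomic.h>
#include <stdio.h>

typedef struct { atomic_int current_chunk; } shared_state;

static void do_chunk(int chunk) { printf("processing chunk %d\n", chunk); } // stand-in

// Run by each of nth threads; before this, thread 0 has executed
// atomic_store(&sh->current_chunk, nth) followed by a barrier,
// exactly as in the hunk above, so chunks [0, nth) are pre-assigned.
static void process_chunks(shared_state * sh, int ith, int nchunk) {
    for (int chunk = ith; chunk < nchunk;
         chunk = atomic_fetch_add(&sh->current_chunk, 1)) {
        do_chunk(chunk);
    }
}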
@@ -14095,6 +14117,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
@@ -14473,6 +14496,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
@@ -14745,6 +14769,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
@@ -15344,6 +15369,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
@@ -22160,6 +22186,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ1_TN: result = quantize_iq1_tn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ4_KS: result = quantize_iq4_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_K: result = quantize_iq2_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ3_K: result = quantize_iq3_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_K: result = quantize_iq4_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
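The new case above makes IQ4_KS reachable from the public quantization entry point. A hedged usage sketch; the signature matches ggml.h of this period, and the wrapper name is purely illustrative.

#include "ggml.h"

// Quantize nrows rows of n_per_row floats to IQ4_KS; returns bytes written.
// imatrix (the optional importance matrix) may be NULL.
static size_t quantize_to_iq4_ks(const float * src, void * dst,
                                 int64_t nrows, int64_t n_per_row,
                                 const float * imatrix) {
    return ggml_quantize_chunk(GGML_TYPE_IQ4_KS, src, dst,
                               /*start=*/0, nrows, n_per_row, imatrix);
}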