summaryrefslogtreecommitdiff
path: root/ggml.c
diff options
context:
space:
mode:
authorKawrakow <48489457+ikawrakow@users.noreply.github.com>2024-01-30 15:14:12 +0200
committerGitHub <noreply@github.com>2024-01-30 15:14:12 +0200
commitf4d7e5497485ce6ce0e322533930b7da4657dd2d (patch)
tree78b30048cb4a9c78d5cf3e231a1ac3e9ed190577 /ggml.c
parent2256f36b79a932a478d4dcdf02c1e5a60056e5f3 (diff)
SOTA 3-bit quants (#5196)
* iq3_xxs: quantize/dequantize RMSE seems a bit high-ish at about half-way between q2_K and q3_K, so need to check more. * iq3_xxs: CUDA dequantize works * iq2_xxs: tuning quantization * iq3_xxs: starting to look better PPL on wiki.test.raw LLaMA-v1-7B: 6.4218 LLaMA-v2-7B: 6.3560 Mistral-7B : 6.0717 This is better than Q3_K_XS, with a 5% reduction in quantized model size. * iq3_xxs: CUDA dot product We have PP-512: 5891 t/s TG-128: 143.9 t/s * iq3_xxs: scalar and AVX2 dot products * iq3_xxs: ARM_NEON and Metal Metal performance is decent, ARM_NEON is pathetic * iq3_xxs: slightly better grid points * Faster iq3_xxs and iq2_xs dot products on CUDA * iq3_xxs: add some quant mix * iq3_xxs: fix failing quantization test Dot product still fails. Is this real? * iq3_xxs: hopefully fix ROCm * iq3_xxs: failing tests This time the dot product accuracy did find an actual bug in the AVX2 implementation. * Add IQ3_XXS to test-backend-ops --------- Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'ggml.c')
-rw-r--r--  ggml.c  30
1 file changed, 30 insertions, 0 deletions
diff --git a/ggml.c b/ggml.c
index 5b37487f..c451554f 100644
--- a/ggml.c
+++ b/ggml.c
@@ -599,6 +599,17 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.vec_dot = ggml_vec_dot_iq2_xs_q8_K,
.vec_dot_type = GGML_TYPE_Q8_K,
},
+ [GGML_TYPE_IQ3_XXS] = {
+ .type_name = "iq3_xxs",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq3_xxs),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq3_xxs,
+ .from_float = quantize_row_iq3_xxs,
+ .from_float_reference = (ggml_from_float_t)quantize_row_iq3_xxs_reference,
+ .vec_dot = ggml_vec_dot_iq3_xxs_q8_K,
+ .vec_dot_type = GGML_TYPE_Q8_K,
+ },
[GGML_TYPE_Q8_K] = {
.type_name = "q8_K",
.blck_size = QK_K,
@@ -2144,6 +2155,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_Q6_K: wtype = GGML_TYPE_Q6_K; break;
case GGML_FTYPE_MOSTLY_IQ2_XXS: wtype = GGML_TYPE_IQ2_XXS; break;
case GGML_FTYPE_MOSTLY_IQ2_XS: wtype = GGML_TYPE_IQ2_XS; break;
+ case GGML_FTYPE_MOSTLY_IQ3_XXS: wtype = GGML_TYPE_IQ3_XXS; break;
case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
}
@@ -7537,6 +7549,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
{
ggml_compute_forward_add_q_f32(params, src0, src1, dst);
} break;
@@ -7803,6 +7816,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
{
ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
} break;
@@ -7922,6 +7936,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
default:
{
GGML_ASSERT(false);
@@ -10673,6 +10688,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
{
ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
} break;
@@ -10852,6 +10868,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
default:
{
GGML_ASSERT(false);
@@ -11048,6 +11065,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
{
ggml_compute_forward_get_rows_q(params, src0, src1, dst);
} break;
@@ -11695,6 +11713,7 @@ static void ggml_compute_forward_alibi(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_Q8_K:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
@@ -11771,6 +11790,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_Q6_K:
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_Q8_K:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
@@ -18827,6 +18847,7 @@ void ggml_quantize_init(enum ggml_type type) {
switch (type) {
case GGML_TYPE_IQ2_XXS: iq2xs_init_impl(256); break;
case GGML_TYPE_IQ2_XS: iq2xs_init_impl(512); break;
+ case GGML_TYPE_IQ3_XXS: iq3xs_init_impl(256); break;
default: // nothing
break;
}
@@ -19089,6 +19110,15 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i
result = quantize_iq2_xs(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
GGML_ASSERT(result == row_size * nrows);
} break;
+ case GGML_TYPE_IQ3_XXS:
+ {
+ GGML_ASSERT(start % QK_K == 0);
+ GGML_ASSERT(start % n_per_row == 0);
+ size_t start_row = start / n_per_row;
+ size_t row_size = ggml_row_size(type, n_per_row);
+ result = quantize_iq3_xxs(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
+ GGML_ASSERT(result == row_size * nrows);
+ } break;
case GGML_TYPE_F16:
{
size_t elemsize = sizeof(ggml_fp16_t);