author     Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-02-21 11:39:52 +0200
committer  GitHub <noreply@github.com>                             2024-02-21 11:39:52 +0200
commit     a14679cc30c785e75d38028bae6ec39c6209ddef
tree       1e119caa6a0d94c0dbecf5bd8cb7df8d05652b8b
parent     6560bed3f066c876682464762cad90f1e28e3f1b
IQ4_NL: 4-bit non-linear quants with blocks of 32 (#5590)
* iq4_nl: squash commits for easier rebase

* Basics (quantize, dequantize)
* CUDA dequantize and dot product
* Slightly faster CUDA dot product (120 t/s)
* Switch to 6-bit scales
* Scalar dot product
* AVX2 dot product
* ARM_NEON dot product
* Works on Metal, but still slow
* Slightly better Metal dot product
* Another small Metal improvement
* Metal dot product is getting there
* Faster CUDA dot product
* Add 1/8 ffn_down layers as Q5_K when no imatrix has been provided
* Report the actual bpw
* Add _xs mix that is 4.05 bpw for non-MoE models
* Remove IQ4_XS for now, slightly adjust kvalues_iq4nl
* AVX2 dot product uses Q8_0 instead of Q8_K
* Add to test-backend-ops
* Minor fix
* Also use Q5_K for attn_output in MoE models
* Fixes after merging latest master
* Switching to blocks of 32
* AVX2 for blocks of 32
* Scalar dot product for blocks of 32
* ARM_NEON dot product for blocks of 32
* Metal kernels for blocks of 32
* Slightly faster Metal kernels
* iq4_nl: Fix after merging with master
* iq4_nl: another fix after merging with master
* Use IQ4_NL instead of Q4_K when using k-quants is not possible
* Fix typo that makes several tests fail
* It was the ggml_vdotq thing missed inside the brackets

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
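
For orientation, a minimal sketch of the scheme the commit message describes: weights are grouped in blocks of 32 (QK4_NL), each block carries a single scale, and every weight is a 4-bit index into a non-linear value table (kvalues_iq4nl). The struct and helper below are illustrative stand-ins, not the definitions from ggml-quants.h: the scale is stored as a plain float here where ggml uses ggml_fp16_t, and the table values are reproduced from ggml-quants.c for illustration.

#include <stdint.h>

#define QK4_NL 32   // block size: 32 weights per block

typedef struct {
    float   d;              // per-block scale (ggml_fp16_t in the real block_iq4_nl)
    uint8_t qs[QK4_NL/2];   // 32 x 4-bit indices, packed two per byte
} iq4_nl_block_sketch;

// Non-linear quantization levels indexed by the 4-bit quants.
static const int8_t kvalues_iq4nl[16] = {
    -127, -104, -83, -65, -49, -35, -22, -10,
       1,   13,  25,  38,  53,  69,  89, 113,
};

// Dequantize one block: weight i becomes d * kvalues_iq4nl[index(i)].
// Low nibbles hold weights 0..15, high nibbles weights 16..31.
static void dequantize_block_iq4_nl_sketch(const iq4_nl_block_sketch * x, float * y) {
    for (int j = 0; j < QK4_NL/2; ++j) {
        y[j            ] = x->d * kvalues_iq4nl[x->qs[j] & 0x0F];
        y[j + QK4_NL/2 ] = x->d * kvalues_iq4nl[x->qs[j] >>   4];
    }
}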
Diffstat (limited to 'ggml.c')
-rw-r--r--  ggml.c  30
1 file changed, 30 insertions(+), 0 deletions(-)
diff --git a/ggml.c b/ggml.c
index d129df50..91adbb0a 100644
--- a/ggml.c
+++ b/ggml.c
@@ -690,6 +690,18 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.vec_dot_type = GGML_TYPE_Q8_K,
.nrows = 1,
},
+ [GGML_TYPE_IQ4_NL] = {
+ .type_name = "iq4_nl",
+ .blck_size = QK4_NL,
+ .type_size = sizeof(block_iq4_nl),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq4_nl,
+ .from_float = quantize_row_iq4_nl,
+ .from_float_reference = (ggml_from_float_t)quantize_row_iq4_nl_reference,
+ .vec_dot = ggml_vec_dot_iq4_nl_q8_0,
+ .vec_dot_type = GGML_TYPE_Q8_0,
+ .nrows = 1,
+ },
[GGML_TYPE_Q8_K] = {
.type_name = "q8_K",
.blck_size = QK_K,
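
The .vec_dot_type = GGML_TYPE_Q8_0 entry above is what pairs IQ4_NL weights with Q8_0 activations: during matrix multiplication the f32 activations are first quantized to Q8_0 (also blocks of 32, one scale per block), and ggml_vec_dot_iq4_nl_q8_0 accumulates integer products before applying the two block scales. A hedged scalar sketch of that dot product, reusing the illustrative types from the sketch above (again with float scales in place of ggml_fp16_t):

typedef struct {
    float  d;            // per-block scale (ggml_fp16_t in ggml's block_q8_0)
    int8_t qs[QK4_NL];   // 32 signed 8-bit quants; QK8_0 == QK4_NL == 32
} q8_0_block_sketch;

// Scalar reference: dot product of one IQ4_NL row with one Q8_0 row of n values.
static void vec_dot_iq4_nl_q8_0_sketch(int n, float * s,
                                       const iq4_nl_block_sketch * x,
                                       const q8_0_block_sketch   * y) {
    float sumf = 0.0f;
    for (int ib = 0; ib < n/QK4_NL; ++ib) {
        int sumi = 0;   // integer accumulation within the block
        for (int j = 0; j < QK4_NL/2; ++j) {
            sumi += y[ib].qs[j            ] * kvalues_iq4nl[x[ib].qs[j] & 0x0F];
            sumi += y[ib].qs[j + QK4_NL/2 ] * kvalues_iq4nl[x[ib].qs[j] >>   4];
        }
        sumf += x[ib].d * y[ib].d * sumi;   // apply both block scales once per block
    }
    *s = sumf;
}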
@@ -2291,6 +2303,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ2_XS: wtype = GGML_TYPE_IQ2_XS; break;
case GGML_FTYPE_MOSTLY_IQ3_XXS: wtype = GGML_TYPE_IQ3_XXS; break;
case GGML_FTYPE_MOSTLY_IQ1_S: wtype = GGML_TYPE_IQ1_S; break;
+ case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
case GGML_FTYPE_UNKNOWN: wtype = GGML_TYPE_COUNT; break;
case GGML_FTYPE_MOSTLY_Q4_1_SOME_F16: wtype = GGML_TYPE_COUNT; break;
}
@@ -7702,6 +7715,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
{
ggml_compute_forward_add_q_f32(params, src0, src1, dst);
} break;
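
This case label, like the ones around it, routes quantized src0 types into ggml_compute_forward_add_q_f32, which works row by row through the type_traits table: dequantize the quantized src0 row into f32 scratch space, add the f32 src1 row, then quantize the result back. A schematic sketch of that per-row pattern; the function-pointer shapes mirror the to_float/from_float trait entries from the first hunk, and all names here are illustrative:

// Illustrative trait shapes matching the to_float/from_float entries above.
typedef void (*to_float_sketch_t)  (const void * x, float * y, int k);
typedef void (*from_float_sketch_t)(const float * x, void * y, int k);

// Add one f32 row to one quantized row: dequantize, add, requantize.
static void add_row_q_f32_sketch(to_float_sketch_t   to_float,
                                 from_float_sketch_t from_float,
                                 const void * src0_row, const float * src1_row,
                                 void * dst_row, float * scratch, int ne0) {
    to_float(src0_row, scratch, ne0);   // quantized src0 row -> f32 scratch
    for (int i = 0; i < ne0; ++i) {
        scratch[i] += src1_row[i];      // element-wise add in f32
    }
    from_float(scratch, dst_row, ne0);  // f32 result -> quantized dst row
}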
@@ -7970,6 +7984,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
{
ggml_compute_forward_add1_q_f32(params, src0, src1, dst);
} break;
@@ -8091,6 +8106,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
default:
{
GGML_ASSERT(false);
@@ -10858,6 +10874,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
{
ggml_compute_forward_out_prod_q_f32(params, src0, src1, dst);
} break;
@@ -11039,6 +11056,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
default:
{
GGML_ASSERT(false);
@@ -11237,6 +11255,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
{
ggml_compute_forward_get_rows_q(params, src0, src1, dst);
} break;
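
ggml_compute_forward_get_rows_q, used for embedding-style row lookups, leans on the same traits table: for each requested row index it dequantizes one quantized row straight into the f32 destination. A schematic sketch reusing the to_float shape from the sketch above; row_bytes stands in for the quantized row stride (cf. ggml_row_size):

// Gather selected rows of a quantized matrix into an f32 destination.
static void get_rows_q_sketch(to_float_sketch_t to_float,
                              const char * src0, size_t row_bytes,
                              const int32_t * ids, int n_ids,
                              float * dst, int ne0) {
    for (int i = 0; i < n_ids; ++i) {
        to_float(src0 + (size_t)ids[i] * row_bytes, dst + (size_t)i * ne0, ne0);
    }
}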
@@ -11911,6 +11930,7 @@ static void ggml_compute_forward_alibi(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
case GGML_TYPE_Q8_K:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
@@ -11989,6 +12009,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ2_XS:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ4_NL:
case GGML_TYPE_Q8_K:
case GGML_TYPE_I8:
case GGML_TYPE_I16:
@@ -19455,6 +19476,15 @@ size_t ggml_quantize_chunk(enum ggml_type type, const float * src, void * dst, i
result = quantize_iq1_s(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
GGML_ASSERT(result == row_size * nrows);
} break;
+ case GGML_TYPE_IQ4_NL:
+ {
+ GGML_ASSERT(start % QK4_NL == 0);
+ GGML_ASSERT(start % n_per_row == 0);
+ size_t start_row = start / n_per_row;
+ size_t row_size = ggml_row_size(type, n_per_row);
+ result = quantize_iq4_nl(src + start, (char *)dst + start_row * row_size, nrows, n_per_row, hist, imatrix);
+ GGML_ASSERT(result == row_size * nrows);
+ } break;
case GGML_TYPE_F16:
{
size_t elemsize = sizeof(ggml_fp16_t);
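
Finally, a hedged usage sketch of the new IQ4_NL branch of ggml_quantize_chunk. The function's trailing parameters are inferred from the call sites visible in this diff (start, nrows, n_per_row, hist, imatrix), since the hunk header above is truncated, and passing NULL for hist and imatrix is an assumption of this sketch. The asserts in the new branch require start to be a multiple of both QK4_NL and n_per_row, i.e. chunks must begin on row boundaries:

#include <stdlib.h>
#include "ggml.h"

int main(void) {
    const int nrows = 4, n_per_row = 4096;   // hypothetical tensor shape
    float * src = calloc((size_t)nrows * n_per_row, sizeof(float));

    const size_t row_size = ggml_row_size(GGML_TYPE_IQ4_NL, n_per_row);
    void * dst = malloc(row_size * nrows);

    // start = 0 satisfies both asserts: it is a multiple of QK4_NL and of n_per_row.
    const size_t written = ggml_quantize_chunk(GGML_TYPE_IQ4_NL, src, dst,
                                               /*start=*/0, nrows, n_per_row,
                                               /*hist=*/NULL, /*imatrix=*/NULL);

    const int ok = (written == row_size * nrows);   // mirrors the assert above
    free(src);
    free(dst);
    return ok ? 0 : 1;
}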