diff options (limited to 'ggml/src/ggml.c')

author:    Kawrakow <iwankawrakow@gmail.com>   2025-01-10 18:18:04 +0200
committer: GitHub <noreply@github.com>         2025-01-10 18:18:04 +0200
commit:    7553989dd88749de028853f9c0ea39651aad92a3 (patch)
tree:      71783a2c138437c940492ca2b6f0e218f20a7b4c /ggml/src/ggml.c
parent:    b1363b6177661556750c110cf876e044e61af365 (diff)

Be able to re-quantize MS BitNet I2_S models (#169)

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>

Diffstat (limited to 'ggml/src/ggml.c'):
 -rw-r--r--  ggml/src/ggml.c | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 71d84c94..5026fd27 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1605,6 +1605,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
         .nrows = 1,
         .row_meta_size = 0,
     },
+    [GGML_TYPE_I2_S] = {
+        .type_name = "i2_s",
+        .blck_size = 1,
+        .type_size = 1,
+        .is_quantized = true,
+        .to_float = dequantize_row_ms_i2s,
+        .from_float = NULL,
+        .from_float_ref = NULL,
+        .vec_dot = NULL,
+        .vec_dot_type = GGML_TYPE_Q8_0,
+        .nrows = 1,
+        .row_meta_size = 0,
+    },
 };
 
 // For internal test use
@@ -4130,6 +4143,10 @@ GGML_CALL size_t ggml_nbytes(const struct ggml_tensor * tensor) {
         for (int i = 0; i < GGML_MAX_DIMS; ++i) {
             nbytes += (tensor->ne[i] - 1)*tensor->nb[i];
         }
+        // hack for I2_S
+        if(tensor->type == GGML_TYPE_I2_S) {
+            nbytes = nbytes / 4 + 32;
+        }
     }
     else {
         nbytes = tensor->nb[1]; //tensor->ne[0]*tensor->nb[0]/blck_size;
@@ -10825,6 +10842,7 @@ static void ggml_compute_forward_add(
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q6_0_R4:
+        case GGML_TYPE_I2_S:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -11290,6 +11308,7 @@ static void ggml_compute_forward_add1(
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q6_0_R4:
+        case GGML_TYPE_I2_S:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -11452,6 +11471,7 @@ static void ggml_compute_forward_acc(
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q6_0_R4:
+        case GGML_TYPE_I2_S:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -14660,6 +14680,7 @@ static void ggml_compute_forward_out_prod(
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q6_0_R4:
+        case GGML_TYPE_I2_S:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -15062,6 +15083,7 @@ static void ggml_compute_forward_set(
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q6_0_R4:
+        case GGML_TYPE_I2_S:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -15358,6 +15380,7 @@ static void ggml_compute_forward_get_rows(
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q6_0_R4:
+        case GGML_TYPE_I2_S:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS:
@@ -15983,6 +16006,7 @@ static void ggml_compute_forward_clamp(
         case GGML_TYPE_Q4_0_R4:
         case GGML_TYPE_Q5_0_R4:
         case GGML_TYPE_Q6_0_R4:
+        case GGML_TYPE_I2_S:
         case GGML_TYPE_Q8_0_R4:
         case GGML_TYPE_IQ4_XS:
         case GGML_TYPE_IQ4_KS: