author    Kawrakow <iwankawrakow@gmail.com>    2024-12-06 12:15:39 +0100
committer GitHub <noreply@github.com>          2024-12-06 12:15:39 +0100
commit    3682e4700db6b8cb2ca8e3da365578078f21ab0c (patch)
tree      ea1680494ca00580b0a038cdef035c596e80e58c /ggml/src/ggml.c
parent    f64de08203aaee95ca755336de3e1db85d990198 (diff)
iq2_bn_r4: fastest Bitnet CPU implementation on the planet (#124)
* Adding iq2_bn_r4. This Zen4-only implementation achieves PP-512 = 826 t/s (!!!) for Bitnet-1.58b-3B, up from 620 t/s for iq2_bn.
* Make sure rows per thread are a multiple of the number of interleaved rows. With this I can run iq2_bn_r4 with 32 threads, and this increases PP-512 to 872 t/s.
* iq2_bn_r4: 1st shot at NEON. PP-512 is already faster than iq2_bn (284 t/s vs 246 t/s for Bitnet-1.58b-3B). TG-128 is ~5% slower.
* iq2_bn_r4: NEON. PP-512 is now 296 t/s. TG-128 is ~20% faster than iq2_bn for 1 thread, but saturates to about the same 93 t/s at 8 threads.
* iq2_bn_r4: experimenting on NEON. The matrix x vector multiplication is erratic. iq2_bn_r4 is faster at 1, 2, and 4 threads, but saturates to a lower t/s at 8 threads compared to iq2_bn. iq2_bn actually manages 99 t/s at 8 threads, not 93 as I wrote in the last commit. iq2_bn_r4 performance has huge fluctuations at 4 and 8 threads.
* Some cleanup.
* iq2_bn_r4: AVX2. As expected, PP is slightly slower as we just don't have enough vector registers (690 vs 710 t/s). TG is slightly faster (18.2 vs 16.7 t/s at 1 thread).
* iq2_bn_r4: use the AVX2 implementation on Zen4 for matrix x vector. It is faster - we get 29.6 t/s at 1 thread vs 25.9 t/s for iq2_bn.
* iq2_bn_r4: simdify q8_K16 quantization (AVX2). PP-512 becomes 834 t/s and TG-128 now saturates to the same performance as iq2_bn at 4 threads.
* iq2_bn_r4: simdify q8_K16 quantization (NEON). PP-512 is now 304.7 t/s, and TG-128 @ 8 threads very slightly outperforms iq2_bn (100.7 t/s vs 99.6 t/s).
* iq2_bn_r4: fix AVX2 after breaking it two commits ago.
* iq2_bn_r4: better AVX2. As we don't have enough vector registers on AVX2, it is better to do two passes per row, needing only half of the accumulator registers that way. With this, we now beat iq2_bn PP on AVX2 as well, by a small margin.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
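The second bullet's constraint (rows per thread a multiple of the interleaved row count) is what keeps a 32-thread run from splitting a 4-row group between threads. A minimal sketch of that partitioning, using a hypothetical helper (illustrative only, not the actual ggml.c code):

    // Split nrows over nth threads so each thread's chunk is a multiple
    // of n_interleaved (4 for the _r4 types). Hypothetical helper.
    static void split_rows_r4(int nrows, int nth, int ith, int n_interleaved,
                              int * first_row, int * num_rows) {
        const int ngroups    = nrows / n_interleaved;       // nrows assumed divisible by 4
        const int per_thread = (ngroups + nth - 1) / nth;   // groups per thread, rounded up
        const int g0 = ith * per_thread;
        const int g1 = g0 + per_thread < ngroups ? g0 + per_thread : ngroups;
        *first_row = g0 * n_interleaved;                    // always a multiple of 4
        *num_rows  = (g1 > g0 ? g1 - g0 : 0) * n_interleaved;
    }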
Diffstat (limited to 'ggml/src/ggml.c')
 ggml/src/ggml.c | 39 ++++++++++++++++++++++++++++++++++---
 1 file changed, 36 insertions(+), 3 deletions(-)
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index f4320e99..12afec05 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1026,11 +1026,24 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.to_float = (ggml_to_float_t) dequantize_row_iq2_bn,
.from_float = quantize_row_iq2_bn,
.from_float_ref = (ggml_from_float_t)quantize_row_iq2_bn_ref,
- .vec_dot = ggml_vec_dot_iq2_bn_q8_K64,
+ .vec_dot = vec_dot_iq2_bn_q8_K64,
.vec_dot_type = GGML_TYPE_Q8_K64,
.nrows = 1,
.row_meta_size = 4,
},
+ [GGML_TYPE_IQ2_BN_R4] = {
+ .type_name = "iq2_bn_r4",
+ .blck_size = QK_IQ1BN,
+ .type_size = sizeof(block_iq2_bn),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq2_bn_r4,
+ .from_float = quantize_row_iq2_bn_r4,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq2_bn_r4_ref,
+ .vec_dot = vec_dot_iq2_bn_r4_q8_K64,
+ .vec_dot_type = GGML_TYPE_Q8_K16,
+ .nrows = 1,
+ .row_meta_size = 4,
+ },
[GGML_TYPE_IQ4_NL] = {
.type_name = "iq4_nl",
.blck_size = QK4_NL,
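The new [GGML_TYPE_IQ2_BN_R4] entry reuses iq2_bn's block geometry (QK_IQ1BN elements per block_iq2_bn) and the same 4 bytes of per-row metadata, but routes vec_dot through vec_dot_iq2_bn_r4_q8_K64 with Q8_K16 as the activation type. For reference, a minimal sketch of how these trait fields determine the byte size of a quantized row, assuming this fork's ggml_row_size charges row_meta_size once per row (which is what the graph-plan change further down relies on):

    // Illustrative: byte size of one quantized row of ne0 elements.
    static size_t row_size_from_traits(const ggml_type_traits_t * tt, int64_t ne0) {
        return tt->row_meta_size + (ne0 / tt->blck_size) * tt->type_size;
    }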
@@ -1103,6 +1116,14 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.from_float = quantize_row_q8_K64,
.row_meta_size = 0,
},
+ [GGML_TYPE_Q8_K16] = {
+ .type_name = "q8_K16",
+ .blck_size = 64,
+ .type_size = 64,
+ .is_quantized = true,
+ .from_float = quantize_row_q8_K16,
+ .row_meta_size = 20,
+ },
[GGML_TYPE_BF16] = {
.type_name = "bf16",
.blck_size = 1,
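Q8_K16 is the vec_dot_type of iq2_bn_r4, i.e. the format activations get quantized to before the dot products run. A simplified sketch of where its from_float hook fires, condensed from the usual ggml mul_mat flow (not the verbatim code; ne10, nb11, and wdata follow ggml.c naming conventions):

    // When src1->type != vec_dot_type, each src1 row is quantized into
    // the scratch buffer; row_size includes q8_K16's 20 metadata bytes.
    const ggml_from_float_t from_float = type_traits[vec_dot_type].from_float;
    const size_t row_size = ggml_row_size(vec_dot_type, ne10);
    for (int64_t ir = ir0; ir < ir1; ++ir) {
        from_float((const float *)((const char *)src1->data + ir*nb11),
                   (char *)wdata + ir*row_size, ne10);
    }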
@@ -4000,6 +4021,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ1_M: wtype = GGML_TYPE_IQ1_M; break;
case GGML_FTYPE_MOSTLY_IQ1_BN: wtype = GGML_TYPE_IQ1_BN; break;
case GGML_FTYPE_MOSTLY_IQ2_BN: wtype = GGML_TYPE_IQ2_BN; break;
+ case GGML_FTYPE_MOSTLY_IQ2_BN_R4: wtype = GGML_TYPE_IQ2_BN_R4;break;
case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
case GGML_FTYPE_MOSTLY_IQ4_NL_X4: wtype = GGML_TYPE_IQ4_NL_X4;break;
case GGML_FTYPE_MOSTLY_IQ4_XS_R4: wtype = GGML_TYPE_IQ4_XS_R4;break;
@@ -10529,6 +10551,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_BN_R4:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_IQ4_XS_R4:
@@ -10977,6 +11000,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_BN_R4:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_IQ4_XS_R4:
@@ -11122,6 +11146,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_BN_R4:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_IQ4_XS_R4:
@@ -14313,6 +14338,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_BN_R4:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_IQ4_XS_R4:
@@ -14698,6 +14724,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_BN_R4:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_IQ4_XS_R4:
@@ -14977,6 +15004,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_BN_R4:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_IQ4_XS_R4:
@@ -15583,6 +15611,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_BN_R4:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_IQ4_XS_R4:
@@ -15603,6 +15632,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q8_K:
case GGML_TYPE_Q8_K64:
+ case GGML_TYPE_Q8_K16:
case GGML_TYPE_Q4_0_4_4:
case GGML_TYPE_Q4_0_4_8:
case GGML_TYPE_Q4_0_8_8:
@@ -20476,7 +20506,8 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
const enum ggml_type vec_dot_type = type_traits[node->src[0]->type].vec_dot_type;
if (node->src[1]->type != vec_dot_type) {
- cur_q = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
+ cur_q = ggml_row_size(vec_dot_type, node->src[1]->ne[0]) * ggml_nrows(node->src[1]);
+ //cur_q = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
}
} break;
case GGML_OP_MUL_MAT_ID:
@@ -20486,7 +20517,8 @@ struct ggml_cplan ggml_graph_plan(const struct ggml_cgraph * cgraph, int n_threa
const struct ggml_tensor * src1 = node->src[1];
const enum ggml_type vec_dot_type = type_traits[src0->type].vec_dot_type;
if (src1->type != vec_dot_type) {
- cur_q += ggml_row_size(vec_dot_type, ggml_nelements(src1));
+ cur_q += ggml_row_size(vec_dot_type, node->src[1]->ne[0]) * ggml_nrows(node->src[1]);
+ //cur_q += ggml_row_size(vec_dot_type, ggml_nelements(src1));
}
const int n_as = src0->ne[2];
cur_q += GGML_PAD(cur, sizeof(int64_t)); // align
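The two hunks above fix how the scratch buffer for quantized activations is sized: treating all of src1 as one giant row counts row_meta_size once, while the metadata is actually written once per row. A worked example with q8_K16 (64-byte blocks of 64 elements, row_meta_size = 20), assuming src1 has ne0 = 4096 and 512 rows:

    old: ggml_row_size(q8_K16, 4096*512)   = 20 + (4096*512/64)*64 = 2,097,172 bytes
    new: ggml_row_size(q8_K16, 4096) * 512 = (20 + 4096) * 512     = 2,107,392 bytes

The old formula undercounts by 511*20 = 10,220 bytes of per-row metadata, which would overflow the work buffer for any type with row_meta_size > 0.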
@@ -22415,6 +22447,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ1_M: result = quantize_iq1_m (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ1_BN: result = quantize_iq1_bn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_BN: result = quantize_iq2_bn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ2_BN_R4:result = quantize_iq2_bn_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_NL_X4: result = quantize_iq4_nl_x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_XS_R4: result = quantize_iq4_xs_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
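Finally, quantize_iq2_bn_r4 produces the interleaved layout the new vec_dot consumes. A rough sketch of the row-interleaving idea behind the _r4 types (illustrative only; the real routine presumably also rearranges data within blocks):

    // Repack 4 consecutive quantized rows so block k of each row is
    // stored contiguously: r0b0 r1b0 r2b0 r3b0 r0b1 r1b1 ...
    static void interleave_4_rows(const block_iq2_bn * src, block_iq2_bn * dst,
                                  int nblocks_per_row) {
        for (int k = 0; k < nblocks_per_row; ++k) {
            for (int r = 0; r < 4; ++r) {
                dst[4*k + r] = src[r*nblocks_per_row + k];
            }
        }
    }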