From 91ec824f2de3a073551ab8c5c19672d44f59b676 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Mon, 17 Jun 2024 08:09:39 +0300
Subject: iqk_mul_mat: improve iq1_bn (bitnet) on AVX2

We now get 207 t/s for PP-512 and 51 t/s for TG-128 using 16 threads.
---
 ggml-quants.c | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)

(limited to 'ggml-quants.c')

diff --git a/ggml-quants.c b/ggml-quants.c
index fc3155ef..5f2bcf69 100644
--- a/ggml-quants.c
+++ b/ggml-quants.c
@@ -3724,6 +3724,22 @@ void quantize_row_q8_K64_reference(const float * restrict x, block_q8_K64 * rest
 
     assert(k % 64 == 0);
     const int64_t nb = k / 64;
 
+    // Check if a row-wise scale works. It almost does, PPL is only ~0.02 higher
+    //float amax = 0;
+    //for (int j = 0; j < k; ++j) {
+    //    float ax = fabsf(x[j]);
+    //    amax = MAX(ax, amax);
+    //}
+
+    //float d = amax/127;
+    //float id = d ? 1/d : 0.f;
+
+    //for (int i = 0; i < nb; i++) {
+    //    for (int j = 0; j < 64; ++j) y[i].qs[j] = nearest_int(id*x[j]);
+    //    y[i].d = d;
+    //    x += 64;
+    //}
+
     for (int i = 0; i < nb; i++) {
         float max = 0;
--
cgit v1.2.3
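
Note: the commented-out block in the hunk above tries a single row-wise scale (one float for the whole row of k values) instead of one scale per 64-value block. Below is a minimal, hedged sketch of that same idea as a standalone function; the names RowQ8 and quantize_row_q8_rowwise are hypothetical and not part of ggml, and nearest_int/MAXF are re-declared here only so the snippet compiles on its own.

#include <assert.h>
#include <math.h>
#include <stdint.h>

#define MAXF(a, b) ((a) > (b) ? (a) : (b))

// Round to the nearest integer (illustrative stand-in for ggml's nearest_int).
static inline int nearest_int(float v) { return (int)roundf(v); }

// Hypothetical container: one scale shared by the whole row of k values,
// unlike block_q8_K64, which keeps a scale per 64-value block.
typedef struct {
    float    d;   // row-wise scale
    int8_t * qs;  // k quantized values, allocated by the caller
} RowQ8;

// Quantize k floats to int8 with a single row-wise scale d = amax/127,
// mirroring the commented-out experiment in the patch above.
static void quantize_row_q8_rowwise(const float * x, RowQ8 * y, int64_t k) {
    assert(k > 0);
    float amax = 0.f;
    for (int64_t j = 0; j < k; ++j) amax = MAXF(fabsf(x[j]), amax);
    const float d  = amax/127.f;
    const float id = d ? 1.f/d : 0.f;
    for (int64_t j = 0; j < k; ++j) y->qs[j] = (int8_t)nearest_int(id*x[j]);
    y->d = d;  // dequantize as x[j] ~= d * y->qs[j]
}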