From e05cca9ef652eee7b42927485a3821b14e3c565f Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Thu, 20 Jun 2024 15:20:50 +0300
Subject: bitnet(scale in a separate tensor): CPU improvements

Arrange Q8 quants in blocks of 128 and adapt iqk_mul_mat to deal with
that. This improves PP speed by a few percent.
---
 ggml-common.h | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/ggml-common.h b/ggml-common.h
index d3945975..4de80794 100644
--- a/ggml-common.h
+++ b/ggml-common.h
@@ -306,6 +306,11 @@ typedef struct {
     int8_t qs[64];     // quants
 } block_q8_K64;
 static_assert(sizeof(block_q8_K64) == sizeof(float) + 64, "wrong q8_K64 block size/padding");
+typedef struct {
+    float d;           // delta
+    int8_t qs[128];    // quants
+} block_q8_K128;
+static_assert(sizeof(block_q8_K128) == sizeof(float) + 128, "wrong q8_K128 block size/padding");
 
 // (Almost) "true" 2-bit quantization.
 // Due to the need to use blocks as per ggml design, it ends up using
--
cgit v1.2.3
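
For context, a minimal sketch of how one row of 128 floats could be
quantized into a block_q8_K128, assuming the symmetric Q8 scheme ggml
uses for its other Q8 block types (d = amax/127, q = round(x/d)). The
helper name quantize_row_q8_K128_sketch is hypothetical, not the
repository's actual quantization routine:

    #include <math.h>
    #include <stdint.h>

    typedef struct {
        float  d;        // delta (scale)
        int8_t qs[128];  // quants
    } block_q8_K128;

    // Hypothetical sketch: fill one block_q8_K128 from 128 floats with
    // the symmetric scheme d = amax/127, q = round(x/d), clamped to
    // [-127, 127]. Not the repository's actual routine.
    static void quantize_row_q8_K128_sketch(const float * x, block_q8_K128 * y) {
        float amax = 0.0f;                  // max absolute value in the row
        for (int i = 0; i < 128; ++i) {
            const float ax = fabsf(x[i]);
            if (ax > amax) amax = ax;
        }
        if (amax == 0.0f) {                 // all-zero row: zero scale and quants
            y->d = 0.0f;
            for (int i = 0; i < 128; ++i) y->qs[i] = 0;
            return;
        }
        y->d = amax / 127.0f;
        const float id = 127.0f / amax;     // inverse delta
        for (int i = 0; i < 128; ++i) {
            int q = (int)roundf(x[i] * id);
            if (q >  127) q =  127;         // clamp against rounding overshoot
            if (q < -127) q = -127;
            y->qs[i] = (int8_t)q;
        }
    }

One plausible reading of the design choice: relative to block_q8_K64,
the 128-quant block halves the number of per-block scales iqk_mul_mat
must load and apply per row, which would account for the PP gain the
commit message reports.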