summaryrefslogtreecommitdiff
path: root/iqk-quantize.cpp
diff options
context:
space:
mode:
authorIwan Kawrakow <iwan.kawrakow@gmail.com>2024-06-20 15:20:50 +0300
committerIwan Kawrakow <iwan.kawrakow@gmail.com>2024-06-22 12:02:52 +0300
commite05cca9ef652eee7b42927485a3821b14e3c565f (patch)
treed75bda9704a09ad75357d12996ad3d9ea28b3d78 /iqk-quantize.cpp
parent36374ab37dac8fadb634f802aaa3ee7b816fe727 (diff)
bitnet(scale in a separate tensor): CPU improvements
Arrange Q8 quants in blocks of 128 and adapt iqk_mul_mat to deal with that. This improves PP speed by a few percent.
Diffstat (limited to 'iqk-quantize.cpp')
-rw-r--r--iqk-quantize.cpp42
1 files changed, 32 insertions, 10 deletions
diff --git a/iqk-quantize.cpp b/iqk-quantize.cpp
index 6622d5ba..1a672803 100644
--- a/iqk-quantize.cpp
+++ b/iqk-quantize.cpp
@@ -374,29 +374,51 @@ void quantize_row_q8_K64_reference(const float * x, block_q8_K64 * y, int64_t k)
// x += 64;
//}
- for (int i = 0; i < nb; i++) {
-
+ block_q8_K128 * yp = (block_q8_K128 *)y;
+ for (int i = 0; i < nb/2; i++) {
float max = 0;
float amax = 0;
- for (int j = 0; j < 64; ++j) {
+ for (int j = 0; j < 128; ++j) {
float ax = fabsf(x[j]);
if (ax > amax) {
amax = ax; max = x[j];
}
}
if (!amax) {
- y[i].d = 0;
- memset(y[i].qs, 0, 64);
- x += 64;
+ yp[i].d = 0;
+ memset(yp[i].qs, 0, 128);
+ x += 128;
continue;
}
const float iscale = -127.f/max;
- for (int j = 0; j < 64; ++j) {
+ for (int j = 0; j < 128; ++j) {
int v = nearest_int(iscale*x[j]);
- y[i].qs[j] = MIN(127, v);
+ yp[i].qs[j] = MIN(127, v);
+ }
+ yp[i].d = 1/iscale;
+ x += 128;
+ }
+ int i = 2*(nb/2);
+ if (i < nb) {
+ float max = 0;
+ float amax = 0;
+ for (int j = 0; j < 64; ++j) {
+ float ax = fabsf(x[j]);
+ if (ax > amax) {
+ amax = ax; max = x[j];
+ }
+ }
+ if (!amax) {
+ yp[i/2].d = 0;
+ memset(yp[i/2].qs, 0, 64);
+ } else {
+ const float iscale = -127.f/max;
+ for (int j = 0; j < 64; ++j) {
+ int v = nearest_int(iscale*x[j]);
+ yp[i/2].qs[j] = MIN(127, v);
+ }
+ yp[i/2].d = 1/iscale;
}
- y[i].d = 1/iscale;
- x += 64;
}
}