author     Iwan Kawrakow <iwan.kawrakow@gmail.com>  2024-06-18 18:42:26 +0300
committer  Iwan Kawrakow <iwan.kawrakow@gmail.com>  2024-06-22 12:02:52 +0300
commit     181fd9c56eaa64d0a92f9e8be7387f409cfa8745 (patch)
tree       4df08e0200a38763b53d3635e86ca7980a99f3ae /ggml-cuda
parent     fece7e1db7bf73497a32751af06c6dbf48c26b19 (diff)
Bitnet (1.75 bpw): slightly faster CUDA dot product
We get 205 t/s, so ~13% slower than the 2-bit variant.
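The gain in the dequantization kernel comes from a simple algebraic rewrite: the packed ternary values decode to q in {0, 1, 2} and are scaled as dl * (q - 1), and folding the constant -1 into a precomputed ml = -dl turns every element into a single multiply-add. A minimal standalone sketch of that identity (illustrative only, not code taken from the patch; the helper name is made up):

    #include <stdint.h>

    // dl * (q - 1) == dl * q + ml, with ml = -dl hoisted out of the loop.
    static inline void dequant8_sketch(float dl, uint16_t gp, float * y) {
        const float ml = -dl;                 // replaces the per-element "- 1"
        for (int j = 0; j < 8; ++j) {
            const int q = (gp >> 2*j) & 3;    // 2-bit ternary value in {0, 1, 2}
            y[j] = dl * q + ml;               // one multiply-add per element
        }
    }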
Diffstat (limited to 'ggml-cuda')
-rw-r--r--  ggml-cuda/convert.cu    3
-rw-r--r--  ggml-cuda/vecdotq.cuh  13
2 files changed, 5 insertions, 11 deletions
diff --git a/ggml-cuda/convert.cu b/ggml-cuda/convert.cu
index ec78549c..13f9f246 100644
--- a/ggml-cuda/convert.cu
+++ b/ggml-cuda/convert.cu
@@ -437,10 +437,11 @@ static __global__ void dequantize_block_iq1_bn(const void * __restrict__ vx, dst
uint8_t u = x[i].extra & 0xff;
s.i = ((((u >> 4) | 0xf0) - 132) << 23) | ((u & 0x0f) << 19);
const float dl = x[i].extra & (1 << (4*ib + il + 8)) ? -s.f : s.f;
+ const float ml = -dl;
uint16_t idx = x[i].ql[4*ib + il] | ((x[i].qh[2*ib + il/2] << (8 - 4*(il%2))) & 0x0f00);
const uint16_t gp = iq1bn_grid_u16[idx];
for (int j = 0; j < 8; ++j) {
- y[j] = dl * (((gp >> 2*j) & 3) - 1);
+ y[j] = dl * ((gp >> 2*j) & 3) + ml;
}
}
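The vec_dot change below applies the same idea in integer form: rather than multiplying the difference of the two __dp4a partial sums by sign = ±1 on every iteration, the sign bit from extra selects which way the difference is taken, since sign * (pm - pp) == (bit ? pp - pm : pm - pp). A minimal sketch of that identity (illustrative only; the helper is hypothetical, not part of the patch):

    // sign = bit ? -1 : +1, so sign * (pm - pp) reduces to a select:
    static inline int signed_diff_sketch(int pm, int pp, int bit) {
        return bit ? pp - pm : pm - pp;   // avoids the integer multiply by sign
    }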
diff --git a/ggml-cuda/vecdotq.cuh b/ggml-cuda/vecdotq.cuh
index acab3865..6ec2035a 100644
--- a/ggml-cuda/vecdotq.cuh
+++ b/ggml-cuda/vecdotq.cuh
@@ -1086,23 +1086,16 @@ static __device__ __forceinline__ float vec_dot_iq1_bn_q8_1(
int sumi = 0;
#if __CUDA_ARCH__ >= MIN_CC_DP4A // lowest compute capability for integer intrinsics
const int * q8 = (const int *)bq8_1[iqs].qs;
- //const int minus = 0xffffffff;
for (int l = 0; l < 4; ++l) {
- int sign = extra & (1 << l) ? -1 : 1;
uint16_t val = iq1bn_grid_xxx[bq1->ql[4*iqs + l] | ((bq1->qh[2*iqs + l/2] << (8 - 4*(l%2))) & 0x0f00)];
uint8_t vp = val & 0xff, vm = val >> 8;
int32_t vp1 = __vcmpeq4(((vp & 0xf) * 0x01010101) & 0x08040201, 0x08040201);
int32_t vp2 = __vcmpeq4(((vp >> 4) * 0x01010101) & 0x08040201, 0x08040201);
int32_t vm1 = __vcmpeq4(((vm & 0xf) * 0x01010101) & 0x08040201, 0x08040201);
int32_t vm2 = __vcmpeq4(((vm >> 4) * 0x01010101) & 0x08040201, 0x08040201);
- sumi += (__dp4a(q8[2*l+0], vm1, __dp4a(q8[2*l+1], vm2, 0)) - __dp4a(q8[2*l+0], vp1, __dp4a(q8[2*l+1], vp2, 0)))*sign;
- //int32_t vp1 = __vcmpeq4(((vp & 0xf) * 0x01010101) & 0x08040201, 0x08040201) & q8[2*l+0];
- //int32_t vp2 = __vcmpeq4(((vp >> 4) * 0x01010101) & 0x08040201, 0x08040201) & q8[2*l+1];
- //int32_t vm1 = __vcmpeq4(((vm & 0xf) * 0x01010101) & 0x08040201, 0x08040201) & q8[2*l+0];
- //int32_t vm2 = __vcmpeq4(((vm >> 4) * 0x01010101) & 0x08040201, 0x08040201) & q8[2*l+1];
- //int32_t v1 = __vsubss4(vp1, vm1);
- //int32_t v2 = __vsubss4(vp2, vm2);
- //sumi += __dp4a(v1, 0x01010101, __dp4a(v2, 0x01010101, 0))*sign;
+ int32_t pm = __dp4a(q8[2*l+0], vm1, __dp4a(q8[2*l+1], vm2, 0));
+ int32_t pp = __dp4a(q8[2*l+0], vp1, __dp4a(q8[2*l+1], vp2, 0));
+ sumi += extra & (1 << l) ? pp - pm : pm - pp;
}
#else
const int8_t * q8 = bq8_1[iqs].qs;