author    Kawrakow <iwankawrakow@gmail.com>    2025-05-26 19:34:54 +0300
committer GitHub <noreply@github.com>          2025-05-26 19:34:54 +0300
commit    14292913260af89f37a6b856ef73bf88bda25129 (patch)
tree      67aed02032ea97819a279e28957821f337221c9e
parent    24c010b3916b5f1bb9d712d610d1fe9308ef7df4 (diff)
CUDA implementation for IQ2_K_R4, IQ3_K_R4, IQ4_K_R4, IQ5_K_R4 (#461)
* CUDA: iq4_k_r4 dequantize
* CUDA: iq4_k_r4 GEMV, ~10% slower than iq4_k
* CUDA: slightly faster iq4_k_r4 GEMV
* CUDA: slightly faster iq4_k_r4 GEMV; we are now within 3% of iq4_k
* CUDA: iq5_k_r4 dequantize
* CUDA: iq5_k_r4 GEMV, ~3% slower than iq5_k
* CUDA: iq3_k_r4 dequantize
* CUDA: iq3_k_r4 GEMV
* CUDA: slightly faster iq3_k_r4 GEMV
* CUDA: iq2_k_r4 GEMV
* CUDA: faster iq2_k_r4 GEMV

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
-rw-r--r--  ggml/src/ggml-cuda.cu             4
-rw-r--r--  ggml/src/ggml-cuda/convert.cu   246
-rw-r--r--  ggml/src/ggml-cuda/iqk_mmvq.cu  383
-rw-r--r--  ggml/src/ggml-cuda/iqk_mmvq.cuh  20
-rw-r--r--  ggml/src/ggml-cuda/mmvq.cu       16
5 files changed, 596 insertions, 73 deletions
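
All four _R4 ("row-interleaved") formats store 4 consecutive rows per quant block, so each new dequantize kernel begins by remapping its CUDA block index ii (one 256-value super-block of one logical row) onto a block index in the interleaved stream plus a row-within-group index. A small host-side sketch of that mapping, distilled from the kernels below (the row length and the standalone program are illustrative, not part of the patch):

// Host-side illustration of the index math every dequantize_block_*_r4
// kernel starts with: blocks of rows 4*r..4*r+3 all land in the same
// range of interleaved block indices, distinguished only by ir.
#include <cstdint>
#include <cstdio>

int main() {
    const int64_t n_per_row = 512;             // illustrative row length
    const int64_t nblock    = n_per_row/256;   // super-blocks per row (2 here)
    for (int64_t ii = 0; ii < 8*nblock; ++ii) {
        const int64_t row  = ii/nblock;        // logical row of this block
        const int64_t row4 = row/4;            // group of 4 interleaved rows
        const int64_t ir   = row%4;            // row inside the group (0..3)
        const int64_t ibl  = row4*nblock + ii%nblock; // interleaved block index
        std::printf("ii=%2lld -> row=%lld ir=%lld ibl=%lld\n",
                    (long long)ii, (long long)row, (long long)ir, (long long)ibl);
    }
    return 0;
}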
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index f55715f1..6331bc17 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -3470,6 +3470,10 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
+ case GGML_TYPE_IQ2_K_R4:
+ case GGML_TYPE_IQ3_K_R4:
+ case GGML_TYPE_IQ4_K_R4:
+ case GGML_TYPE_IQ5_K_R4:
return true;
default:
return false;
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 17604f1c..2ccca01b 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -755,6 +755,53 @@ static __global__ void dequantize_block_iq4_k(const void * __restrict__ vx, dst_
}
template<typename dst_t>
+static __global__ void dequantize_block_iq4_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+ int64_t ii = blockIdx.x;
+
+ int64_t nblock = n_per_row/256;
+ int64_t row = ii/nblock;
+ int64_t row4 = row/4;
+ int64_t ir = row%4;
+ int64_t ibl = row4*nblock + ii%nblock;
+
+ const int tid = threadIdx.x;
+ const int il = tid/8; // 0...3
+ const int ib = tid%8; // 0...7
+
+ const block_iq4_k_r4 * x = (const block_iq4_k_r4 *)vx;
+ dst_t * y = yy + 256*ii + 32*ib;
+
+ const float d = __half2float(x[ibl].d[ir]);
+ int is = 8*ib + ir;
+ float dl1 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
+ is += 4;
+ float dl2 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
+ auto values1 = iq4k_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 4);
+ auto values2 = iq4k_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 4);
+ auto qs = x[ibl].qs + 64*ib + 4*ir;
+ if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+ y[il+ 0] = __float2bfloat16(dl1 * values1[qs[il+ 0] & 0xf]);
+ y[il+ 8] = __float2bfloat16(dl1 * values1[qs[il+ 0] >> 4]);
+ y[il+16] = __float2bfloat16(dl2 * values2[qs[il+16] & 0xf]);
+ y[il+24] = __float2bfloat16(dl2 * values2[qs[il+16] >> 4]);
+ y[il+ 4] = __float2bfloat16(dl1 * values1[qs[il+32] & 0xf]);
+ y[il+12] = __float2bfloat16(dl1 * values1[qs[il+32] >> 4]);
+ y[il+20] = __float2bfloat16(dl2 * values2[qs[il+48] & 0xf]);
+ y[il+28] = __float2bfloat16(dl2 * values2[qs[il+48] >> 4]);
+ } else {
+ y[il+ 0] = dl1 * values1[qs[il+ 0] & 0xf];
+ y[il+ 4] = dl1 * values1[qs[il+32] & 0xf];
+ y[il+ 8] = dl1 * values1[qs[il+ 0] >> 4];
+ y[il+12] = dl1 * values1[qs[il+32] >> 4];
+ y[il+16] = dl2 * values2[qs[il+16] & 0xf];
+ y[il+20] = dl2 * values2[qs[il+48] & 0xf];
+ y[il+24] = dl2 * values2[qs[il+16] >> 4];
+ y[il+28] = dl2 * values2[qs[il+48] >> 4];
+ }
+}
+
+template<typename dst_t>
static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
@@ -791,6 +838,149 @@ static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_
}
}
+template<typename dst_t>
+static __global__ void dequantize_block_iq5_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+ int64_t ii = blockIdx.x;
+
+ int64_t nblock = n_per_row/256;
+ int64_t row = ii/nblock;
+ int64_t row4 = row/4;
+ int64_t ir = row%4;
+ int64_t ibl = row4*nblock + ii%nblock;
+
+ const int tid = threadIdx.x;
+ const int il = tid/8; // 0...3
+ const int ib = tid%8; // 0...7
+
+ const block_iq5_k_r4 * x = (const block_iq5_k_r4 *)vx;
+ dst_t * y = yy + 256*ii + 32*ib;
+
+ const float d = __half2float(x[ibl].d[ir]);
+ int is = 8*ib + ir;
+ float dl1 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
+ is += 4;
+ float dl2 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
+ auto values1 = iq5nl_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 5);
+ auto values2 = iq5nl_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 5);
+ auto qs = x[ibl].qs + 64*ib + 4*ir;
+ auto qh = x[ibl].qh + 16*ib + 4*ir;
+ if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+ y[il+ 0] = __float2bfloat16(dl1 * values1[(qs[il+ 0] & 0xf) | (((qh[il] >> 0) & 1) << 4)]);
+ y[il+ 4] = __float2bfloat16(dl1 * values1[(qs[il+32] & 0xf) | (((qh[il] >> 4) & 1) << 4)]);
+ y[il+ 8] = __float2bfloat16(dl1 * values1[(qs[il+ 0] >> 4) | (((qh[il] >> 1) & 1) << 4)]);
+ y[il+12] = __float2bfloat16(dl1 * values1[(qs[il+32] >> 4) | (((qh[il] >> 5) & 1) << 4)]);
+ y[il+16] = __float2bfloat16(dl2 * values2[(qs[il+16] & 0xf) | (((qh[il] >> 2) & 1) << 4)]);
+ y[il+20] = __float2bfloat16(dl2 * values2[(qs[il+48] & 0xf) | (((qh[il] >> 6) & 1) << 4)]);
+ y[il+24] = __float2bfloat16(dl2 * values2[(qs[il+16] >> 4) | (((qh[il] >> 3) & 1) << 4)]);
+ y[il+28] = __float2bfloat16(dl2 * values2[(qs[il+48] >> 4) | (((qh[il] >> 7) & 1) << 4)]);
+ } else {
+ y[il+ 0] = dl1 * values1[(qs[il+ 0] & 0xf) | (((qh[il] >> 0) & 1) << 4)];
+ y[il+ 4] = dl1 * values1[(qs[il+32] & 0xf) | (((qh[il] >> 4) & 1) << 4)];
+ y[il+ 8] = dl1 * values1[(qs[il+ 0] >> 4) | (((qh[il] >> 1) & 1) << 4)];
+ y[il+12] = dl1 * values1[(qs[il+32] >> 4) | (((qh[il] >> 5) & 1) << 4)];
+ y[il+16] = dl2 * values2[(qs[il+16] & 0xf) | (((qh[il] >> 2) & 1) << 4)];
+ y[il+20] = dl2 * values2[(qs[il+48] & 0xf) | (((qh[il] >> 6) & 1) << 4)];
+ y[il+24] = dl2 * values2[(qs[il+16] >> 4) | (((qh[il] >> 3) & 1) << 4)];
+ y[il+28] = dl2 * values2[(qs[il+48] >> 4) | (((qh[il] >> 7) & 1) << 4)];
+ }
+}
+
+template<typename dst_t>
+static __global__ void dequantize_block_iq2_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+ int64_t ii = blockIdx.x;
+
+ int64_t nblock = n_per_row/256;
+ int64_t row = ii/nblock;
+ int64_t row4 = row/4;
+ int64_t ir = row%4;
+ int64_t ibl = row4*nblock + ii%nblock;
+
+ const int tid = threadIdx.x;
+ const int il = tid/8; // 0...3
+ const int ib = tid%8; // 0...7
+
+ const block_iq2_k_r4 * x = (const block_iq2_k_r4 *)vx;
+ dst_t * y = yy + 256*ii + 32*ib;
+
+ const float d = __half2float(x[ibl].d[ir]);
+ int is = 8*ib + ir;
+ float dl1 = d * (((x[ibl].scales[is%32] >> 4*(is/32)) & 0xf) - 8);
+ is += 4;
+ float dl2 = d * (((x[ibl].scales[is%32] >> 4*(is/32)) & 0xf) - 8);
+ auto values1 = iq2nl_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 2);
+ auto values2 = iq2nl_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 2);
+ auto ql = x[ibl].qs + 32*ib + 4*ir;
+ if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+ y[il+ 0] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 0) & 3]);
+ y[il+ 4] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 2) & 3]);
+ y[il+ 8] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 4) & 3]);
+ y[il+12] = __float2bfloat16(dl1 * values1[(ql[il+ 0] >> 6) & 3]);
+ y[il+16] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 0) & 3]);
+ y[il+20] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 2) & 3]);
+ y[il+24] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 4) & 3]);
+ y[il+28] = __float2bfloat16(dl2 * values2[(ql[il+16] >> 6) & 3]);
+ } else {
+ y[il+ 0] = dl1 * values1[(ql[il+ 0] >> 0) & 3];
+ y[il+ 4] = dl1 * values1[(ql[il+ 0] >> 2) & 3];
+ y[il+ 8] = dl1 * values1[(ql[il+ 0] >> 4) & 3];
+ y[il+12] = dl1 * values1[(ql[il+ 0] >> 6) & 3];
+ y[il+16] = dl2 * values2[(ql[il+16] >> 0) & 3];
+ y[il+20] = dl2 * values2[(ql[il+16] >> 2) & 3];
+ y[il+24] = dl2 * values2[(ql[il+16] >> 4) & 3];
+ y[il+28] = dl2 * values2[(ql[il+16] >> 6) & 3];
+ }
+}
+
+template<typename dst_t>
+static __global__ void dequantize_block_iq3_k_r4(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+ int64_t ii = blockIdx.x;
+
+ int64_t nblock = n_per_row/256;
+ int64_t row = ii/nblock;
+ int64_t row4 = row/4;
+ int64_t ir = row%4;
+ int64_t ibl = row4*nblock + ii%nblock;
+
+ const int tid = threadIdx.x;
+ const int il = tid/8; // 0...3
+ const int ib = tid%8; // 0...7
+
+ const block_iq3_k_r4 * x = (const block_iq3_k_r4 *)vx;
+ dst_t * y = yy + 256*ii + 32*ib;
+
+ const float d = __half2float(x[ibl].d[ir]);
+ int is = 8*ib + ir;
+ float dl1 = d * (2*((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) + 1) * ((x[ibl].scales_h[is%8] >> (is/8)) & 1 ? -1 : 1);
+ is += 4;
+ float dl2 = d * (2*((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) + 1) * ((x[ibl].scales_h[is%8] >> (is/8)) & 1 ? -1 : 1);
+ auto values1 = iq3nl_values + (((x[ibl].extra[ir+0] >> ib) & 1) << 3);
+ auto values2 = iq3nl_values + (((x[ibl].extra[ir+4] >> ib) & 1) << 3);
+ auto ql = x[ibl].qs + 32*ib + 4*ir;
+ auto qh = x[ibl].qh + 16*ib + 4*ir;
+ if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+ y[il+ 0] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 0) & 3) | ((qh[il] << 2) & 4)]);
+ y[il+ 4] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 2) & 3) | ((qh[il] << 1) & 4)]);
+ y[il+ 8] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 4) & 3) | ((qh[il] << 0) & 4)]);
+ y[il+12] = __float2bfloat16(dl1 * values1[((ql[il+ 0] >> 6) & 3) | ((qh[il] >> 1) & 4)]);
+ y[il+16] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 0) & 3) | ((qh[il] >> 2) & 4)]);
+ y[il+20] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 2) & 3) | ((qh[il] >> 3) & 4)]);
+ y[il+24] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 4) & 3) | ((qh[il] >> 4) & 4)]);
+ y[il+28] = __float2bfloat16(dl2 * values2[((ql[il+16] >> 6) & 3) | ((qh[il] >> 5) & 4)]);
+ } else {
+ y[il+ 0] = dl1 * values1[((ql[il+ 0] >> 0) & 3) | ((qh[il] << 2) & 4)];
+ y[il+ 4] = dl1 * values1[((ql[il+ 0] >> 2) & 3) | ((qh[il] << 1) & 4)];
+ y[il+ 8] = dl1 * values1[((ql[il+ 0] >> 4) & 3) | ((qh[il] << 0) & 4)];
+ y[il+12] = dl1 * values1[((ql[il+ 0] >> 6) & 3) | ((qh[il] >> 1) & 4)];
+ y[il+16] = dl2 * values2[((ql[il+16] >> 0) & 3) | ((qh[il] >> 2) & 4)];
+ y[il+20] = dl2 * values2[((ql[il+16] >> 2) & 3) | ((qh[il] >> 3) & 4)];
+ y[il+24] = dl2 * values2[((ql[il+16] >> 4) & 3) | ((qh[il] >> 4) & 4)];
+ y[il+28] = dl2 * values2[((ql[il+16] >> 6) & 3) | ((qh[il] >> 5) & 4)];
+ }
+}
+
template<typename dst_t>
static __global__ void dequantize_block_iq5_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
@@ -1203,6 +1393,22 @@ static void dequantize_row_iq3_k_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq3_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+ const int64_t k = nrows * n_per_row;
+ const int64_t row_size = ggml_row_size(GGML_TYPE_IQ3_K_R4, n_per_row);
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq3_k_r4<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
+template<typename dst_t>
+static void dequantize_row_iq2_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+ const int64_t k = nrows * n_per_row;
+ const int64_t row_size = ggml_row_size(GGML_TYPE_IQ2_K_R4, n_per_row);
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq2_k_r4<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
+template<typename dst_t>
static void dequantize_row_iq4_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
const int nb = (k + QK_K - 1) / QK_K;
@@ -1210,6 +1416,14 @@ static void dequantize_row_iq4_k_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq4_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+ const int64_t k = nrows * n_per_row;
+ const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_K_R4, n_per_row);
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq4_k_r4<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
+template<typename dst_t>
static void dequantize_row_iq5_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
const int nb = (k + QK_K - 1) / QK_K;
@@ -1217,6 +1431,14 @@ static void dequantize_row_iq5_k_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq5_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+ const int64_t k = nrows * n_per_row;
+ const int64_t row_size = ggml_row_size(GGML_TYPE_IQ5_K_R4, n_per_row);
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq5_k_r4<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
+template<typename dst_t>
static void dequantize_row_iq6_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
const int nb = (k + QK_K - 1) / QK_K;
@@ -1312,6 +1534,14 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) {
return dequantize_row_iq5_k_cuda<nv_bfloat16>;
case GGML_TYPE_IQ6_K:
return dequantize_row_iq6_k_cuda<nv_bfloat16>;
+ case GGML_TYPE_IQ2_K_R4:
+ return dequantize_row_iq2_k_r4_cuda<nv_bfloat16>;
+ case GGML_TYPE_IQ3_K_R4:
+ return dequantize_row_iq3_k_r4_cuda<nv_bfloat16>;
+ case GGML_TYPE_IQ4_K_R4:
+ return dequantize_row_iq4_k_r4_cuda<nv_bfloat16>;
+ case GGML_TYPE_IQ5_K_R4:
+ return dequantize_row_iq5_k_r4_cuda<nv_bfloat16>;
default:
return nullptr;
}
@@ -1394,6 +1624,14 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
return convert_unary_cuda<float>;
case GGML_TYPE_BF16:
return convert_from_bf16_cuda;
+ case GGML_TYPE_IQ2_K_R4:
+ return dequantize_row_iq2_k_r4_cuda;
+ case GGML_TYPE_IQ3_K_R4:
+ return dequantize_row_iq3_k_r4_cuda;
+ case GGML_TYPE_IQ4_K_R4:
+ return dequantize_row_iq4_k_r4_cuda;
+ case GGML_TYPE_IQ5_K_R4:
+ return dequantize_row_iq5_k_r4_cuda;
default:
return nullptr;
}
@@ -1473,6 +1711,14 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
return convert_unary_cuda<half>;
case GGML_TYPE_BF16:
return convert_from_bf16_cuda;
+ case GGML_TYPE_IQ2_K_R4:
+ return dequantize_row_iq2_k_r4_cuda;
+ case GGML_TYPE_IQ3_K_R4:
+ return dequantize_row_iq3_k_r4_cuda;
+ case GGML_TYPE_IQ4_K_R4:
+ return dequantize_row_iq4_k_r4_cuda;
+ case GGML_TYPE_IQ5_K_R4:
+ return dequantize_row_iq5_k_r4_cuda;
default:
return nullptr;
}
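
A note on the scale unpacking repeated throughout the kernels in this file: iq4_k_r4 and iq5_k_r4 reconstruct a signed 6-bit block scale from 4 low bits in scales_l (two entries per byte) and 2 high bits in scales_h (four entries per byte), offset by -32; iq2_k_r4 instead uses a plain 4-bit scale minus 8, and iq3_k_r4 an odd magnitude 2*l+1 with a separate sign bit. A host-side reference of the 6-bit case, as a sketch only (unpack_scale_r4 is an illustrative name):

// Reference reconstruction of one signed 6-bit block scale as done inline
// in dequantize_block_iq4_k_r4 / dequantize_block_iq5_k_r4 above.
#include <cstdint>

int unpack_scale_r4(const uint8_t * scales_l, const uint8_t * scales_h, int is) {
    int lo = (scales_l[is % 32] >> 4*(is / 32)) & 0xf;  // 4 low bits
    int hi = (scales_h[is % 16] >> 2*(is / 16)) & 3;    // 2 high bits
    return ((hi << 4) | lo) - 32;                       // signed 6-bit scale
}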
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu
index 6a2db725..20bacd97 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cu
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cu
@@ -6,15 +6,47 @@
#include "iqk_mmvq.cuh"
-typedef float (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs);
+typedef void (*vec_dot_q_cuda_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float *);
+
+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ2_K_R4> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR4_XS;
+ static constexpr int qi = QI4_XS;
+};
+
+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K_R4> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR4_XS;
+ static constexpr int qi = QI4_XS;
+};
+
+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ4_K_R4> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR4_XS;
+ static constexpr int qi = QI4_XS;
+};
+
+template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ5_K_R4> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR5_XS;
+ static constexpr int qi = QI5_XS;
+};
+
// Reminder:
// constexpr int qk = ggml_cuda_type_traits<type>::qk;
// constexpr int qi = ggml_cuda_type_traits<type>::qi;
// constexpr int vdr = get_vdr_mmvq(type);
+// QI4_XS = 256/(4*2) = 32
+// vdr = 4, qi = 32 -> qi/vdr = 8, kqs = 4*(tid%8), blocks_per_iter = 4*1*32/32 = 4
+// vdr = 2, qi = 32 -> qi/vdr =16, kqs = 2*(tid%16), blocks_per_iter = 2*1*32/32 = 2
namespace {
-template <ggml_type type, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda, int ncols_y>
+template <ggml_type type, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda, int ncols_y, int n_interleaved = 1>
__device__ void iqk_mul_mat_vec_q(
const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int nrows_dst, const int64_t row_size) {
@@ -24,10 +56,10 @@ __device__ void iqk_mul_mat_vec_q(
#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && (defined(RDNA2) || defined(RDNA3))
constexpr int nwarps = 1;
- constexpr int rows_per_cuda_block = 1;
+ constexpr int rows_per_cuda_block = n_interleaved;
#else
- constexpr int nwarps = ncols_y <= 4 ? 4 : 2;
- constexpr int rows_per_cuda_block = ncols_y == 1 ? 1 : 2;
+ constexpr int nwarps = n_interleaved == 1 ? ncols_y <= 4 ? 4 : 2 : 1;
+ constexpr int rows_per_cuda_block = n_interleaved == 1 ? ncols_y == 1 ? 1 : 2 : n_interleaved;
#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__) && !defined(RDNA2) && !defined(RDNA3)
const int tid = WARP_SIZE*threadIdx.y + threadIdx.x;
@@ -49,10 +81,15 @@ __device__ void iqk_mul_mat_vec_q(
#pragma unroll
for (int j = 0; j < ncols_y; ++j) {
+ if constexpr (n_interleaved == 1) {
#pragma unroll
- for (int i = 0; i < rows_per_cuda_block; ++i) {
- tmp[j][i] += vec_dot_q_cuda((const void *)((const char *)vx + (row0 + i)*row_size),
- &y[j*blocks_per_col_y + kby], kbx, kqs);
+ for (int i = 0; i < rows_per_cuda_block; ++i) {
+ vec_dot_q_cuda((const void *)((const char *)vx + (row0 + i)*row_size),
+ &y[j*blocks_per_col_y + kby], kbx, kqs, &tmp[j][i]);
+ }
+ } else {
+ vec_dot_q_cuda((const void *)((const char *)vx + row0*row_size),
+ &y[j*blocks_per_col_y + kby], kbx, kqs, tmp[j]);
}
}
}
@@ -90,7 +127,7 @@ __device__ void iqk_mul_mat_vec_q(
}
}
-template <ggml_type type, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda, int ncols_y>
+template <ggml_type type, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda, int ncols_y, int n_interleaved = 1>
#if !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__))
// tell the compiler to use as many registers as it wants, see nwarps definition below
__launch_bounds__((ncols_y <= 4 ? 4 : 2)*WARP_SIZE, 1)
@@ -105,10 +142,10 @@ __global__ void iqk_mul_mat_vec_q(
const char * cx = (const char *)vx + i02*nb02;
const char * cy = (const char *)vy + i2*nb12;
char * cdst = (char *)dst + i2*nb2;
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, ncols_y>(cx, cy, (float *)cdst, ncols_x, nrows_x, nrows_y, nrows_dst, row_size);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, ncols_y, n_interleaved>(cx, cy, (float *)cdst, ncols_x, nrows_x, nrows_y, nrows_dst, row_size);
}
-template <ggml_type type, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda>
+template <ggml_type type, int vdr, vec_dot_q_cuda_t vec_dot_q_cuda, int n_interleaved = 1>
void iqk_mul_mat_vec_q_cuda(
const void * vx, const void * vy, float * dst, const char * ids_data,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
@@ -120,26 +157,26 @@ void iqk_mul_mat_vec_q_cuda(
int id = ggml_cuda_get_device();
int64_t nwarps = 1;
- int64_t rows_per_cuda_block = 1;
+ int64_t rows_per_cuda_block = n_interleaved;
if (ggml_cuda_info().devices[id].cc < CC_RDNA2) { // NVIDIA and AMD older than RDNA2
switch(ncols_y) {
case 1:
- nwarps = 4;
- rows_per_cuda_block = 1;
+ nwarps = n_interleaved == 1 ? 4 : 1;
+ rows_per_cuda_block = n_interleaved == 1 ? 1 : n_interleaved;
break;
case 2:
case 3:
case 4:
- nwarps = 4;
- rows_per_cuda_block = 2;
+ nwarps = n_interleaved == 1 ? 4 : 1;
+ rows_per_cuda_block = n_interleaved == 1 ? 2 : n_interleaved;
break;
case 5:
case 6:
case 7:
case 8:
- nwarps = 2;
- rows_per_cuda_block = 2;
+ nwarps = n_interleaved == 1 ? 2 : 1;
+ rows_per_cuda_block = n_interleaved == 1 ? 2 : n_interleaved;
break;
default:
GGML_ASSERT(false);
@@ -154,28 +191,28 @@ void iqk_mul_mat_vec_q_cuda(
switch (ncols_y) {
case 1:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 1><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 1, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
case 2:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 2><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 2, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
case 3:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 3><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 3, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
case 4:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 4><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 4, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
case 5:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 5><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 5, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
case 6:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 6><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 6, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
case 7:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 7><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 7, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
case 8:
- iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 8><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
+ iqk_mul_mat_vec_q<type, vdr, vec_dot_q_cuda, 8, n_interleaved><<<block_nums, block_dims, 0, stream>>>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, nrows_dst, row_size, nb02, nb12, nb2, ids_nb0);
break;
default:
GGML_ASSERT(false);
@@ -202,8 +239,8 @@ __device__ __forceinline__ void get_int_from_table_16_shift(const uint32_t & q4,
#define VDR_IQ4_K_Q8_1_MMVQ 4
#define VDR_IQ4_K_Q8_1_MMQ 4
-__device__ __forceinline__ float vec_dot_iq4_k_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq4_k_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
const block_iq4_k * bq4 = (const block_iq4_k *) vbq + kbx;
const uint8_t * all_values = (const uint8_t *)iq4k_values;
@@ -226,14 +263,57 @@ __device__ __forceinline__ float vec_dot_iq4_k_q8_1(
const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2);
const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32;
const int ls2 = ((bq4->scales_l[ib32] >> 4) | ((sh << 2) & 0x30)) - 32;
- return d * (sumi1 * ls1 + sumi2 * ls2);
+ *result += d * (sumi1 * ls1 + sumi2 * ls2);
+}
+
+static __device__ __forceinline__ int2 get_int_from_table_16(const int & q4, const int8_t * values) {
+ const int q0_32 = (q4 >> 0) & 0x0F0F0F0F;
+ const int8_t * q0_8 = (const int8_t *) &q0_32;
+ const char4 val0_8 = make_char4(values[q0_8[0]], values[q0_8[1]], values[q0_8[2]], values[q0_8[3]]);
+
+ const int q1_32 = (q4 >> 4) & 0x0F0F0F0F;
+ const int8_t * q1_8 = (const int8_t *) &q1_32;
+ const char4 val1_8 = make_char4(values[q1_8[0]], values[q1_8[1]], values[q1_8[2]], values[q1_8[3]]);
+
+ return make_int2(*((const int *) &val0_8), *((const int *) &val1_8));
+}
+
+__device__ __forceinline__ void vec_dot_iq4_k_r4_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
+
+ const block_iq4_k_r4 * bq4 = (const block_iq4_k_r4 *)vbq + kbx;
+
+ // iqs is 0...30 in steps of 2
+ const int ib16 = iqs/2;
+ const float d8 = __low2float(bq8_1[ib16/2].ds);
+ const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2);
+
+ int ib32 = ib16/2;
+ int is = ib16%2;
+ int scales;
+ const uint32_t * scales_l = (const uint32_t *)bq4->scales_l;
+ const uint32_t * scales_h = (const uint32_t *)bq4->scales_h;
+ scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020);
+ const int8_t * s8 = (const int8_t *)&scales;
+ int2 val1;
+ const int * q4 = (const int *)bq4->qs + 16*ib32;
+ for (int i = 0; i < 4; ++i) {
+ auto values1 = iq4k_values + (((bq4->extra[i+4*is] >> ib32) & 1) << 4);
+ int sumi1 = 0;
+ val1 = get_int_from_table_16(q4[i+4*is+0], values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1));
+ val1 = get_int_from_table_16(q4[i+4*is+8], values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1));
+ const float d = __half2float(bq4->d[i]) * d8;
+ result[i] += d * sumi1 * s8[i];
+ }
}
#define VDR_IQ4_KS_Q8_1_MMVQ 4
#define VDR_IQ4_KS_Q8_1_MMQ 4
-__device__ __forceinline__ float vec_dot_iq4_ks_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq4_ks_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
float scale = *(const float *)vbq;
const block_iq4_ks * bq4 = (const block_iq4_ks *)((const char *)vbq + sizeof(float)) + kbx;
@@ -251,14 +331,14 @@ __device__ __forceinline__ float vec_dot_iq4_ks_q8_1(
sumi = ggml_cuda_dp4a(v1, q8[j+0], sumi);
sumi = ggml_cuda_dp4a(v2, q8[j+4], sumi);
}
- return dl * __low2float(bq8_1[ib32].ds) * sumi;
+ *result += dl * __low2float(bq8_1[ib32].ds) * sumi;
}
#define VDR_IQ4_KSS_Q8_1_MMVQ 4
#define VDR_IQ4_KSS_Q8_1_MMQ 4
-__device__ __forceinline__ float vec_dot_iq4_kss_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq4_kss_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
float scale = *(const float *)vbq;
const block_iq4_kss * bq4 = (const block_iq4_kss *)((const char *)vbq + sizeof(float)) + kbx;
@@ -280,7 +360,7 @@ __device__ __forceinline__ float vec_dot_iq4_kss_q8_1(
sumi = ggml_cuda_dp4a(v1, q8[j+0], sumi);
sumi = ggml_cuda_dp4a(v2, q8[j+4], sumi);
}
- return dl * __low2float(bq8_1[ib32].ds) * sumi;
+ *result += dl * __low2float(bq8_1[ib32].ds) * sumi;
}
#define VDR_IQ5_K_Q8_1_MMVQ 4
@@ -292,9 +372,8 @@ __device__ __forceinline__ int int_from_table(const uint8_t * a8, const uint8_t
return v1 | (v2 << 16);
}
-__device__ __forceinline__ float vec_dot_iq5_k_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
-
+__device__ __forceinline__ void vec_dot_iq5_k_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
const block_iq5_k * bq5 = (const block_iq5_k *) vbq + kbx;
const uint8_t * all_values = (const uint8_t *)iq5nl_values;
@@ -325,11 +404,51 @@ __device__ __forceinline__ float vec_dot_iq5_k_q8_1(
const uint8_t sh = bq5->scales_h[i4/2] >> 2*(i4%2);
const int ls1 = (((bq5->scales_l[2*(i4/2)+0] >> 4*(i4%2)) & 0xf) | ((sh << 4) & 0x30)) - 32;
const int ls2 = (((bq5->scales_l[2*(i4/2)+1] >> 4*(i4%2)) & 0xf) | ((sh << 0) & 0x30)) - 32;
- return d5 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2);
+ *result += d5 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2);
+}
+
+__device__ __forceinline__ void vec_dot_iq5_k_r4_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
+
+ const block_iq5_k_r4 * bq5 = (const block_iq5_k_r4 *)vbq + kbx;
+
+ // iqs is 0...30 in steps of 2
+ const int ib16 = iqs/2;
+ const float d8 = __low2float(bq8_1[ib16/2].ds);
+ const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2);
+
+ int ib32 = ib16/2;
+ int is = ib16%2;
+ int scales;
+ const uint32_t * scales_l = (const uint32_t *)bq5->scales_l;
+ const uint32_t * scales_h = (const uint32_t *)bq5->scales_h;
+ scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) | (((scales_h[2*(ib32%2)+is] >> 2*(ib32/2)) & 0x03030303) << 4), 0x20202020);
+ const int8_t * s8 = (const int8_t *)&scales;
+ int2 val1;
+ const int * q4 = (const int *)bq5->qs + 16*ib32;
+ const int * qh = (const int *)bq5->qh + 4*ib32;
+ int aux32[2];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+ for (int i = 0; i < 4; ++i) {
+ auto values1 = iq5nl_values + (((bq5->extra[i+4*is] >> ib32) & 1) << 5);
+ int sumi1 = 0;
+ aux32[0] = ((q4[i+4*is+0] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+0)) & 0x01010101) << 4);
+ aux32[1] = ((q4[i+4*is+0] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+1)) & 0x01010101) << 4);
+ val1.x = int_from_table(aux8+0, (const uint8_t *)values1);
+ val1.y = int_from_table(aux8+4, (const uint8_t *)values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[2], sumi1));
+ aux32[0] = ((q4[i+4*is+8] >> 0) & 0x0f0f0f0f) | (((qh[i] >> (2*is+4)) & 0x01010101) << 4);
+ aux32[1] = ((q4[i+4*is+8] >> 4) & 0x0f0f0f0f) | (((qh[i] >> (2*is+5)) & 0x01010101) << 4);
+ val1.x = int_from_table(aux8+0, (const uint8_t *)values1);
+ val1.y = int_from_table(aux8+4, (const uint8_t *)values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[1], ggml_cuda_dp4a(val1.y, q8[3], sumi1));
+ const float d = __half2float(bq5->d[i]) * d8;
+ result[i] += d * sumi1 * s8[i];
+ }
}
-__device__ __forceinline__ float vec_dot_iq5_ks_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq5_ks_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
float scale = *(const float *)vbq;
const block_iq5_ks * bq5 = (const block_iq5_ks *)((const char *)vbq + sizeof(float)) + kbx;
@@ -358,15 +477,61 @@ __device__ __forceinline__ float vec_dot_iq5_ks_q8_1(
}
const int ls1 = (bq5->scales[2*(i4/2)+0] & 254) - 127;
const int ls2 = (bq5->scales[2*(i4/2)+1] & 254) - 127;
- return scale * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2);
+ *result += scale * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2);
+}
+
+__device__ __forceinline__ void vec_dot_iq3_k_r4_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
+
+ const block_iq3_k_r4 * bq3 = (const block_iq3_k_r4 *)vbq + kbx;
+
+ // iqs is 0...30 in steps of 2
+ const int ib16 = iqs/2;
+ const float d8 = __low2float(bq8_1[ib16/2].ds);
+ const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2);
+
+ int ib32 = ib16/2;
+ int is = ib16%2;
+ int scales[2];
+ const uint32_t * scales_l = (const uint32_t *)bq3->scales_l;
+ const uint32_t * scales_h = (const uint32_t *)bq3->scales_h;
+
+ scales[0] = (((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f) << 1) | 0x01010101;
+ scales[1] = (scales_h[is] >> ib32) & 0x01010101;
+ // This is not faster. Why?
+ //scales[1] = __vcmpeq4((scales_h[is] >> ib32) & 0x01010101, 0x01010101);
+ //scales[0] = __vsub4(scales[0] ^ scales[1], scales[1]);
+ const int8_t * s8 = (const int8_t *)scales;
+ int2 val1;
+ const int * q2 = (const int *)bq3->qs + 8*ib32 + 4*is;
+ const int * qh = (const int *)bq3->qh + 4*ib32;
+ int aux32[2];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+ for (int i = 0; i < 4; ++i) {
+ auto values1 = iq3nl_values + (((bq3->extra[i+4*is] >> ib32) & 1) << 3);
+ int sumi1 = 0;
+ int h = qh[i] >> 4*is;
+ aux32[0] = ((q2[i] >> 0) & 0x03030303) | ((h << 2) & 0x04040404);
+ aux32[1] = ((q2[i] >> 2) & 0x03030303) | ((h << 1) & 0x04040404);
+ val1.x = int_from_table(aux8+0, (const uint8_t *)values1);
+ val1.y = int_from_table(aux8+4, (const uint8_t *)values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[1], sumi1));
+ aux32[0] = ((q2[i] >> 4) & 0x03030303) | ((h >> 0) & 0x04040404);
+ aux32[1] = ((q2[i] >> 6) & 0x03030303) | ((h >> 1) & 0x04040404);
+ val1.x = int_from_table(aux8+0, (const uint8_t *)values1);
+ val1.y = int_from_table(aux8+4, (const uint8_t *)values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[2], ggml_cuda_dp4a(val1.y, q8[3], sumi1));
+ const float d = __half2float(bq3->d[i]) * d8;
+ result[i] += d * sumi1 * s8[i] * (s8[i+4] ? -1 : 1);
+ //result[i] += d * sumi1 * s8[i];
+ }
}
#define VDR_IQ6_K_Q8_1_MMVQ 4
#define VDR_IQ6_K_Q8_1_MMQ 4
-__device__ __forceinline__ float vec_dot_iq6_k_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
-
+__device__ __forceinline__ void vec_dot_iq6_k_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
const block_iq6_k * bq6 = (const block_iq6_k *) vbq + kbx;
const uint8_t * all_values = (const uint8_t *)iq6nl_values;
@@ -395,7 +560,7 @@ __device__ __forceinline__ float vec_dot_iq6_k_q8_1(
sumi2 = ggml_cuda_dp4a(v2, q8_2[j], sumi2);
}
const float d6 = __half2float(bq6->d);
- return d6 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * bq6->scales[4*(i4/2)+(i4%2)] + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * bq6->scales[4*(i4/2)+(i4%2)+2]);
+ *result += d6 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * bq6->scales[4*(i4/2)+(i4%2)] + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * bq6->scales[4*(i4/2)+(i4%2)+2]);
}
static const __device__ uint32_t iq2k_table[512] = {
@@ -472,8 +637,8 @@ __device__ __forceinline__ int int_from_table_4(const uint8_t * a8, const int *
#define VDR_IQ2_K_Q8_1_MMVQ 4
#define VDR_IQ2_K_Q8_1_MMQ 4
-__device__ __forceinline__ float vec_dot_iq2_k_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq2_k_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
// iqs is 0, 4, 8, 12, 16, 20, 24, 28
// we have 16 packed quants (when cast to int)
@@ -524,18 +689,17 @@ __device__ __forceinline__ float vec_dot_iq2_k_q8_1(
v2 = int_from_table_4(a8 + 4, values);
int sumi4 = ggml_cuda_dp4a(v2, q8_4[1], ggml_cuda_dp4a(v1, q8_4[0], 0)) * s8[3];
- return __half2float(bq2->d) * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1
- + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2
- + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3
- + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4);
-
+ *result += __half2float(bq2->d) * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1
+ + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2
+ + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3
+ + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4);
}
#define VDR_IQ2_KS_Q8_1_MMVQ 4
#define VDR_IQ2_KS_Q8_1_MMQ 4
-__device__ __forceinline__ float vec_dot_iq2_ks_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq2_ks_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
float scale = *(const half *)vbq;
const block_iq2_ks * bq2 = (const block_iq2_ks *)((const char *)vbq + sizeof(half)) + kbx;
@@ -584,10 +748,51 @@ __device__ __forceinline__ float vec_dot_iq2_ks_q8_1(
v2 = int_from_table_4(a8 + 4, values);
int sumi4 = ggml_cuda_dp4a(v2, q8_4[1], ggml_cuda_dp4a(v1, q8_4[0], 0)) * s8[3];
- return scale * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1
- + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2
- + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3
- + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4);
+ *result += scale * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1
+ + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2
+ + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3
+ + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4);
+}
+
+__device__ __forceinline__ void vec_dot_iq2_k_r4_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
+
+ const block_iq2_k_r4 * bq2 = (const block_iq2_k_r4 *)vbq + kbx;
+
+ // iqs is 0...30 in steps of 2
+ const int ib16 = iqs/2;
+ const float d8 = __low2float(bq8_1[ib16/2].ds);
+ const int32_t * q8 = (const int *)bq8_1[ib16/2].qs + 4*(ib16%2);
+
+ int ib32 = ib16/2;
+ int is = ib16%2;
+ const int * scales_l = (const int *)bq2->scales;
+
+ const int * all_values = (const int *)iq2k_table;
+
+ int scales = __vsub4(((scales_l[2*(ib32%4)+is] >> 4*(ib32/4)) & 0x0f0f0f0f), 0x08080808);
+ const int8_t * s8 = (const int8_t *)&scales;
+ int2 val1;
+ const int * q2 = (const int *)bq2->qs + 8*ib32 + 4*is;
+ int aux32[2];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+#pragma unroll
+ for (int i = 0; i < 4; ++i) {
+ auto values1 = all_values + (((bq2->extra[i+4*is] >> ib32) & 1) << 8);
+ int sumi1 = 0;
+ aux32[0] = ((q2[i] >> 0) & 0x03030303);
+ aux32[1] = ((q2[i] >> 2) & 0x03030303);
+ val1.x = int_from_table_4(aux8+0, values1);
+ val1.y = int_from_table_4(aux8+4, values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[0], ggml_cuda_dp4a(val1.y, q8[1], sumi1));
+ aux32[0] = ((q2[i] >> 4) & 0x03030303);
+ aux32[1] = ((q2[i] >> 6) & 0x03030303);
+ val1.x = int_from_table_4(aux8+0, values1);
+ val1.y = int_from_table_4(aux8+4, values1);
+ sumi1 = ggml_cuda_dp4a(val1.x, q8[2], ggml_cuda_dp4a(val1.y, q8[3], sumi1));
+ const float d = __half2float(bq2->d[i]) * d8;
+ result[i] += d * sumi1 * s8[i];
+ }
}
#define VDR_IQ3_K_Q8_1_MMVQ 4
@@ -608,8 +813,8 @@ __device__ __forceinline__ int int_from_table_2(const uint8_t * a8, const uint16
return values[a8[0] | (a8[1] << 3)] | (values[a8[2] | (a8[3] << 3)] << 16);
}
-__device__ __forceinline__ float vec_dot_iq3_k_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs) {
+__device__ __forceinline__ void vec_dot_iq3_k_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {
const block_iq3_k * bq3 = (const block_iq3_k *) vbq + kbx;
int iqs = iiqs/4;
@@ -667,15 +872,15 @@ __device__ __forceinline__ float vec_dot_iq3_k_q8_1(
const float d = __half2float(bq3->d);
const uint16_t * sl16 = (const uint16_t *)bq3->scales_l + 2*ib128;
aux32 = ((((sl16[0] | (sl16[1] << 16)) >> shift) & 0x0f0f0f0f) << 1) | 0x01010101;
- return d * (__low2float(bq8_1[4*ib128+0].ds) * aux8[0] * (sh & 0x01 ? -1 : 1) * sumi[0] +
- __low2float(bq8_1[4*ib128+1].ds) * aux8[1] * (sh & 0x04 ? -1 : 1) * sumi[1] +
- __low2float(bq8_1[4*ib128+2].ds) * aux8[2] * (sh & 0x10 ? -1 : 1) * sumi[2] +
- __low2float(bq8_1[4*ib128+3].ds) * aux8[3] * (sh & 0x40 ? -1 : 1) * sumi[3]);
+ *result += d * (__low2float(bq8_1[4*ib128+0].ds) * aux8[0] * (sh & 0x01 ? -1 : 1) * sumi[0] +
+ __low2float(bq8_1[4*ib128+1].ds) * aux8[1] * (sh & 0x04 ? -1 : 1) * sumi[1] +
+ __low2float(bq8_1[4*ib128+2].ds) * aux8[2] * (sh & 0x10 ? -1 : 1) * sumi[2] +
+ __low2float(bq8_1[4*ib128+3].ds) * aux8[3] * (sh & 0x40 ? -1 : 1) * sumi[3]);
}
-__device__ __forceinline__ float vec_dot_iq1_bn_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq1_bn_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
half d16; memcpy(&d16, vbq, sizeof(d16));
float scale = d16;
@@ -709,7 +914,7 @@ __device__ __forceinline__ float vec_dot_iq1_bn_q8_1(
sumi = __dp4a(val[0], q8[4*l+0], __dp4a(val[1], q8[4*l+1], __dp4a(val[2], q8[4*l+2], __dp4a(val[3], q8[4*l+3], sumi))));
}
float2 d8 = __half22float2(bq8_1[iqs].ds);
- return scale * (d8.x * sumi - d8.y);
+ *result += scale * (d8.x * sumi - d8.y);
#else
static const uint16_t k_mult[5] = {81, 27, 9, 3, 1};
const int8_t * q8 = bq8_1[iqs].qs;
@@ -729,12 +934,12 @@ __device__ __forceinline__ float vec_dot_iq1_bn_q8_1(
sumi += q8[0]*(vs - 1);
q8++;
}
- return scale * __low2float(bq8_1[iqs].ds) * sumi;
+ *result += scale * __low2float(bq8_1[iqs].ds) * sumi;
#endif
}
-__device__ __forceinline__ float vec_dot_iq2_bn_q8_1(
- const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+__device__ __forceinline__ void vec_dot_iq2_bn_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
float scale = *(const float *)vbq;
const block_iq2_bn * bq2 = (const block_iq2_bn *)((const char *)vbq + sizeof(float)) + kbx;
@@ -756,7 +961,7 @@ __device__ __forceinline__ float vec_dot_iq2_bn_q8_1(
}
auto d8l = __half22float2(bq8_1[0].ds);
auto d8h = __half22float2(bq8_1[1].ds);
- return scale * (d8l.x * (sumi1 + 0.25f*sumi2) + d8h.x * (sumi3 + 0.25f * sumi4) - 0.5f*d8l.y - 0.5f*d8h.y);
+ *result += scale * (d8l.x * (sumi1 + 0.25f*sumi2) + d8h.x * (sumi3 + 0.25f * sumi4) - 0.5f*d8l.y - 0.5f*d8h.y);
#else
int sumi1 = 0, sumi2 = 0, sumi3 = 0, sumi4 = 0;
auto q8l = bq8_1[0].qs + 8*iqs;
@@ -770,7 +975,7 @@ __device__ __forceinline__ float vec_dot_iq2_bn_q8_1(
}
auto d8l = __half22float2(bq8_1[0].ds);
auto d8h = __half22float2(bq8_1[1].ds);
- return scale * (d8l.x * (sumi1 + 0.25f*sumi2) + 0.0625f * d8h.x*(sumi3 + 0.25f*sumi4) - 0.5f*d8l.y - 0.5f*d8h.y);
+ *result += scale * (d8l.x * (sumi1 + 0.25f*sumi2) + 0.0625f * d8h.x*(sumi3 + 0.25f*sumi4) - 0.5f*d8l.y - 0.5f*d8h.y);
#endif
}
@@ -800,6 +1005,38 @@ void mul_mat_vec_iq4_k_q8_1_cuda(
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_K, VDR_IQ4_K_Q8_1_MMVQ, vec_dot_iq4_k_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
}
+void mul_mat_vec_iq4_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
+
+ iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_K_R4, 2, vec_dot_iq4_k_r4_q8_1, 4>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+}
+
+void mul_mat_vec_iq5_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
+
+ iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ5_K_R4, 2, vec_dot_iq5_k_r4_q8_1, 4>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+}
+
+void mul_mat_vec_iq2_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
+
+ iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ2_K_R4, 2, vec_dot_iq2_k_r4_q8_1, 4>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+}
+
+void mul_mat_vec_iq3_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
+
+ iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ3_K_R4, 2, vec_dot_iq3_k_r4_q8_1, 4>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+}
+
void mul_mat_vec_iq4_ks_q8_1_cuda(
const void * vx, const void * vy, float * dst, const char * ids_data,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
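
The key structural change in this file is that vec_dot_q_cuda_t no longer returns a float but accumulates through a float * out-parameter, so one call can fill either a single partial sum (n_interleaved == 1) or all four sums of an interleaved row group. On the non-HIP path this collapses the launch shape for the _r4 types to a single warp owning four rows, with vdr = 2 in all four instantiations above. A compile-time sketch of the resulting geometry (LaunchShape and gemv_shape are illustrative names, not patch code):

// Launch geometry implied by the n_interleaved template parameter on the
// non-HIP path of iqk_mul_mat_vec_q above.
struct LaunchShape { int nwarps; int rows_per_cuda_block; };

constexpr LaunchShape gemv_shape(int ncols_y, int n_interleaved) {
    return n_interleaved == 1
        ? LaunchShape{ ncols_y <= 4 ? 4 : 2, ncols_y == 1 ? 1 : 2 }
        : LaunchShape{ 1, n_interleaved };  // _r4 path: one warp, 4 rows per block
}

static_assert(gemv_shape(/*ncols_y=*/1, /*n_interleaved=*/4).nwarps == 1, "");
static_assert(gemv_shape(1, 4).rows_per_cuda_block == 4, "");
static_assert(gemv_shape(1, 1).nwarps == 4 && gemv_shape(1, 1).rows_per_cuda_block == 1, "");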
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh
index b81d2114..228c513b 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cuh
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh
@@ -60,3 +60,23 @@ void mul_mat_vec_iq2_bn_q8_1_cuda(
const void * vx, const void * vy, float * dst, const char * ids_data,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+
+void mul_mat_vec_iq2_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+
+void mul_mat_vec_iq3_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+
+void mul_mat_vec_iq4_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+
+void mul_mat_vec_iq5_k_r4_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
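
With these declarations, a caller launches an _r4 GEMV just like the existing mmvq paths. A hypothetical standalone invocation, with parameter roles inferred from the dispatch in mmvq.cu below (ncols_x = row length ne00, nrows_x = number of weight rows, nrows_y = padded q8_1 activation row size, ncols_y = batch columns 1..8); a sketch only, not upstream code:

// Hypothetical wrapper; vx holds IQ4_K_R4 weights, vy holds q8_1-quantized
// activations, dst receives float results. With ne2 = 1 the batch strides
// nb02/nb12/nb2 are unused, so 0 is passed.
#include <cuda_runtime.h>
#include "iqk_mmvq.cuh"

void gemv_iq4_k_r4(const void * vx, const void * vy, float * dst,
                   int ne00, int nrows, int ne00_padded, cudaStream_t stream) {
    mul_mat_vec_iq4_k_r4_q8_1_cuda(vx, vy, dst, /*ids_data=*/nullptr,
        /*ncols_x=*/ne00, /*nrows_x=*/nrows, /*nrows_y=*/ne00_padded,
        /*ncols_y=*/1, /*nrows_dst=*/nrows,
        /*ne2=*/1, /*nb02=*/0, /*nb12=*/0, /*nb2=*/0, /*ids_nb0=*/0, stream);
}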
diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu
index 89b74f4b..d7bed266 100644
--- a/ggml/src/ggml-cuda/mmvq.cu
+++ b/ggml/src/ggml-cuda/mmvq.cu
@@ -542,6 +542,18 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm
case GGML_TYPE_IQ3_S:
mul_mat_vec_iq3_s_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
+ case GGML_TYPE_IQ2_K_R4:
+ mul_mat_vec_iq2_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+ break;
+ case GGML_TYPE_IQ3_K_R4:
+ mul_mat_vec_iq3_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+ break;
+ case GGML_TYPE_IQ4_K_R4:
+ mul_mat_vec_iq4_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+ break;
+ case GGML_TYPE_IQ5_K_R4:
+ mul_mat_vec_iq5_k_r4_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+ break;
default:
GGML_ABORT("fatal error");
break;
@@ -655,6 +667,10 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) {
case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ2_K_R4:
+ case GGML_TYPE_IQ3_K_R4:
+ case GGML_TYPE_IQ4_K_R4:
+ case GGML_TYPE_IQ5_K_R4:
return true;
default:
return false;