author     Iwan Kawrakow <iwan.kawrakow@gmail.com>  2024-07-29 12:38:46 +0300
committer  Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-08-01 09:38:06 +0200
commit     5d341757bc73efee0deba07a17679a965037753b (patch)
tree       33db7a06b6b3ae1067a1d3db36192926705c678f /ggml/src/ggml-cuda
parent     06e255ac9da49cabde466b9ef8b3c09c0f8d8dd1 (diff)
iq5_k: Basics
Quantize/dequantize, CUDA dequantize
Diffstat (limited to 'ggml/src/ggml-cuda')
-rw-r--r--  ggml/src/ggml-cuda/common.cuh    7
-rw-r--r--  ggml/src/ggml-cuda/convert.cu   37
-rw-r--r--  ggml/src/ggml-cuda/mmvq.cu      12
-rw-r--r--  ggml/src/ggml-cuda/vecdotq.cuh  32
4 files changed, 88 insertions, 0 deletions
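
Note (not part of the commit): the new CUDA code operates on the block_iq5_k layout defined in ggml/src/ggml-common.h, which is outside this diff. The sketch below is inferred from the field accesses in dequantize_block_iq5_k and the commented-out vec_dot further down; the field order and exact types are assumptions, while the sizes follow from the index arithmetic (QK_K = 256).

    // Inferred IQ5_K super-block layout (illustration only, not part of this diff)
    typedef struct {
        ggml_half d;                  // super-block scale
        uint16_t  extra;              // 1 bit per 16-value sub-block: selects the upper half of the 64-entry codebook
        uint8_t   scales_h[QK_K/64];  // high 2 bits of the sixteen 6-bit sub-block scales
        uint8_t   scales_l[QK_K/32];  // low 4 bits of the sub-block scales, two per byte
        uint8_t   qs[QK_K/2];         // low 4 bits of every 5-bit quant, two per byte
        uint8_t   qh[QK_K/8];         // 5th bit of every quant, eight per byte
    } block_iq5_k;
    // 2 + 2 + 4 + 8 + 128 + 32 = 176 bytes per 256 weights, i.e. 5.5 bpw
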
diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index 12eebb00..ff37dd56 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -684,6 +684,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ4_K> {
};
template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ5_K> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR4_XS;
+ static constexpr int qi = QI4_XS;
+};
+
+template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ3_S> {
static constexpr int qk = QK_K;
static constexpr int qr = QR3_S;
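
Note (not part of the commit): the new traits borrow the IQ4_XS ratios, presumably because qs[] of IQ5_K packs the low 4 bits of every value exactly like a 4-bit type, while the 5th bit (qh[]) is handled inside the IQ5_K kernels themselves and does not change the generic block bookkeeping. A compile-time restatement of that, for illustration only; all names come from common.cuh and the hunk above.

    static_assert(ggml_cuda_type_traits<GGML_TYPE_IQ5_K>::qk == QK_K, "one super-block holds QK_K values");
    static_assert(ggml_cuda_type_traits<GGML_TYPE_IQ5_K>::qr == QR4_XS &&
                  ggml_cuda_type_traits<GGML_TYPE_IQ5_K>::qi == QI4_XS,
                  "IQ5_K reuses the IQ4_XS tiling in the generic kernels");
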
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 6dd0fc50..f388e9f3 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -544,6 +544,33 @@ static __global__ void dequantize_block_iq4_k(const void * __restrict__ vx, dst_
}
template<typename dst_t>
+static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
+
+ const int i = blockIdx.x;
+ const block_iq5_k * x = (const block_iq5_k *) vx;
+
+ const int tid = threadIdx.x;
+ int ib64 = tid/8; // 0...3
+ int il = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 64*ib64 + 2*il;
+ const float d = (float)x[i].d;
+ const float dl1 = d * (((x[i].scales_l[2*ib64+0] & 0xf) | ((x[i].scales_h[ib64] << 4) & 0x30)) - 32);
+ const float dl2 = d * (((x[i].scales_l[2*ib64+0] >> 4) | ((x[i].scales_h[ib64] << 2) & 0x30)) - 32);
+ const float dl3 = d * (((x[i].scales_l[2*ib64+1] & 0xf) | ((x[i].scales_h[ib64] >> 0) & 0x30)) - 32);
+ const float dl4 = d * (((x[i].scales_l[2*ib64+1] >> 4) | ((x[i].scales_h[ib64] >> 2) & 0x30)) - 32);
+ const uint8_t * qs = x[i].qs + 32*ib64 + 2*il;
+ const uint8_t * qh = x[i].qh + 2*il;
+ const uint8_t extra = x[i].extra >> 4*(ib64%4);
+ for (int j = 0; j < 2; ++j) {
+ const uint8_t h1 = qh[j] >> 2*(ib64%4), h2 = qh[j+16] >> 2*(ib64%4);
+ y[j+ 0] = dl1 * iq5nl_values[(qs[j+ 0] & 0xf) | ((h1 & 1) << 4) | ((extra << 5) & 0x20)];
+ y[j+16] = dl2 * iq5nl_values[(qs[j+16] & 0xf) | ((h2 & 1) << 4) | ((extra << 4) & 0x20)];
+ y[j+32] = dl3 * iq5nl_values[(qs[j+ 0] >> 4) | ((h1 & 2) << 3) | ((extra << 3) & 0x20)];
+ y[j+48] = dl4 * iq5nl_values[(qs[j+16] >> 4) | ((h2 & 2) << 3) | ((extra << 2) & 0x20)];
+ }
+}
+
+template<typename dst_t>
static __global__ void dequantize_block_iq2_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
@@ -705,6 +732,12 @@ static void dequantize_row_iq4_k_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq5_k_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) {
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq5_k<<<nb, 32, 0, stream>>>(vx, y);
+}
+
+template<typename dst_t>
static void dequantize_row_iq2_k_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) {
const int nb = (k + QK_K - 1) / QK_K;
dequantize_block_iq2_k<<<nb, 32, 0, stream>>>(vx, y);
@@ -776,6 +809,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_K:
return dequantize_row_iq4_k_cuda;
+ case GGML_TYPE_IQ5_K:
+ return dequantize_row_iq5_k_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_S:
@@ -831,6 +866,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_K:
return dequantize_row_iq4_k_cuda;
+ case GGML_TYPE_IQ5_K:
+ return dequantize_row_iq5_k_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_S:
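
Note (not part of the commit): in the kernel above, each weight's 6-bit index into the iq5nl_values codebook is assembled from a low nibble in qs[], one bit in qh[], and one "extra" bit. The following scalar restatement of that index math uses a hypothetical helper name and the inferred block_iq5_k layout sketched earlier; it is an illustration, not code from the commit.

    // pos is the value's position (0..63) inside 64-value group ib64 (0..3) of one super-block.
    static __device__ __forceinline__ int iq5_k_codebook_index(const block_iq5_k & x, int ib64, int pos) {
        const int sub = pos/16, k = pos%16;                 // 16-value sub-block and offset within it
        const uint8_t q = x.qs[32*ib64 + 16*(sub & 1) + k]; // byte holding this value's low 4 bits
        const int low4  = sub < 2 ? (q & 0xf) : (q >> 4);   // sub-blocks 0,1 use the low nibble; 2,3 the high one
        const int bit5  = (x.qh[16*(sub & 1) + k] >> (2*ib64 + (sub >> 1))) & 1;  // the quant's 5th bit
        const int shift = (x.extra >> (4*ib64 + sub)) & 1;  // per-sub-block selector for the codebook's upper half
        return low4 | (bit5 << 4) | (shift << 5);           // 6-bit index into the 64-entry iq5nl_values table
    }
    // The dequantized weight is then d * (6-bit sub-block scale - 32) * iq5nl_values[index],
    // with the scale assembled from scales_l/scales_h exactly as dl1..dl4 in the kernel above.
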
diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu
index b99dc245..776ca80f 100644
--- a/ggml/src/ggml-cuda/mmvq.cu
+++ b/ggml/src/ggml-cuda/mmvq.cu
@@ -25,6 +25,7 @@ static constexpr __device__ vec_dot_q_cuda_t get_vec_dot_q_cuda(ggml_type type)
type == GGML_TYPE_IQ4_NL ? vec_dot_iq4_nl_q8_1 :
type == GGML_TYPE_IQ4_XS ? vec_dot_iq4_xs_q8_1 :
type == GGML_TYPE_IQ4_K ? vec_dot_iq4_k_q8_1 :
+ type == GGML_TYPE_IQ5_K ? vec_dot_iq5_k_q8_1 :
type == GGML_TYPE_IQ2_K ? vec_dot_iq2_k_q8_1 :
type == GGML_TYPE_IQ3_S ? vec_dot_iq3_s_q8_1 :
nullptr;
@@ -49,6 +50,7 @@ static constexpr __device__ int get_vdr_mmvq(ggml_type type) {
type == GGML_TYPE_IQ4_NL ? VDR_IQ4_NL_Q8_1_MMVQ :
type == GGML_TYPE_IQ4_XS ? VDR_IQ4_XS_Q8_1_MMVQ :
type == GGML_TYPE_IQ4_K ? VDR_IQ4_K_Q8_1_MMVQ :
+ type == GGML_TYPE_IQ5_K ? VDR_IQ5_K_Q8_1_MMVQ :
type == GGML_TYPE_IQ2_K ? VDR_IQ2_K_Q8_1_MMVQ :
1;
}
@@ -354,6 +356,13 @@ static void mul_mat_vec_iq4_k_q8_1_cuda(
mul_mat_vec_q_cuda<GGML_TYPE_IQ4_K>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
}
+static void mul_mat_vec_iq5_k_q8_1_cuda(
+ const void * vx, const void * vy, float * dst,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
+
+ mul_mat_vec_q_cuda<GGML_TYPE_IQ5_K>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
+}
+
static void mul_mat_vec_iq2_k_q8_1_cuda(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
@@ -452,6 +461,9 @@ void ggml_cuda_op_mul_mat_vec_q(
case GGML_TYPE_IQ4_K:
mul_mat_vec_iq4_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
+ case GGML_TYPE_IQ5_K:
+ mul_mat_vec_iq5_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
+ break;
case GGML_TYPE_IQ2_K:
mul_mat_vec_iq2_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
diff --git a/ggml/src/ggml-cuda/vecdotq.cuh b/ggml/src/ggml-cuda/vecdotq.cuh
index 97a5619f..414f580b 100644
--- a/ggml/src/ggml-cuda/vecdotq.cuh
+++ b/ggml/src/ggml-cuda/vecdotq.cuh
@@ -1274,6 +1274,38 @@ static __device__ __forceinline__ float vec_dot_iq4_k_q8_1(
return d * (sumi1 * ls1 + sumi2 * ls2);
}
+#define VDR_IQ5_K_Q8_1_MMVQ 4
+#define VDR_IQ5_K_Q8_1_MMQ 4
+
+// TODO
+static __device__ __forceinline__ float vec_dot_iq5_k_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+ return 0;
+
+// const block_iq5_k * bq4 = (const block_iq5_k *) vbq + kbx;
+// const uint8_t * all_values = (const uint8_t *)iq4k_values;
+//
+// // iqs is 0...28
+// const int ib32 = iqs/4;
+// // Why iqs/4 ?
+// const int32_t * q8 = (const int *)bq8_1[ib32].qs;
+// const uint16_t * q4 = (const uint16_t *)bq4->qs + 8*ib32;
+// const uint16_t extra = bq4->extra >> 2*ib32;
+// int v1, v2;
+// int sumi1 = 0, sumi2 = 0;
+// for (int j = 0; j < 4; ++j) {
+// const uint32_t aux32 = q4[2*j+0] | (q4[2*j+1] << 16);
+// get_int_from_table_16_shift(aux32, extra, all_values, v1, v2);
+// sumi1 = ggml_cuda_dp4a(v1, q8[j+0], sumi1);
+// sumi2 = ggml_cuda_dp4a(v2, q8[j+4], sumi2);
+// }
+// const float d = __half2float(bq4->d) * __low2float(bq8_1[ib32].ds);
+// const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2);
+// const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32;
+// const int ls2 = ((bq4->scales_l[ib32] >> 4) | ((sh << 2) & 0x30)) - 32;
+// return d * (sumi1 * ls1 + sumi2 * ls2);
+}
+
#define VDR_IQ2_K_Q8_1_MMVQ 4
#define VDR_IQ2_K_Q8_1_MMQ 4
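
Note (not part of the commit): vec_dot_iq5_k_q8_1 is deliberately left as a stub here, so the MMVQ path wired up in mmvq.cu returns zeros for IQ5_K until it is implemented; the commented-out body is the IQ4_K kernel kept as a starting point. As a reference for what the TODO has to compute for one 32-value q8_1 sub-block (ib32 = 0..7), here is a hedged scalar sketch derived from the dequantize kernel in convert.cu. The function name is hypothetical, it assumes the inferred block_iq5_k layout sketched above and the 64-entry iq5nl_values codebook used by the dequantize kernel, and it is far too slow to be the real kernel, which will want dp4a over packed codebook values as in vec_dot_iq4_k_q8_1.

    static __device__ float vec_dot_iq5_k_q8_1_ref(const block_iq5_k & x, const block_q8_1 & y8, int ib32) {
        const int ib64 = ib32/2, hi = ib32%2;                    // 64-value group, and which 32-value half of it
        const uint8_t * qs = x.qs + 32*ib64;                     // low 4 bits, shared by both halves of the group
        const uint8_t  sh  = x.scales_h[ib64];                   // high 2 bits of the group's four 6-bit scales
        const int extra    = (x.extra >> (4*ib64 + 2*hi)) & 3;   // codebook-shift bits of the two 16-value rows
        const int ls1 = ((x.scales_l[2*ib64 + hi] & 0xf) | (((sh >> (4*hi + 0)) & 3) << 4)) - 32;
        const int ls2 = ((x.scales_l[2*ib64 + hi] >>  4) | (((sh >> (4*hi + 2)) & 3) << 4)) - 32;
        int sumi1 = 0, sumi2 = 0;
        for (int k = 0; k < 16; ++k) {
            const int b1 = (x.qh[k     ] >> (2*ib64 + hi)) & 1;  // 5th bit, first 16-value row
            const int b2 = (x.qh[k + 16] >> (2*ib64 + hi)) & 1;  // 5th bit, second 16-value row
            const int v1 = iq5nl_values[(hi ? qs[k     ] >> 4 : qs[k     ] & 0xf) | (b1 << 4) | ((extra & 1) << 5)];
            const int v2 = iq5nl_values[(hi ? qs[k + 16] >> 4 : qs[k + 16] & 0xf) | (b2 << 4) | ((extra & 2) << 4)];
            sumi1 += v1 * y8.qs[k];                              // block_q8_1 stores int8 quants in qs[]
            sumi2 += v2 * y8.qs[k + 16];
        }
        return __half2float(x.d) * __low2float(y8.ds) * (ls1 * sumi1 + ls2 * sumi2);
    }
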