author     Iwan Kawrakow <iwan.kawrakow@gmail.com>                  2024-08-07 17:25:21 +0300
committer  Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-08-09 16:00:31 +0200
commit     c3f5e4d9a7ddad8e7af6dd43807815496acddab3 (patch)
tree       753d98457de5ba555c7c00b3c680349fa531ab66
parent     a9b3f4a54b544a6e9adde65673533e0154d7767a (diff)
iq6_k: CUDA dequantize
We get a slightly better PPL for LLaMA-3.1-8B compared to q6_K
(0.14% vs 0.26% quantization error).
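Aside: the accuracy edge over q6_K comes from the non-uniform grid. Instead of 64 equally spaced levels, IQ6_K maps each 6-bit index through the cubic A_IQ6K + B_IQ6K*i - C_IQ6K*i*i + D_IQ6K*i*i*i, plus a per-64-block "extra" bit that shifts the grid by S_IQ6K. A standalone C++ sketch (not part of this commit; it just replays the table construction from quantize_iq6_k in the diff below) shows the resulting spacing, about 6.1 between levels at the low end versus about 2.8 near the middle of the grid:

    #include <cstdio>

    int main() {
        // Constants copied from the diff below (A_IQ6K ... S_IQ6K).
        const float A = -127.f, B = 6.2568f, C = 0.11218f, D = 0.0011972f, S = 1.f;
        float values[128];
        for (int i = 0; i < 64; ++i) {
            values[i]    = A + B*i - C*i*i + D*i*i*i;  // non-uniform 6-bit grid
            values[i+64] = values[i] + S;              // same grid, shifted by the extra bit
        }
        // Step size near the ends vs. near the middle of the grid:
        printf("tail step %.2f, center step %.2f\n",
               values[1] - values[0], values[32] - values[31]);
        return 0;
    }

The finer resolution near the middle of the range, where most weights concentrate, is presumably what buys the lower quantization error.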
-rw-r--r--  ggml/src/ggml-cuda.cu          |  1
-rw-r--r--  ggml/src/ggml-cuda/convert.cu  | 47
-rw-r--r--  ggml/src/iqk/iqk_quantize.cpp  | 11
3 files changed, 54 insertions(+), 5 deletions(-)
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index a115a1b4..7641d5b5 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -2757,6 +2757,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_TYPE_IQ3_K:
         case GGML_TYPE_IQ4_K:
         case GGML_TYPE_IQ5_K:
+        case GGML_TYPE_IQ6_K:
         case GGML_TYPE_IQ1_BN:
         case GGML_TYPE_IQ2_BN:
         case GGML_TYPE_IQ2_TN:
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 47ab92f0..db5fd2dd 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -591,6 +591,43 @@ static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_
     }
 }
 
+#define A_IQ6K -127.f
+#define B_IQ6K 6.2568f
+#define C_IQ6K 0.11218f
+#define D_IQ6K 0.0011972f
+#define S_IQ6K 1
+
+template<typename dst_t>
+static __global__ void dequantize_block_iq6_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
+
+    const int i = blockIdx.x;
+    const block_iq6_k * x = (const block_iq6_k *) vx;
+
+    const int tid = threadIdx.x;
+    int ib64 = tid/8; // 0...3
+    int il   = tid%8; // 0...7
+    dst_t * y = yy + i*QK_K + 64*ib64 + 2*il;
+    const float d = (float)x[i].d;
+    const float dl1 = d * x[i].scales[4*ib64+0];
+    const float dl2 = d * x[i].scales[4*ib64+1];
+    const float dl3 = d * x[i].scales[4*ib64+2];
+    const float dl4 = d * x[i].scales[4*ib64+3];
+    const uint8_t * qs = x[i].qs + 32*ib64 + 2*il;
+    const uint8_t * qh = x[i].qh + 32*(ib64/2) + 2*il;
+    const uint8_t extra = x[i].extra >> 4*(ib64%4);
+    for (int j = 0; j < 2; ++j) {
+        const uint8_t h1 = qh[j] >> 4*(ib64%2), h2 = qh[j+16] >> 4*(ib64%2);
+        float q1 = (qs[j+ 0] & 0xf) | ((h1 & 0x03) << 4);
+        float q2 = (qs[j+16] & 0xf) | ((h2 & 0x03) << 4);
+        float q3 = (qs[j+ 0] >>  4) | ((h1 & 0x0c) << 2);
+        float q4 = (qs[j+16] >>  4) | ((h2 & 0x0c) << 2);
+        y[j+ 0] = dl1 * (A_IQ6K + q1*(B_IQ6K + q1*(-C_IQ6K + q1*D_IQ6K)) + (extra & 1 ? S_IQ6K : 0));
+        y[j+16] = dl2 * (A_IQ6K + q2*(B_IQ6K + q2*(-C_IQ6K + q2*D_IQ6K)) + (extra & 2 ? S_IQ6K : 0));
+        y[j+32] = dl3 * (A_IQ6K + q3*(B_IQ6K + q3*(-C_IQ6K + q3*D_IQ6K)) + (extra & 4 ? S_IQ6K : 0));
+        y[j+48] = dl4 * (A_IQ6K + q4*(B_IQ6K + q4*(-C_IQ6K + q4*D_IQ6K)) + (extra & 8 ? S_IQ6K : 0));
+    }
+}
+
 template<typename dst_t>
 static __global__ void dequantize_block_iq2_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
 
@@ -803,6 +840,12 @@ static void dequantize_row_iq5_k_cuda(const void * vx, dst_t * y, const int64_t
     dequantize_block_iq5_k<<<nb, 32, 0, stream>>>(vx, y);
 }
 
+template<typename dst_t>
+static void dequantize_row_iq6_k_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) {
+    const int nb = (k + QK_K - 1) / QK_K;
+    dequantize_block_iq6_k<<<nb, 32, 0, stream>>>(vx, y);
+}
+
 template <typename src_t, typename dst_t>
 static __global__ void convert_unary(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k) {
     const int64_t i = (int64_t)blockDim.x*blockIdx.x + threadIdx.x;
@@ -877,6 +920,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
             return dequantize_row_iq4_k_cuda;
         case GGML_TYPE_IQ5_K:
             return dequantize_row_iq5_k_cuda;
+        case GGML_TYPE_IQ6_K:
+            return dequantize_row_iq6_k_cuda;
         case GGML_TYPE_IQ3_S:
             return dequantize_row_iq3_s_cuda;
        case GGML_TYPE_F32:
@@ -938,6 +983,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
            return dequantize_row_iq4_k_cuda;
         case GGML_TYPE_IQ5_K:
             return dequantize_row_iq5_k_cuda;
+        case GGML_TYPE_IQ6_K:
+            return dequantize_row_iq6_k_cuda;
         case GGML_TYPE_IQ3_S:
             return dequantize_row_iq3_s_cuda;
         case GGML_TYPE_F16:
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 9d43506c..ead29b04 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -1522,6 +1522,7 @@ size_t quantize_iq5_k(const float * src, void * dst, int64_t nrows, int64_t n_pe
 #define B_IQ6K 6.2568f
 #define C_IQ6K 0.11218f
 #define D_IQ6K 0.0011972f
+#define S_IQ6K 1.f
 
 void dequantize_row_iq6_k(const block_iq6_k * x, float * y, int64_t k) {
     assert(k % QK_K == 0);
@@ -1543,10 +1544,10 @@ void dequantize_row_iq6_k(const block_iq6_k * x, float * y, int64_t k) {
             float dl2 = d * sl[4*ib64 + 1];
             float dl3 = d * sl[4*ib64 + 2];
             float dl4 = d * sl[4*ib64 + 3];
-            float m1 = extra & 1 ? 1 : 0;
-            float m2 = extra & 2 ? 1 : 0;
-            float m3 = extra & 4 ? 1 : 0;
-            float m4 = extra & 8 ? 1 : 0;
+            float m1 = extra & 1 ? S_IQ6K : 0;
+            float m2 = extra & 2 ? S_IQ6K : 0;
+            float m3 = extra & 4 ? S_IQ6K : 0;
+            float m4 = extra & 8 ? S_IQ6K : 0;
             for (int j = 0; j < 16; ++j) {
                 float q1 = ((qs[j+ 0] & 0xf) | (((qh[j+ 0] >> shift) & 0x03) << 4));
                 float q2 = ((qs[j+16] & 0xf) | (((qh[j+16] >> shift) & 0x03) << 4));
@@ -1868,7 +1869,7 @@ size_t quantize_iq6_k(const float * src, void * dst, int64_t nrows, int64_t n_pe
     float values[128];
     for (int i = 0; i < 64; ++i) {
         values[i]    = A_IQ6K + B_IQ6K*i - C_IQ6K*i*i + D_IQ6K*i*i*i;
-        values[i+64] = values[i] + 1.f;
+        values[i+64] = values[i] + S_IQ6K;
     }
     for (int64_t row = 0; row < nrows; ++row) {
         quantize_row_iq6_k_impl(src, (void *)qrow, n_per_row, imatrix, values, values + 64);
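For reference, dequantize_row_iq6_k_cuda launches one 32-thread block per QK_K = 256-value superblock: each thread selects a 64-value chunk (ib64 = tid/8) and a pair offset within it (il = tid%8), then writes two values into each of four 16-wide rows, i.e. 8 outputs per thread. A standalone C++ sketch (not part of the commit) that replays this index arithmetic and checks that the 32 threads cover all 256 outputs exactly once:

    #include <cstdio>

    int main() {
        const int QK_K = 256;
        int hits[QK_K] = {0};
        for (int tid = 0; tid < 32; ++tid) {   // one warp per superblock
            int ib64 = tid/8;                  // 64-value chunk: 0...3
            int il   = tid%8;                  // pair offset: 0...7
            int base = 64*ib64 + 2*il;         // mirrors y = yy + i*QK_K + 64*ib64 + 2*il
            for (int j = 0; j < 2; ++j)
                for (int row = 0; row < 4; ++row)
                    ++hits[base + j + 16*row]; // mirrors y[j+0], y[j+16], y[j+32], y[j+48]
        }
        for (int k = 0; k < QK_K; ++k)
            if (hits[k] != 1) { printf("gap/overlap at %d\n", k); return 1; }
        printf("all %d outputs written exactly once\n", QK_K);
        return 0;
    }

32 threads times 8 values per thread matches the <<<nb, 32>>> launch, with nb rounding k up to whole QK_K superblocks.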