author     Kawrakow <iwankawrakow@gmail.com>  2025-07-20 10:05:23 +0200
committer  GitHub <noreply@github.com>        2025-07-20 10:05:23 +0200
commit  f989fb03bd12752ad6e93717ca4bd298d5001d99 (patch)
tree    7a127aba5c05667904b7e28a46d07c2d295ef619 /ggml/src/ggml-cuda/template-instances
parent  07673c6c33753487dd054dcff37f19d93d6c56d3 (diff)
Adding IQ1_KT - 1.75 bpw SOTA quants (#616)
* iq1_kt: basics
* iq1_kt: CUDA dequantize
  Testing with LLaMA-3.1-8B-Instruct, we get almost the same PPL as iq2_xxs,
  so about 0.2 bpw fewer bits for the same quality.
* iq1_kt: CUDA MMQ
* iq1_kt: CUDA MMVQ
* iq1_kt: AVX2 GEMM/GEMV
* iq1_kt: convert/repack to q8_0_r8 (AVX2)
* iq1_kt: slightly faster GEMV
  18.6 t/s -> 19.4 t/s
* iq1_kt: NEON GEMM/GEMV
  Pathetic as usual
* iq1_kt: slightly faster NEON - still pathetic
* iq1_kt: tiny bit better GEMV on NEON
* iq1_kt: convert/repack to q8_0_r8 (NEON)
* iq1_kt: very slightly faster convert/repack to q8_0_r8 on NEON
* Adding forgotten file
* iq1_kt: add to constants.py

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
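IQ1_KT decodes each weight on the fly from a small seed by iterating a multiplicative trellis generator, which is what the CUDA loader in this diff does via ggml_cuda_dp4a. For orientation, here is a minimal host-side sketch of that per-value step, assuming the same constants as the device code; trellis_next is an illustrative name, not part of the repository.

#include <cstdint>

// Advance the trellis state once and emit one signed 8-bit value:
// mask each of the four bytes to 6 bits and sum them, biased by -126.
static inline int8_t trellis_next(uint32_t & val) {
    constexpr uint32_t ka = 0xCBAC1FED; // multiplicative state update
    constexpr uint32_t km = 0x3f3f3f3f; // keep the low 6 bits of each byte
    val *= ka;                          // advance the generator state
    const uint32_t v = val & km;
    int sum = -126;                     // 4 bytes in [0,63] -> result in [-126,126]
    for (int b = 0; b < 4; ++b) sum += int((v >> 8*b) & 0xff);
    return int8_t(sum);
}

Each call advances the state and yields one int8 weight; the device loop below packs eight such values per lane into v.x and v.y before storing them in the q8_0 tile layout.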
Diffstat (limited to 'ggml/src/ggml-cuda/template-instances')
-rw-r--r--  ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_kt.cu  81
1 file changed, 81 insertions, 0 deletions
diff --git a/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_kt.cu b/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_kt.cu
new file mode 100644
index 00000000..1a3590e5
--- /dev/null
+++ b/ggml/src/ggml-cuda/template-instances/mmq-instance-iq1_kt.cu
@@ -0,0 +1,81 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../mmq.cuh"
+
+template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq1_kt(
+ const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
+
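+    // Trellis constants: ka advances the generator state, km keeps the low 6 bits of each byte.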
+ constexpr uint32_t ka = 0xCBAC1FED;
+ constexpr uint32_t km = 0x3f3f3f3f;
+
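+    // The shared tile is split into int8 quants (x_qs) and float block scales (x_df);
+    // the offset of x_df differs between the MMA and dp4a layouts.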
+#ifdef INT8_MMA_AVAILABLE
+ int * x_qs = (int *) x_tile;
+ float * x_df = (float *) (x_qs + WARP_SIZE*2);
+#else
+ constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y);
+ int * x_qs = (int *) x_tile;
+ float * x_df = (float *) (x_qs + txs.qs);
+#endif // INT8_MMA_AVAILABLE
+
+ const int kqsx = threadIdx.x;
+
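+    // First pass: each lane decodes eight consecutive int8 weights from one trellis seed.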
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + threadIdx.y;
+
+ if (need_check) {
+ i = min(i, i_max);
+ }
+
+ const block_iq1_kt * bxi = (const block_iq1_kt *)(x + i*stride + sizeof(float)) + kbx0;
+
+ int ib32 = kqsx/4;
+ int j = kqsx%4;
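+        // Assemble the trellis seed: a 13-bit index (8 bits from ql, 4 from a nibble
+        // of qh, 1 from the high nibble of sh), offset by 4096.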
+ uint32_t val = bxi->ql[kqsx] + ((bxi->qh[kqsx%16] << (8 - 4*(kqsx/16))) & 0xf00) + ((bxi->sh[kqsx/4] << (8 - (kqsx%4))) & 0x1000) + 4096;
+ int2 v = {0, 0};
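+        // Each step multiplies the state by ka; dp4a sums the four 6-bit bytes
+        // (bias -126) to yield one int8, packed byte-by-byte into v.x and v.y.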
+ for (int k = 0; k < 4; ++k) {
+ val *= ka;
+ v.x |= (ggml_cuda_dp4a(val & km, 0x01010101, -126) & 0xff) << 8*k;
+ }
+ for (int k = 0; k < 4; ++k) {
+ val *= ka;
+ v.y |= (ggml_cuda_dp4a(val & km, 0x01010101, -126) & 0xff) << 8*k;
+ }
+#ifdef INT8_MMA_AVAILABLE
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 0] = v.x;
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 1] = v.y;
+#else
+ x_qs[i*(2*WARP_SIZE + 1) + 8*ib32 + 2*j + 0] = v.x;
+ x_qs[i*(2*WARP_SIZE + 1) + 8*ib32 + 2*j + 1] = v.y;
+#endif // INT8_MMA_AVAILABLE
+ }
+
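+    // Second pass: load the block scales; each warp covers four rows, with one
+    // group of eight lanes reading the eight scales of one row.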
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
+ int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
+
+ if (need_check) {
+ i = min(i, i_max);
+ }
+
+ const float * dptr = (const float *)(x + i*stride);
+ const float d = dptr[0];
+ const block_iq1_kt * bxi = (const block_iq1_kt *)(dptr + 1) + kbx0;
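+        // The low nibble of sh is the 4-bit block scale, mapped through the
+        // non-linear iq4k_values table; the high nibble feeds the trellis seed above.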
+ const int ls = iq4k_values[bxi->sh[threadIdx.x % 8] & 0xf];
+
+#ifdef INT8_MMA_AVAILABLE
+ x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = d * ls;
+#else
+ x_df[i*(WARP_SIZE/4) + i/4 + threadIdx.x % 8] = d * ls;
+#endif // INT8_MMA_AVAILABLE
+ }
+}
+
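+// The loader above emits standard q8_0 tiles, so the generic q8_0 x q8_1
+// dot-product kernels can be reused unchanged.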
+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ1_KT> {
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq1_kt<mmq_y, nwarps, need_check>;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, nwarps, MMQ_Q8_1_DS_LAYOUT_D4>;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
+DECL_MMQ_CASE(GGML_TYPE_IQ1_KT);
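Design note: because the tile loader expands IQ1_KT weights into the standard q8_0 layout up front, no type-specific dot product is needed; the traits struct points both vec_dot_mma and vec_dot_dp4a at the existing q8_0 x q8_1 kernels, and DECL_MMQ_CASE instantiates the MMQ kernel for the new type.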