Diffstat (limited to 'ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_k_r4.cu')
-rw-r--r-- | ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_k_r4.cu | 86
1 file changed, 86 insertions, 0 deletions
diff --git a/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_k_r4.cu b/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_k_r4.cu
new file mode 100644
index 00000000..d7b5a18e
--- /dev/null
+++ b/ggml/src/ggml-cuda/template-instances/mmq-instance-iq2_k_r4.cu
@@ -0,0 +1,86 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../mmq.cuh"
+
+template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq2_k_r4(
+    const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
+
+#ifdef INT8_MMA_AVAILABLE
+    int   * x_qs = (int   *)  x_tile;
+    float * x_df = (float *) (x_qs + WARP_SIZE*2);
+#else
+    constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16;
+    int   * x_qs = (int   *)  x_tile;
+    float * x_df = (float *) (x_qs + txs.qs);
+#endif // INT8_MMA_AVAILABLE
+
+    const int kqsx = threadIdx.x/4; // 0...7 -> block of 32
+
+    uint32_t aux32[4];
+    const uint8_t * aux8 = (const uint8_t *)aux32;
+#pragma unroll
+    for (int i0 = 0; i0 < mmq_y; i0 += 4*nwarps) {
+        int i = i0 + 4*threadIdx.y + threadIdx.x%4;
+
+        if (need_check) {
+            i = min(i, i_max);
+        }
+        int i4 = i/4;
+        int ir = i%4;
+
+        const block_iq2_k_r4 * bxi = (const block_iq2_k_r4 *)(x + 4*i4*stride) + kbx0;
+
+        const float d = __half2float(bxi->d[ir]);
+
+        #pragma unroll
+        for (int l = 0; l < 2; ++l) {
+
+            auto values_l = iq2nl_values + (((bxi->extra[ir+4*l] >> kqsx) & 1) << 2);
+
+            const int ql = get_int_b4(bxi->qs, 8*kqsx + ir + 4*l);
+            aux32[0] = (ql >> 0) & 0x03030303;
+            aux32[1] = (ql >> 2) & 0x03030303;
+            aux32[2] = (ql >> 4) & 0x03030303;
+            aux32[3] = (ql >> 6) & 0x03030303;
+
+            const char4 val0 = make_char4(values_l[aux8[ 0]], values_l[aux8[ 1]], values_l[aux8[ 2]], values_l[aux8[ 3]]);
+            const char4 val1 = make_char4(values_l[aux8[ 4]], values_l[aux8[ 5]], values_l[aux8[ 6]], values_l[aux8[ 7]]);
+            const char4 val2 = make_char4(values_l[aux8[ 8]], values_l[aux8[ 9]], values_l[aux8[10]], values_l[aux8[11]]);
+            const char4 val3 = make_char4(values_l[aux8[12]], values_l[aux8[13]], values_l[aux8[14]], values_l[aux8[15]]);
+
+#ifdef INT8_MMA_AVAILABLE
+            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 0] = *(const int *)&val0;
+            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 1] = *(const int *)&val1;
+            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 2] = *(const int *)&val2;
+            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 3] = *(const int *)&val3;
+#else
+            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 0] = *(const int *)&val0;
+            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 1] = *(const int *)&val1;
+            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 2] = *(const int *)&val2;
+            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 3] = *(const int *)&val3;
+#endif // INT8_MMA_AVAILABLE
+        }
+
+        int is = 8*kqsx + ir;
+        float dl1 = d * (((bxi->scales[is%32] >> 4*(is/32)) & 0xf) - 8);
+        is += 4;
+        float dl2 = d * (((bxi->scales[is%32] >> 4*(is/32)) & 0xf) - 8);
+
+#ifdef INT8_MMA_AVAILABLE
+        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = dl1;
+        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = dl2;
+#else
+        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = dl1;
+        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = dl2;
+#endif // INT8_MMA_AVAILABLE
+    }
+}
+
+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ2_K_R4> {
+    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq2_k_r4<mmq_y, nwarps, need_check>;
+    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma<mmq_x, mmq_y, nwarps>;
+    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
+DECL_MMQ_CASE(GGML_TYPE_IQ2_K_R4);
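
For reference, the core trick in load_tiles_iq2_k_r4 is the byte-wise expansion of one 32-bit word that packs sixteen 2-bit quant indices: each shift/mask pair (ql >> 2*j) & 0x03030303 isolates one 2-bit field per byte lane, and the resulting 0..3 indices are mapped through the non-linear iq2nl_values table (offset by 4 when the per-row "extra" bit is set). The standalone host-side sketch below illustrates that unpacking under stated assumptions: the lookup-table values, function and constant names are placeholders for illustration, not the actual ggml tables, and a little-endian host is assumed.

// Standalone sketch (not part of the patch): expands one 32-bit word of
// sixteen 2-bit quants the same way the kernel does, then maps the indices
// through a non-linear table. kDemoValues is a PLACEHOLDER table, not the
// real iq2nl_values from ggml.
#include <cstdint>
#include <cstdio>

// Hypothetical 8-entry table: entries 0..3 are used when the "extra" bit is 0,
// entries 4..7 when it is 1 (this is what the "<< 2" offset selects).
static const int8_t kDemoValues[8] = { -31, -13, 1, 17, -26, -8, 6, 22 };

// Unpack sixteen 2-bit indices from ql into sixteen signed byte values.
static void unpack_2bit_word(uint32_t ql, int extra_bit, int8_t out[16]) {
    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *) aux32;   // byte view (little-endian assumed)
    // Each shift/mask keeps exactly one 2-bit field per byte lane; bits dragged
    // across byte boundaries by the whole-word shift are discarded by the mask.
    aux32[0] = (ql >> 0) & 0x03030303u;
    aux32[1] = (ql >> 2) & 0x03030303u;
    aux32[2] = (ql >> 4) & 0x03030303u;
    aux32[3] = (ql >> 6) & 0x03030303u;
    const int8_t * values = kDemoValues + ((extra_bit & 1) << 2);
    for (int j = 0; j < 16; ++j) {
        out[j] = values[aux8[j]];   // map each 2-bit index to its table value
    }
}

int main() {
    int8_t out[16];
    // Each byte 0xE4 = 0b11100100 packs the indices 0,1,2,3 from low to high bits.
    unpack_2bit_word(0xE4E4E4E4u, 0, out);
    for (int j = 0; j < 16; ++j) printf("%d ", out[j]);
    printf("\n");
    return 0;
}

The per-row scales in the kernel are handled separately: each 4-bit scale is shifted into range with "- 8" and multiplied by the row's fp16 super-block scale d before being stored into x_df.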