// This file has been autogenerated by generate_cu_files.py, do not edit manually.

#include "../mmq.cuh"

template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq1_kt(
    const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {

    // Multiplier and byte mask used by the trellis ("KT") decoder.
    constexpr uint32_t ka = 0xCBAC1FED;
    constexpr uint32_t km = 0x3f3f3f3f;

#ifdef INT8_MMA_AVAILABLE
    int   * x_qs = (int   *)  x_tile;
    float * x_df = (float *) (x_qs + WARP_SIZE*2);
#else
    constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y);
    int   * x_qs = (int   *)  x_tile;
    float * x_df = (float *) (x_qs + txs.qs);
#endif // INT8_MMA_AVAILABLE

    const int kqsx = threadIdx.x;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
        int i = i0 + threadIdx.y;

        if (need_check) {
            i = min(i, i_max);
        }

        // Each row stores one float scale followed by the block_iq1_kt data.
        const block_iq1_kt * bxi = (const block_iq1_kt *)(x + i*stride + sizeof(float)) + kbx0;

        int ib32 = kqsx/4; // group of 32 quants within the block
        int j    = kqsx%4; // group of 8 quants within that group of 32

        // Assemble the trellis index: 8 low bits from ql, 4 bits from qh,
        // 1 bit from sh, plus a constant 4096 offset.
        uint32_t val = bxi->ql[kqsx] + ((bxi->qh[kqsx%16] << (8 - 4*(kqsx/16))) & 0xf00)
                     + ((bxi->sh[kqsx/4] << (8 - (kqsx%4))) & 0x1000) + 4096;

        // Expand the index into 8 int8 values by iterating the multiplicative hash;
        // each ggml_cuda_dp4a call sums the 4 masked bytes of val and subtracts 126.
        int2 v = {0, 0};
        for (int k = 0; k < 4; ++k) {
            val *= ka;
            v.x |= (ggml_cuda_dp4a(val & km, 0x01010101, -126) & 0xff) << 8*k;
        }
        for (int k = 0; k < 4; ++k) {
            val *= ka;
            v.y |= (ggml_cuda_dp4a(val & km, 0x01010101, -126) & 0xff) << 8*k;
        }

#ifdef INT8_MMA_AVAILABLE
        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 0] = v.x;
        x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + 8*ib32 + 2*j + 1] = v.y;
#else
        x_qs[i*(2*WARP_SIZE + 1)     + 8*ib32 + 2*j + 0] = v.x;
        x_qs[i*(2*WARP_SIZE + 1)     + 8*ib32 + 2*j + 1] = v.y;
#endif // INT8_MMA_AVAILABLE
    }

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
        int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);

        if (need_check) {
            i = min(i, i_max);
        }

        const float * dptr = (const float *)(x + i*stride);
        const float d = dptr[0]; // per-row scale stored in front of the quantized blocks
        const block_iq1_kt * bxi = (const block_iq1_kt *)(dptr + 1) + kbx0;
        // 4-bit block scale from the low nibble of sh, mapped through the iq4k lookup table.
        const int ls = iq4k_values[bxi->sh[threadIdx.x % 8] & 0xf];

#ifdef INT8_MMA_AVAILABLE
        x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = d * ls;
#else
        x_df[i*(WARP_SIZE/4) + i/4   + threadIdx.x % 8] = d * ls;
#endif // INT8_MMA_AVAILABLE
    }
}

template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ1_KT> {
    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq1_kt<mmq_y, nwarps, need_check>;
    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, nwarps, MMQ_Q8_1_DS_LAYOUT_D4>;
    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
};

DECL_MMQ_CASE(GGML_TYPE_IQ1_KT);
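
// NOTE: the sketch below is an illustrative host-side reference, not part of the
// autogenerated kernel above. It shows how one trellis index (already including the
// +4096 offset, as built in load_tiles_iq1_kt) expands into 8 int8 values with the
// same multiplicative hash; 'iq1_kt_decode_ref' is a hypothetical name used only here.
#include <cstdint>

[[maybe_unused]] static void iq1_kt_decode_ref(uint32_t val, int8_t * out) {
    constexpr uint32_t ka = 0xCBAC1FED;
    constexpr uint32_t km = 0x3f3f3f3f;
    for (int k = 0; k < 8; ++k) {
        val *= ka;
        const uint32_t m = val & km;
        // Same result as ggml_cuda_dp4a(m, 0x01010101, -126): the sum of the four
        // masked bytes minus 126, which always lies in [-126, 126] and fits an int8.
        out[k] = (int8_t) (((m >> 0) & 0xff) + ((m >> 8) & 0xff) + ((m >> 16) & 0xff) + ((m >> 24) & 0xff) - 126);
    }
}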