// This file has been autogenerated by generate_cu_files.py, do not edit manually.

#include "../mmq.cuh"

template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_k_r4(
    const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {

#ifdef INT8_MMA_AVAILABLE
    int   * x_qs = (int   *)  x_tile;
    float * x_df = (float *) (x_qs + WARP_SIZE*2);
#else
    constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16;
    int   * x_qs = (int   *)  x_tile;
    float * x_df = (float *) (x_qs + txs.qs);
#endif // INT8_MMA_AVAILABLE

    const int kqsx = threadIdx.x/4;  // 0...7 -> block of 32

    // scratch for the unpacked 4-bit quant indices of one block
    uint32_t aux32[4];
    const uint8_t * aux8 = (const uint8_t *)aux32;

#pragma unroll
    for (int i0 = 0; i0 < mmq_y; i0 += 4*nwarps) {
        int i = i0 + 4*threadIdx.y + threadIdx.x%4;

        if (need_check) {
            i = min(i, i_max);
        }

        int i4 = i/4;
        int ir = i%4;

        const block_iq4_k_r4 * bxi = (const block_iq4_k_r4 *)(x + 4*i4*stride) + kbx0;

        const float d = __half2float(bxi->d[ir]);

#pragma unroll
        for (int l = 0; l < 2; ++l) {
            // the extra bit for this block selects which half of the iq4k lookup table is used
            auto values_l = iq4k_table + ((bxi->extra[ir+4*l] << (8 - kqsx)) & 0x100);
            const int ql1 = get_int_b4(bxi->qs, 16*kqsx + ir + 4*l + 0);
            const int ql2 = get_int_b4(bxi->qs, 16*kqsx + ir + 4*l + 8);
            aux32[0] = (ql1 >> 0) & 0x0f0f0f0f;
            aux32[1] = (ql1 >> 4) & 0x0f0f0f0f;
            aux32[2] = (ql2 >> 0) & 0x0f0f0f0f;
            aux32[3] = (ql2 >> 4) & 0x0f0f0f0f;
#ifdef INT8_MMA_AVAILABLE
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 0] = int_from_table_x(aux8+ 0, values_l);
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 2] = int_from_table_x(aux8+ 4, values_l);
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 1] = int_from_table_x(aux8+ 8, values_l);
            x_qs[i*MMQ_MMA_TILE_X_K_Q3_K + 8*kqsx + 4*l + 3] = int_from_table_x(aux8+12, values_l);
#else
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 0] = int_from_table_x(aux8+ 0, values_l);
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 2] = int_from_table_x(aux8+ 4, values_l);
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 1] = int_from_table_x(aux8+ 8, values_l);
            x_qs[i*(2*WARP_SIZE + 1) + 8*kqsx + 4*l + 3] = int_from_table_x(aux8+12, values_l);
#endif // INT8_MMA_AVAILABLE
        }

        // 6-bit block scales: low 4 bits from scales_l, high 2 bits from scales_h, offset by -32
        int is = 8*kqsx + ir;
        float dl1 = d * ((((bxi->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bxi->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
        is += 4;
        float dl2 = d * ((((bxi->scales_l[is%32] >> 4*(is/32)) & 0xf) | (((bxi->scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
#ifdef INT8_MMA_AVAILABLE
        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+0] = dl1;
        x_df[i*MMQ_MMA_TILE_X_K_Q3_K + 2*kqsx+1] = dl2;
#else
        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+0] = dl1;
        x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + 2*kqsx+1] = dl2;
#endif // INT8_MMA_AVAILABLE
    }
}

template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ4_K_R4> {
    static constexpr load_tiles_mmq_t load_tiles   = load_tiles_iq4_k_r4<mmq_y, nwarps, need_check>;
    static constexpr vec_dot_mmq_t    vec_dot_mma  = vec_dot_q8_0_16_q8_1_mma<mmq_x, mmq_y, nwarps>;
    static constexpr vec_dot_mmq_t    vec_dot_dp4a = vec_dot_q8_0_16_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
};

DECL_MMQ_CASE(GGML_TYPE_IQ4_K_R4);