Diffstat (limited to 'ggml/src/iqk/iqk_mul_mat.cpp')
-rw-r--r--  ggml/src/iqk/iqk_mul_mat.cpp | 280
1 file changed, 268 insertions(+), 12 deletions(-)
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index f0e9d61d..6bfd4f77 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -3785,6 +3785,125 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
}
}
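+// AVX2/AVX-512 kernel: nrc_x rows of IQ4_K_R4 (4 interleaved rows per super-block of 256) times nrc_y Q8_K columns.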
+template <int nrc_y>
+static void mul_mat_iq4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ GGML_ASSERT(nrc_x%4 == 0);
+ Q8<nrc_y, block_q8_K> q8(info);
+ auto m4 = _mm256_set1_epi8(0xf);
+ auto m30 = _mm256_set1_epi8(0x30);
+ auto m32 = _mm256_set1_epi8(32);
+ auto ms = _mm256_set1_epi8(4);
+ //auto shift_shuffle = _mm256_set_epi64x(0x0303030302020202, 0x0101010100000000, 0x0303030302020202, 0x0101010100000000);
+ auto shift_shuffle = _mm256_set_epi64x(0x0707070706060606, 0x0505050504040404, 0x0303030302020202, 0x0101010100000000);
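+ // shift_shuffle spreads the per-row extra-bit shifts so that each byte lines up with the interleaved quants it applies to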
+#ifdef HAVE_FANCY_SIMD
+ auto values = load_iq4nl_values_256();
+ __m256 d4s[nrc_y];
+ static const uint8_t k_shuff[32] = {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15, 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
+ auto shuff = _mm256_loadu_si256((const __m256i *)k_shuff);
+#else
+ auto m1 = _mm256_set1_epi16(1);
+ auto values128 = _mm_loadu_si128((const __m128i *)iq4k_values);
+ auto values = MM256_SET_M128I(values128, values128);
+#endif
+ int nbl = n / QK_K;
+ __m256 acc[nrc_y] = {};
+ __m256i qx[4];
+ int8_t stored_scales[64];
+ for (int ix = 0; ix < nrc_x; ix += 4) {
+ const block_iq4_k_r4 * iq4 = (const block_iq4_k_r4 *)((const char *)vx + (ix+0)*bx);
+ for (int ibl = 0; ibl < nbl; ++ibl) { // Block of 256
+ auto dl = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4[ibl].d));
+ auto d4 = _mm256_set_m128(dl, dl);
+ auto extra = _mm256_set1_epi64x(*(const uint64_t *)iq4[ibl].extra);
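+ // a set bit in 'extra' adds 4 to the corresponding looked-up values; one bit per byte is consumed on each pass over a block of 32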
+#ifdef HAVE_FANCY_SIMD
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ d4s[iy] = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(iy, ibl)));
+ }
+#else
+ if constexpr (nrc_y == 1) {
+ d4 = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(0, ibl)));
+ }
+#endif
+ auto slbits = _mm256_loadu_si256((const __m256i *)iq4[ibl].scales_l);
+ auto sl1 = _mm256_and_si256(slbits, m4);
+ auto sl2 = _mm256_and_si256(_mm256_srli_epi16(slbits, 4), m4);
+ auto shbits = _mm_loadu_si128((const __m128i*)iq4[ibl].scales_h);
+ auto sh = MM256_SET_M128I(_mm_srli_epi16(shbits, 2), shbits);
+ auto i8scales1 = _mm256_sub_epi8(_mm256_or_si256(sl1, _mm256_and_si256(m30, _mm256_slli_epi16(sh, 4))), m32);
+ auto i8scales2 = _mm256_sub_epi8(_mm256_or_si256(sl2, _mm256_and_si256(m30, sh)), m32);
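+ // 6-bit block scales: low nibble from scales_l, top two bits from scales_h, biased by -32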
+ _mm256_storeu_si256((__m256i *)stored_scales+0, i8scales1);
+ _mm256_storeu_si256((__m256i *)stored_scales+1, i8scales2);
+#ifdef HAVE_FANCY_SIMD
+ {
+ auto t1 = _mm256_shuffle_epi8(_mm256_cvtepi8_epi16(_mm256_extracti128_si256(i8scales1, 0)), shuff); // blocks 0, 1, 2, 3 for each row
+ auto t2 = _mm256_shuffle_epi8(_mm256_cvtepi8_epi16(_mm256_extracti128_si256(i8scales1, 1)), shuff); // blocks 4, 5, 6, 7 for each row
+ auto t3 = _mm256_shuffle_epi8(_mm256_cvtepi8_epi16(_mm256_extracti128_si256(i8scales2, 0)), shuff); // blocks 8, 9, 10, 11 for each row
+ auto t4 = _mm256_shuffle_epi8(_mm256_cvtepi8_epi16(_mm256_extracti128_si256(i8scales2, 1)), shuff); // blocks 12, 13, 14, 15 for each row
+ auto s1 = MM256_SET_M128I(_mm256_extracti128_si256(t3, 0), _mm256_extracti128_si256(t1, 0)); // blocks 0, 1, 8, 9
+ auto s2 = MM256_SET_M128I(_mm256_extracti128_si256(t3, 1), _mm256_extracti128_si256(t1, 1)); // blocks 2, 3, 10, 11
+ auto s3 = MM256_SET_M128I(_mm256_extracti128_si256(t4, 0), _mm256_extracti128_si256(t2, 0)); // blocks 4, 5, 12, 13
+ auto s4 = MM256_SET_M128I(_mm256_extracti128_si256(t4, 1), _mm256_extracti128_si256(t2, 1)); // blocks 6, 7, 14, 15
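+ // apply the -128 * (block scale x Q8 block sum) correction needed because the quant bytes are consumed as unsigned by _mm256_dpbusd_epi32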
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto bsums = q8.load_bsums(iy, ibl);
+ auto sumi = _mm256_setzero_si256();
+ sumi = _mm256_dpwssd_epi32(sumi, s1, _mm256_shuffle_epi32(bsums, 0x00));
+ sumi = _mm256_dpwssd_epi32(sumi, s2, _mm256_shuffle_epi32(bsums, 0x55));
+ sumi = _mm256_dpwssd_epi32(sumi, s3, _mm256_shuffle_epi32(bsums, 0xaa));
+ sumi = _mm256_dpwssd_epi32(sumi, s4, _mm256_shuffle_epi32(bsums, 0xff));
+ acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(d4s[iy], _mm256_set1_ps(-128.f)), _mm256_cvtepi32_ps(sumi), acc[iy]);
+ }
+ }
+#endif
+ for (int ib = 0; ib < QK_K/32; ++ib) {
+ auto iscales = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i *)(stored_scales + 8*ib)));
+#ifdef HAVE_FANCY_SIMD
+ auto scales = _mm256_cvtepi32_ps(iscales);
+#else
+ auto scales = _mm256_mul_ps(d4, _mm256_cvtepi32_ps(iscales));
+#endif
+ auto bits1 = _mm256_loadu_si256((const __m256i *)iq4[ibl].qs+2*ib+0);
+ auto bits2 = _mm256_loadu_si256((const __m256i *)iq4[ibl].qs+2*ib+1);
+ auto shift = _mm256_and_si256(ms, _mm256_slli_epi16(extra, 2)); extra = _mm256_srli_epi16(extra, 1);
+ shift = _mm256_shuffle_epi8(shift, shift_shuffle);
+ qx[0] = _mm256_add_epi8(shift, _mm256_shuffle_epi8(values, _mm256_and_si256(bits1, m4)));
+ qx[1] = _mm256_add_epi8(shift, _mm256_shuffle_epi8(values, _mm256_and_si256(bits2, m4)));
+ qx[2] = _mm256_add_epi8(shift, _mm256_shuffle_epi8(values, _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4)));
+ qx[3] = _mm256_add_epi8(shift, _mm256_shuffle_epi8(values, _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4)));
+#ifndef HAVE_FANCY_SIMD
+ auto s1 = _mm256_sign_epi8(qx[0], qx[0]);
+ auto s2 = _mm256_sign_epi8(qx[1], qx[1]);
+ auto s3 = _mm256_sign_epi8(qx[2], qx[2]);
+ auto s4 = _mm256_sign_epi8(qx[3], qx[3]);
+#endif
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = _mm256_loadu_si256((const __m256i*)q8.y[iy][ibl].qs+ib);
+#ifdef HAVE_FANCY_SIMD
+ auto sumi = _mm256_setzero_si256();
+ sumi = _mm256_dpbusd_epi32(sumi, qx[0], _mm256_shuffle_epi32(y, 0x00));
+ sumi = _mm256_dpbusd_epi32(sumi, qx[1], _mm256_shuffle_epi32(y, 0x55));
+ sumi = _mm256_dpbusd_epi32(sumi, qx[2], _mm256_shuffle_epi32(y, 0xaa));
+ sumi = _mm256_dpbusd_epi32(sumi, qx[3], _mm256_shuffle_epi32(y, 0xff));
+ acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(scales, d4s[iy]), _mm256_cvtepi32_ps(sumi), acc[iy]);
+#else
+ auto sumi1 = _mm256_maddubs_epi16(s1, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x00), qx[0]));
+ auto sumi2 = _mm256_maddubs_epi16(s2, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x55), qx[1]));
+ auto sumi3 = _mm256_maddubs_epi16(s3, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xaa), qx[2]));
+ auto sumi4 = _mm256_maddubs_epi16(s4, _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xff), qx[3]));
+ auto sumi = _mm256_add_epi32(_mm256_add_epi32(_mm256_madd_epi16(m1, sumi1), _mm256_madd_epi16(m1, sumi2)),
+ _mm256_add_epi32(_mm256_madd_epi16(m1, sumi3), _mm256_madd_epi16(m1, sumi4)));
+ acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(scales, _mm256_set1_ps(q8.scale(iy, ibl))), _mm256_cvtepi32_ps(sumi), acc[iy]);
+#endif
+ }
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
+ acc[iy] = _mm256_setzero_ps();
+ info.store(ix+0, iy, sum);
+ }
+ }
+}
+
template <typename Bits>
inline void multiply_add_1(int j, const Bits& bits, const __m256i * scales, const __m256i * q8, __m256i * sumi) {
if (j == 0) {
@@ -5804,18 +5923,6 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
mm.funcs[7] = mul_mat_q3_k_r4_q8_k<8>;
expected_typeB = GGML_TYPE_Q8_K;
break;
- case GGML_TYPE_Q4_K_R4:
- assert (ne00 % QK_K == 0);
- mm.funcs[0] = mul_mat_q4_k_r4_q8_k<1>;
- mm.funcs[1] = mul_mat_q4_k_r4_q8_k<2>;
- mm.funcs[2] = mul_mat_q4_k_r4_q8_k<3>;
- mm.funcs[3] = mul_mat_q4_k_r4_q8_k<4>;
- mm.funcs[4] = mul_mat_q4_k_r4_q8_k<5>;
- mm.funcs[5] = mul_mat_q4_k_r4_q8_k<6>;
- mm.funcs[6] = mul_mat_q4_k_r4_q8_k<7>;
- mm.funcs[7] = mul_mat_q4_k_r4_q8_k<8>;
- expected_typeB = GGML_TYPE_Q8_K32;
- break;
case GGML_TYPE_Q5_K_R4:
assert (ne00 % QK_K == 0);
mm.funcs[0] = mul_mat_q5_k_r4_q8_k<1>;
@@ -5840,6 +5947,18 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
mm.funcs[7] = mul_mat_q6_k_r4_q8_k<8>;
expected_typeB = GGML_TYPE_Q8_K;
break;
+ case GGML_TYPE_IQ4_K_R4:
+ assert (ne00 % QK_K == 0);
+ mm.funcs[0] = mul_mat_iq4_k_r4_q8_k<1>;
+ mm.funcs[1] = mul_mat_iq4_k_r4_q8_k<2>;
+ mm.funcs[2] = mul_mat_iq4_k_r4_q8_k<3>;
+ mm.funcs[3] = mul_mat_iq4_k_r4_q8_k<4>;
+ mm.funcs[4] = mul_mat_iq4_k_r4_q8_k<5>;
+ mm.funcs[5] = mul_mat_iq4_k_r4_q8_k<6>;
+ mm.funcs[6] = mul_mat_iq4_k_r4_q8_k<7>;
+ mm.funcs[7] = mul_mat_iq4_k_r4_q8_k<8>;
+ expected_typeB = GGML_TYPE_Q8_K;
+ break;
case GGML_TYPE_Q4_0_R4:
assert (ne00 % QK4_NL == 0);
mm.funcs[0] = mul_mat_q4_0_r4_q8_1<1>;
@@ -8516,6 +8635,139 @@ void mul_mat_iq4_xs_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& i
}
}
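+// NEON kernel for the same IQ4_K_R4 x Q8_K product; for nrc_y == 1 the 0/4 extra-bit shifts are folded in through the Q8 block sums instead of being added to every quant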
+template <int nrc_y>
+void mul_mat_iq4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ GGML_ASSERT(nrc_x%4 == 0);
+ Q8<nrc_y, block_q8_K> q8(info);
+ auto m4 = vdupq_n_u8(0xf);
+ auto m3 = vdupq_n_u8(0x30);
+ auto ms = vdupq_n_u8(4);
+ auto m32 = vdupq_n_s8(-32);
+ uint8x16x2_t shift_shuffle = {
+ vreinterpretq_u8_u64(uint64x2_t{0x0101010100000000, 0x0303030302020202}),
+ vreinterpretq_u8_u64(uint64x2_t{0x0505050504040404, 0x0707070706060606})
+ };
+ auto values = vld1q_s8(iq4k_values);
+ int nbl = n / QK_K;
+ int8x16_t qx[4];
+ int8x16x4_t i8scales;
+ int16x8x4_t i16scales;
+ float32x4_t acc[nrc_y] = {};
+ for (int ix = 0; ix < nrc_x; ix += 4) {
+ const block_iq4_k_r4 * iq4 = (const block_iq4_k_r4 *)((const char *)vx + ix*bx);
+ for (int ibl = 0; ibl < nbl; ++ibl) {
+ auto d4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq4[ibl].d));
+ auto extra8 = vld1_u8(iq4[ibl].extra);
+ uint8x16_t extra;
+ if constexpr (nrc_y == 1) {
+ extra = vcombine_u8(extra8, vshr_n_u8(extra8,1));
+ } else {
+ extra = vcombine_u8(extra8, extra8);
+ }
+ auto sl = vld1q_u8_x2(iq4[ibl].scales_l);
+ auto sh = vld1q_u8(iq4[ibl].scales_h);
+ i8scales.val[0] = vaddq_s8(vorrq_u8(vandq_u8(sl.val[0], m4), vandq_u8(vshlq_n_u8(sh, 4), m3)), m32);
+ i8scales.val[1] = vaddq_s8(vorrq_u8(vandq_u8(sl.val[1], m4), vandq_u8(vshlq_n_u8(sh, 2), m3)), m32);
+ i8scales.val[2] = vaddq_s8(vorrq_u8(vshrq_n_u8(sl.val[0], 4), vandq_u8(sh, m3)), m32);
+ i8scales.val[3] = vaddq_s8(vorrq_u8(vshrq_n_u8(sl.val[1], 4), vandq_u8(vshrq_n_u8(sh, 2), m3)), m32);
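+ // 6-bit block scales, reassembled from scales_l/scales_h with a -32 bias, as in the AVX path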
+ int32x4_t isum[nrc_y] = {};
+ if constexpr (nrc_y == 1) {
+ auto s8_1 = vmulq_s8(i8scales.val[0], vandq_u8(ms, vshlq_n_u8(extra, 2)));
+ auto s8_2 = vmulq_s8(i8scales.val[1], vandq_u8(ms, extra));
+ auto s16_1 = vmovl_s8(vget_low_s8 (s8_1));
+ auto s16_2 = vmovl_s8(vget_high_s8(s8_1));
+ auto s16_3 = vmovl_s8(vget_low_s8 (s8_2));
+ auto s16_4 = vmovl_s8(vget_high_s8(s8_2));
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto b8 = vld1_s16(q8.y[iy][ibl].bsums);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_1), b8, 0);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_1), b8, 1);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_2), b8, 2);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_2), b8, 3);
+ b8 = vld1_s16(q8.y[iy][ibl].bsums+4);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_3), b8, 0);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_3), b8, 1);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_4), b8, 2);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_4), b8, 3);
+ }
+ s8_1 = vmulq_s8(i8scales.val[2], vandq_u8(ms, vshrq_n_u8(extra, 2)));
+ s8_2 = vmulq_s8(i8scales.val[3], vandq_u8(ms, vshrq_n_u8(extra, 4)));
+ s16_1 = vmovl_s8(vget_low_s8 (s8_1));
+ s16_2 = vmovl_s8(vget_high_s8(s8_1));
+ s16_3 = vmovl_s8(vget_low_s8 (s8_2));
+ s16_4 = vmovl_s8(vget_high_s8(s8_2));
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto b8 = vld1_s16(q8.y[iy][ibl].bsums+8);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_1), b8, 0);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_1), b8, 1);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_2), b8, 2);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_2), b8, 3);
+ b8 = vld1_s16(q8.y[iy][ibl].bsums+12);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_3), b8, 0);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_3), b8, 1);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_low_s16 (s16_4), b8, 2);
+ isum[iy] = vmlal_lane_s16(isum[iy], vget_high_s16(s16_4), b8, 3);
+ }
+ }
+ for (int is = 0; is < 2; ++is) {
+ i16scales.val[0] = vmovl_s8(vget_low_s8 (i8scales.val[2*is+0]));
+ i16scales.val[1] = vmovl_s8(vget_high_s8(i8scales.val[2*is+0]));
+ i16scales.val[2] = vmovl_s8(vget_low_s8 (i8scales.val[2*is+1]));
+ i16scales.val[3] = vmovl_s8(vget_high_s8(i8scales.val[2*is+1]));
+ for (int ib = 0; ib < 4; ++ib) {
+ auto bits = vld1q_u8_x4(iq4[ibl].qs + 256*is + 64*ib);
+ uint8x16_t shifts;
+ if constexpr (nrc_y == 1) {
+ qx[0] = vqtbl1q_s8(values, vandq_u8(bits.val[0], m4)); // 0...3 from the 4 rows
+ qx[1] = vqtbl1q_s8(values, vandq_u8(bits.val[2], m4)); // 4...7
+ qx[2] = vqtbl1q_s8(values, vshrq_n_u8(bits.val[0], 4)); // 8..11
+ qx[3] = vqtbl1q_s8(values, vshrq_n_u8(bits.val[2], 4)); // 12..15
+ } else {
+ shifts = vandq_u8(ms, vshlq_n_u8(extra, 2));
+ auto shift = vqtbl1q_u8(shifts, shift_shuffle.val[0]);
+ extra = vshrq_n_u8(extra, 1);
+ qx[0] = vaddq_s8(shift, vqtbl1q_s8(values, vandq_u8(bits.val[0], m4))); // 0...3 from the 4 rows
+ qx[1] = vaddq_s8(shift, vqtbl1q_s8(values, vandq_u8(bits.val[2], m4))); // 4...7
+ qx[2] = vaddq_s8(shift, vqtbl1q_s8(values, vshrq_n_u8(bits.val[0], 4))); // 8..11
+ qx[3] = vaddq_s8(shift, vqtbl1q_s8(values, vshrq_n_u8(bits.val[2], 4))); // 12..15
+ }
+ auto scales = vmovl_s16(vget_low_s16 (i16scales.val[ib]));
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = vld1q_s8(q8.y[iy][ibl].qs+128*is+32*ib);
+ auto sumi = interleaved_dotq(qx, y);
+ isum[iy] = vmlaq_s32(isum[iy], scales, sumi);
+ }
+ if constexpr (nrc_y == 1) {
+ qx[0] = vqtbl1q_s8(values, vandq_u8(bits.val[1], m4)); // 16..19
+ qx[1] = vqtbl1q_s8(values, vandq_u8(bits.val[3], m4)); // 20..23
+ qx[2] = vqtbl1q_s8(values, vshrq_n_u8(bits.val[1], 4)); // 24..27
+ qx[3] = vqtbl1q_s8(values, vshrq_n_u8(bits.val[3], 4)); // 28..31
+ } else {
+ auto shift = vqtbl1q_u8(shifts, shift_shuffle.val[1]);
+ qx[0] = vaddq_s8(shift, vqtbl1q_s8(values, vandq_u8(bits.val[1], m4))); // 16..19
+ qx[1] = vaddq_s8(shift, vqtbl1q_s8(values, vandq_u8(bits.val[3], m4))); // 20..23
+ qx[2] = vaddq_s8(shift, vqtbl1q_s8(values, vshrq_n_u8(bits.val[1], 4))); // 24..27
+ qx[3] = vaddq_s8(shift, vqtbl1q_s8(values, vshrq_n_u8(bits.val[3], 4))); // 28..31
+ }
+ scales = vmovl_s16(vget_high_s16(i16scales.val[ib]));
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = vld1q_s8(q8.y[iy][ibl].qs+128*is+32*ib+16);
+ auto sumi = interleaved_dotq(qx, y);
+ isum[iy] = vmlaq_s32(isum[iy], scales, sumi);
+ }
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(d4, vdupq_n_f32(q8.scale(iy, ibl))), vcvtq_f32_s32(isum[iy]));
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ info.store(ix, iy, acc[iy]);
+ acc[iy] = vdupq_n_f32(0.f);
+ }
+ }
+}
+
IQK_ALWAYS_INLINE void prepare_q4_k_quants(const uint8x16_t& m4, const uint8x16x4_t& bits, int8x16_t * qx) {
qx[0] = vandq_u8(bits.val[0], m4); // 0...3 from the 4 rows
qx[1] = vandq_u8(bits.val[1], m4); // 16..19
@@ -9294,6 +9546,10 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
SET_MUL_MAT_FUNCTIONS(m, mul_mat_q6_k_r4_q8_k);
expected_Btype = GGML_TYPE_Q8_K;
break;
+ case GGML_TYPE_IQ4_K_R4:
+ SET_MUL_MAT_FUNCTIONS(m, mul_mat_iq4_k_r4_q8_k);
+ expected_Btype = GGML_TYPE_Q8_K;
+ break;
case GGML_TYPE_Q4_0_R4:
SET_MUL_MAT_FUNCTIONS_T(m, mul_mat_qx_r4_q8_0, Q4_0_R4_Dequantizer);
expected_Btype = GGML_TYPE_Q8_0;