-rw-r--r--   ggml/src/iqk/iqk_mul_mat.cpp   97
1 file changed, 53 insertions(+), 44 deletions(-)
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index ca75e0fd..f95ce061 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -2481,9 +2481,61 @@ static void mul_mat_iq4_nl_r4_q8_1(int n, const void * vx, size_t bx, const Data
}
#endif
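+// Common AVX2 kernel: 4 interleaved Q4_0 rows (R4 layout) multiplied by Q8_1 activations.
+// Used as-is on AVX2-only builds and as the nrc_y == 1 path of the AVX512 variant below.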
+template <int nrc_y>
+static void mul_mat_q4_0_r4_q8_1_avx2(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ GGML_ASSERT(nrc_x%4 == 0);
+ Q8<nrc_y, block_q8_1_x4> q8(info);
+ auto m4 = _mm256_set1_epi8(0xf);
+ auto m1 = _mm256_set1_epi16(1);
+ int nb = n / QK4_NL;
+ GGML_ASSERT(nb%4 == 0);
+ __m256 acc[nrc_y] = {};
+ float d8[8*nrc_y];
+ for (int ix = 0; ix < nrc_x; ix += 4) {
+ const block_iq4_nl_r4 * iq4 = (const block_iq4_nl_r4 *)((const char *)vx + ix*bx);
+ for (int ib4 = 0; ib4 < nb/4; ++ib4) {
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto scales = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8.y[iy][ib4].d));
+ _mm256_storeu_ps(d8 + 8*iy, scales);
+ }
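+            // d8 now holds, for each y row, the 4 sub-block scales followed by the 4 block-sum terms.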
+ for (int k = 0; k < 4; ++k) {
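+                // Row scales of sub-block k, duplicated into both 128-bit lanes; scales_m is the factor applied to the block sums below.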
+ auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4[4*ib4+k].d));
+ auto scales = _mm256_set_m128(scales128, scales128);
+ auto scales_m = _mm256_mul_ps(scales, _mm256_set1_ps(-4.f));
+ auto bits1 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+0);
+ auto bits2 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+1);
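+                // Split the packed 4-bit quants of the 4 interleaved rows into low and high nibbles.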
+ auto q1 = _mm256_and_si256(bits1, m4);
+ auto q2 = _mm256_and_si256(bits2, m4);
+ auto q3 = _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4);
+ auto q4 = _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4);
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
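+                    // Broadcast groups of 4 q8 values per 128-bit lane and accumulate unsigned(q4) times signed(q8) products.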
+ auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(q1, _mm256_shuffle_epi32(y, 0x00)),
+ _mm256_maddubs_epi16(q2, _mm256_shuffle_epi32(y, 0x55)));
+ auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(q3, _mm256_shuffle_epi32(y, 0xaa)),
+ _mm256_maddubs_epi16(q4, _mm256_shuffle_epi32(y, 0xff)));
+ auto sumi = _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2));
+ auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(d8[8*iy+k]));
+ acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc[iy]);
+ acc[iy] = _mm256_fmadd_ps(scales_m, _mm256_set1_ps(d8[8*iy+4+k]), acc[iy]);
+ }
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
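+            // Add the two 128-bit halves to get one float per row, store the 4 row results, then reset the accumulator.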
+ auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
+ info.store(ix, iy, sum);
+ acc[iy] = _mm256_setzero_ps();
+ }
+ }
+}
+
#ifdef HAVE_FANCY_SIMD
template <int nrc_y>
static void mul_mat_q4_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
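+    // With a single y row, use the shared AVX2 kernel above; the AVX512 code below handles nrc_y > 1.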
+ if constexpr (nrc_y == 1) {
+ mul_mat_q4_0_r4_q8_1_avx2<1>(n, vx, bx, info, nrc_x);
+ return;
+ }
GGML_ASSERT(nrc_x%8 == 0);
Q8<nrc_y, block_q8_1_x4> q8(info);
auto m4 = _mm512_set1_epi8(0xf);
@@ -2537,50 +2589,7 @@ static void mul_mat_q4_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
#else
template <int nrc_y>
static void mul_mat_q4_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
- GGML_ASSERT(nrc_x%4 == 0);
- Q8<nrc_y, block_q8_1_x4> q8(info);
- auto m4 = _mm256_set1_epi8(0xf);
- auto m1 = _mm256_set1_epi16(1);
- int nb = n / QK4_NL;
- GGML_ASSERT(nb%4 == 0);
- __m256 acc[nrc_y] = {};
- float d8[8*nrc_y];
- for (int ix = 0; ix < nrc_x; ix += 4) {
- const block_iq4_nl_r4 * iq4 = (const block_iq4_nl_r4 *)((const char *)vx + ix*bx);
- for (int ib4 = 0; ib4 < nb/4; ++ib4) {
- for (int iy = 0; iy < nrc_y; ++iy) {
- auto scales = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)q8.y[iy][ib4].d));
- _mm256_storeu_ps(d8 + 8*iy, scales);
- }
- for (int k = 0; k < 4; ++k) {
- auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4[4*ib4+k].d));
- auto scales = _mm256_set_m128(scales128, scales128);
- auto scales_m = _mm256_mul_ps(scales, _mm256_set1_ps(-4.f));
- auto bits1 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+0);
- auto bits2 = _mm256_loadu_si256((const __m256i *)iq4[4*ib4+k].qs+1);
- auto q1 = _mm256_and_si256(bits1, m4);
- auto q2 = _mm256_and_si256(bits2, m4);
- auto q3 = _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4);
- auto q4 = _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4);
- for (int iy = 0; iy < nrc_y; ++iy) {
- auto y = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
- auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(q1, _mm256_shuffle_epi32(y, 0x00)),
- _mm256_maddubs_epi16(q2, _mm256_shuffle_epi32(y, 0x55)));
- auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(q3, _mm256_shuffle_epi32(y, 0xaa)),
- _mm256_maddubs_epi16(q4, _mm256_shuffle_epi32(y, 0xff)));
- auto sumi = _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2));
- auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(d8[8*iy+k]));
- acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc[iy]);
- acc[iy] = _mm256_fmadd_ps(scales_m, _mm256_set1_ps(d8[8*iy+4+k]), acc[iy]);
- }
- }
- }
- for (int iy = 0; iy < nrc_y; ++iy) {
- auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
- info.store(ix, iy, sum);
- acc[iy] = _mm256_setzero_ps();
- }
- }
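+    // Plain AVX2 build: the body now lives in the shared kernel above.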
+ mul_mat_q4_0_r4_q8_1_avx2<nrc_y>(n, vx, bx, info, nrc_x);
}
#endif