author | Kawrakow <iwankawrakow@gmail.com> | 2024-12-13 15:47:59 +0100
---|---|---
committer | GitHub <noreply@github.com> | 2024-12-13 15:47:59 +0100
commit | 12f962dd2494b743deb1c671974a591fdef1f003 (patch) |
tree | c38275bca25b37c1d036d20f40abbe11237f17c3 |
parent | 36efbfb132088949630f7951fabcd9664e563fb4 (diff) |
Faster R4 quants on Zen4 (#139)
* q3_k_r4: faster Zen4
256.2 -> 272.7 t/s for PP-512
* q6_k_r4: faster Zen4
243.2 -> 261.3 t/s for PP-512
* q4_k_r4: slightly faster Zen4
262.4 -> 268.1 t/s for PP-512
* q5_k_r4: slightly faster Zen4
248.3 -> 256.7 t/s for PP-512
* iq4_xs_r4: slightly faster Zen4
256.8 -> 272.0 t/s for PP-512
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
-rw-r--r-- | ggml/src/iqk/iqk_mul_mat.cpp | 119
1 file changed, 68 insertions, 51 deletions
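All five kernels get the same restructuring: the per-sub-block float FMA against a per-row scale (`d4s[iy]`) is moved out of the innermost loop. The integer dot products are instead weighted by the integer block scales (`_mm512_mullo_epi32` / `_mm256_mullo_epi32`) and accumulated into `isum[iy]`, and the combined float scale `d4 * q8.scale(iy, ibl)` is applied once per 256-element super-block. Below is a minimal sketch of that pattern, assuming AVX-512 VNNI (the code's `HAVE_FANCY_SIMD` path); the function and parameter names are hypothetical, not the kernel's own:

```cpp
// Sketch of the deferred-scaling pattern (hypothetical names and shapes;
// assumes AVX-512 VNNI). One "super-block" is 8 sub-blocks of 32 quants,
// as in the K-quants.
#include <immintrin.h>

void superblock_dot(const __m512i qx[8], const __m512i y[8],
                    const __m512i iscales[8],  // int32 block scales
                    __m512 d4, float d8,       // row scales, q8 super-block scale
                    __m512 &acc) {
    __m512i isum = _mm512_setzero_si512();
    for (int ib = 0; ib < 8; ++ib) {
        // u8 x i8 dot products summed into int32 lanes for one sub-block
        __m512i sumi = _mm512_dpbusd_epi32(_mm512_setzero_si512(), qx[ib], y[ib]);
        // apply the integer block scale and accumulate in the integer domain:
        // no convert + FMA inside the hot loop
        isum = _mm512_add_epi32(isum, _mm512_mullo_epi32(iscales[ib], sumi));
    }
    // one conversion and one float FMA per super-block instead of eight
    acc = _mm512_fmadd_ps(_mm512_mul_ps(d4, _mm512_set1_ps(d8)),
                          _mm512_cvtepi32_ps(isum), acc);
}
```

Overflow stays comfortably inside int32 here: a 32-quant sub-block dot is at most about 32·127·127, and eight such sums weighted by 6-bit scales remain well under 2^31. The likely win on Zen4 is trading eight convert+FMA pairs per row per super-block for one, plus cheap integer multiply-adds; that reading is inferred from the diff, not stated in the commit message.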
```diff
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 84a4b619..3f448275 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -3011,7 +3011,7 @@ static void mul_mat_iq4_xs_r4_q8_k(int n, const void * vx, size_t bx, const Data
     using helper_t = union { __m512i vec; uint32_t val[16]; };
     helper_t h;
     __m512  acc[nrc_y] = {};
-    __m512  d4s[nrc_y];
+    __m512i isum[nrc_y] = {};
     __m512i qx[4];
     for (int ix = 0; ix < nrc_x; ix += 8) {
         const block_iq4_xs_r4 * iq4l = (const block_iq4_xs_r4 *)((const char *)vx + (ix+0)*bx);
@@ -3020,10 +3020,7 @@ static void mul_mat_iq4_xs_r4_q8_k(int n, const void * vx, size_t bx, const Data
         auto dl = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4l[ibl].d));
         auto dh = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq4h[ibl].d));
         auto d4 = _mm512_insertf32x8(_mm512_castps256_ps512(_mm256_set_m128(dl, dl)), _mm256_set_m128(dh, dh), 1);
-        for (int iy = 0; iy < nrc_y; ++iy) {
-            d4s[iy] = _mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl)));
-        }
-        d4 = _mm512_mul_ps(d4, _mm512_set1_ps(-64.f));
+        auto d4x64 = _mm512_mul_ps(d4, _mm512_set1_ps(-64.f));
         auto slbits_l = _mm_loadu_si128((const __m128i *)iq4l[ibl].scales_l);
         auto shbits_l = _mm_loadu_si128((const __m128i *)iq4h[ibl].scales_l);
         auto sl_l = MM256_SET_M128I(_mm_srli_epi16(slbits_l, 4), slbits_l);
@@ -3040,7 +3037,7 @@ static void mul_mat_iq4_xs_r4_q8_k(int n, const void * vx, size_t bx, const Data
         for (int ib = 0; ib < QK_K/32; ++ib) {
             auto iscales = _mm512_cvtepi8_epi32(_mm_blend_epi32(_mm_set1_epi32(h.val[ib+0]), _mm_set1_epi32(h.val[ib+8]), 0x0c));
             auto scales = _mm512_cvtepi32_ps(iscales);
-            auto scales_m = _mm512_mul_ps(scales, d4);
+            auto scales_m = _mm512_mul_ps(scales, d4x64);
             auto bits1 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[ibl].qs+2*ib+0)),
                                             _mm256_loadu_si256((const __m256i *)iq4h[ibl].qs+2*ib+0), 1);
             auto bits2 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[ibl].qs+2*ib+1)),
@@ -3057,11 +3054,15 @@ static void mul_mat_iq4_xs_r4_q8_k(int n, const void * vx, size_t bx, const Data
                 sumi = _mm512_dpbusd_epi32(sumi, qx[1], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55)));
                 sumi = _mm512_dpbusd_epi32(sumi, qx[2], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa)));
                 sumi = _mm512_dpbusd_epi32(sumi, qx[3], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff)));
+                isum[iy] = _mm512_add_epi32(isum[iy], _mm512_mullo_epi32(iscales, sumi));
                 float m8 = ((const float *)q8.y[iy][ibl].bsums)[ib];
-                acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(scales, d4s[iy]), _mm512_cvtepi32_ps(sumi), acc[iy]);
                 acc[iy] = _mm512_fmadd_ps(scales_m, _mm512_set1_ps(m8), acc[iy]);
             }
         }
+        for (int iy = 0; iy < nrc_y; ++iy) {
+            acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl))), _mm512_cvtepi32_ps(isum[iy]), acc[iy]);
+            isum[iy] = _mm512_setzero_si512();
+        }
     }
     for (int iy = 0; iy < nrc_y; ++iy) {
         auto sum1 = _mm_add_ps(_mm512_extractf32x4_ps(acc[iy], 0), _mm512_extractf32x4_ps(acc[iy], 1));
@@ -3176,7 +3177,7 @@ static void mul_mat_q4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
     using helper_t = union { __m512i vec; uint32_t val[16]; };
     helper_t hd, hm;
     __m512  acc[nrc_y] = {};
-    __m512  d4s[nrc_y];
+    __m512i isum[nrc_y] = {};
     __m512i qx[4];
     for (int ix = 0; ix < nrc_x; ix += 8) {
         const block_q4_k_r4 * iq4l = (const block_q4_k_r4 *)((const char *)vx + (ix+0)*bx);
@@ -3189,9 +3190,6 @@ static void mul_mat_q4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
         auto dh = _mm256_castps256_ps128(d2); auto mh = _mm256_extractf128_ps(d2, 1);
         auto d4 = _mm512_insertf32x8(_mm512_castps256_ps512(_mm256_set_m128(dl, dl)), _mm256_set_m128(dh, dh), 1);
-        for (int iy = 0; iy < nrc_y; ++iy) {
-            d4s[iy] = _mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl)));
-        }
         auto m4 = _mm512_insertf32x8(_mm512_castps256_ps512(_mm256_set_m128(ml, ml)), _mm256_set_m128(mh, mh), 1);
         m4 = _mm512_mul_ps(m4, _mm512_set1_ps(-0.5f));
         auto slbits_l = _mm256_loadu_si256((const __m256i *)iq4l[ibl].scales_l);
@@ -3212,11 +3210,10 @@ static void mul_mat_q4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
             auto scales1 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hd.val[ib+0]));
             auto scales2 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hd.val[ib+8]));
             auto iscales = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
-            auto scales = _mm512_cvtepi32_ps(iscales);
             scales1 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hm.val[ib+0]));
             scales2 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hm.val[ib+8]));
-            iscales = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
-            auto scales_m = _mm512_mul_ps(m4, _mm512_cvtepi32_ps(iscales));
+            auto iscales_m = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
+            auto scales_m = _mm512_mul_ps(m4, _mm512_cvtepi32_ps(iscales_m));
             auto bits1 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[ibl].qs+2*ib+0)),
                                             _mm256_loadu_si256((const __m256i *)iq4h[ibl].qs+2*ib+0), 1);
             auto bits2 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq4l[ibl].qs+2*ib+1)),
@@ -3233,11 +3230,15 @@ static void mul_mat_q4_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm512_dpbusd_epi32(sumi, qx[1], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55)));
                 sumi = _mm512_dpbusd_epi32(sumi, qx[2], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa)));
                 sumi = _mm512_dpbusd_epi32(sumi, qx[3], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff)));
-                acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(scales, d4s[iy]), _mm512_cvtepi32_ps(sumi), acc[iy]);
+                isum[iy] = _mm512_add_epi32(isum[iy], _mm512_mullo_epi32(iscales, sumi));
                 float m8 = ((const float *)q8.y[iy][ibl].bsums)[ib];
                 acc[iy] = _mm512_fmadd_ps(scales_m, _mm512_set1_ps(m8), acc[iy]);
             }
         }
+        for (int iy = 0; iy < nrc_y; ++iy) {
+            acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl))), _mm512_cvtepi32_ps(isum[iy]), acc[iy]);
+            isum[iy] = _mm512_setzero_si512();
+        }
     }
     for (int iy = 0; iy < nrc_y; ++iy) {
         auto sum1 = _mm_add_ps(_mm512_extractf32x4_ps(acc[iy], 0), _mm512_extractf32x4_ps(acc[iy], 1));
@@ -3355,7 +3356,7 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
     using helper_t = union { __m512i vec; uint32_t val[16]; };
     helper_t hd, hm;
     __m512  acc[nrc_y] = {};
-    __m512  d4s[nrc_y];
+    __m512i isum[nrc_y] = {};
     __m512i qx[4];
     for (int ix = 0; ix < nrc_x; ix += 8) {
         const block_q5_k_r4 * iq5l = (const block_q5_k_r4 *)((const char *)vx + (ix+0)*bx);
@@ -3368,9 +3369,6 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
         auto dh = _mm256_castps256_ps128(d2); auto mh = _mm256_extractf128_ps(d2, 1);
         auto d4 = _mm512_insertf32x8(_mm512_castps256_ps512(_mm256_set_m128(dl, dl)), _mm256_set_m128(dh, dh), 1);
-        for (int iy = 0; iy < nrc_y; ++iy) {
-            d4s[iy] = _mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl)));
-        }
         auto m4 = _mm512_insertf32x8(_mm512_castps256_ps512(_mm256_set_m128(ml, ml)), _mm256_set_m128(mh, mh), 1);
         m4 = _mm512_mul_ps(m4, _mm512_set1_ps(-0.5f));
         auto slbits_l = _mm256_loadu_si256((const __m256i *)iq5l[ibl].scales_l);
@@ -3391,11 +3389,10 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
             auto scales1 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hd.val[ib+0]));
             auto scales2 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hd.val[ib+8]));
             auto iscales = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
-            auto scales = _mm512_cvtepi32_ps(iscales);
             scales1 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hm.val[ib+0]));
             scales2 = _mm256_cvtepi8_epi32(_mm_set1_epi32(hm.val[ib+8]));
-            iscales = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
-            auto scales_m = _mm512_mul_ps(m4, _mm512_cvtepi32_ps(iscales));
+            auto iscales_m = _mm512_inserti32x8(_mm512_castsi256_si512(scales1), scales2, 1);
+            auto scales_m = _mm512_mul_ps(m4, _mm512_cvtepi32_ps(iscales_m));
             auto lbits1 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq5l[ibl].qs+2*ib+0)),
                                              _mm256_loadu_si256((const __m256i *)iq5h[ibl].qs+2*ib+0), 1);
             auto lbits2 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq5l[ibl].qs+2*ib+1)),
@@ -3417,11 +3414,15 @@ static void mul_mat_q5_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm512_dpbusd_epi32(sumi, qx[1], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55)));
                 sumi = _mm512_dpbusd_epi32(sumi, qx[2], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa)));
                 sumi = _mm512_dpbusd_epi32(sumi, qx[3], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff)));
-                acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(scales, d4s[iy]), _mm512_cvtepi32_ps(sumi), acc[iy]);
+                isum[iy] = _mm512_add_epi32(isum[iy], _mm512_mullo_epi32(iscales, sumi));
                 float m8 = ((const float *)q8.y[iy][ibl].bsums)[ib];
                 acc[iy] = _mm512_fmadd_ps(scales_m, _mm512_set1_ps(m8), acc[iy]);
             }
         }
+        for (int iy = 0; iy < nrc_y; ++iy) {
+            acc[iy] = _mm512_fmadd_ps(_mm512_mul_ps(d4, _mm512_set1_ps(q8.scale(iy, ibl))), _mm512_cvtepi32_ps(isum[iy]), acc[iy]);
+            isum[iy] = _mm512_setzero_si512();
+        }
     }
     for (int iy = 0; iy < nrc_y; ++iy) {
         auto sum1 = _mm_add_ps(_mm512_extractf32x4_ps(acc[iy], 0), _mm512_extractf32x4_ps(acc[iy], 1));
@@ -3449,7 +3450,7 @@ static void mul_mat_q2_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
     static const uint8_t k_shuff[32] = {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15, 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
     auto shuff = _mm256_loadu_si256((const __m256i *)k_shuff);
 #ifdef HAVE_FANCY_SIMD
-    __m256  d4s[nrc_y];
+    __m256i isum[nrc_y] = {};
 #else
     auto m1 = _mm256_set1_epi16(1);
 #endif
@@ -3487,7 +3488,6 @@ static void mul_mat_q2_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm256_dpwssd_epi32(sumi, s4, _mm256_shuffle_epi32(bsums, 0xff));
                 auto d8 = _mm256_set1_ps(q8.scale(iy, ibl));
                 acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(m4, d8), _mm256_cvtepi32_ps(sumi), acc[iy]);
-                d4s[iy] = _mm256_mul_ps(d4, d8);
 #else
                 sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(s1, _mm256_shuffle_epi32(bsums, 0x00)));
                 sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(s2, _mm256_shuffle_epi32(bsums, 0x55)));
@@ -3507,9 +3507,7 @@ static void mul_mat_q2_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
         _mm256_storeu_si256((__m256i *)scales+1, all_scales2);
         for (int ib = 0; ib < QK_K/32; ++ib) {
             auto iscales = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i *)(scales + 8*ib)));
-#ifdef HAVE_FANCY_SIMD
-            auto scales = _mm256_cvtepi32_ps(iscales);
-#else
+#ifndef HAVE_FANCY_SIMD
             auto scales = _mm256_mul_ps(d4, _mm256_cvtepi32_ps(iscales));
 #endif
             auto lb = _mm256_loadu_si256((const __m256i *)iq2[ibl].qs+ib);
@@ -3525,7 +3523,7 @@ static void mul_mat_q2_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm256_dpbusd_epi32(sumi, qx[1], _mm256_shuffle_epi32(y, 0x55));
                 sumi = _mm256_dpbusd_epi32(sumi, qx[2], _mm256_shuffle_epi32(y, 0xaa));
                 sumi = _mm256_dpbusd_epi32(sumi, qx[3], _mm256_shuffle_epi32(y, 0xff));
-                acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(scales, d4s[iy]), _mm256_cvtepi32_ps(sumi), acc[iy]);
+                isum[iy] = _mm256_add_epi32(isum[iy], _mm256_mullo_epi32(iscales, sumi));
 #else
                 auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x00)),
                                               _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0x55)));
@@ -3541,6 +3539,13 @@ static void mul_mat_q2_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
 #endif
             }
         }
+#ifdef HAVE_FANCY_SIMD
+        for (int iy = 0; iy < nrc_y; ++iy) {
+            auto d4y = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(iy, ibl)));
+            acc[iy] = _mm256_fmadd_ps(d4y, _mm256_cvtepi32_ps(isum[iy]), acc[iy]);
+            isum[iy] = _mm256_setzero_si256();
+        }
+#endif
     }
     for (int iy = 0; iy < nrc_y; ++iy) {
         auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
@@ -3562,7 +3567,7 @@ static void mul_mat_q3_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
     static const uint8_t k_shuff[32] = {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15, 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
     auto shuff = _mm256_loadu_si256((const __m256i *)k_shuff);
 #ifdef HAVE_FANCY_SIMD
-    __m256  d4s[nrc_y];
+    __m256i isum[nrc_y];
 #else
     auto m1 = _mm256_set1_epi16(1);
 #endif
@@ -3575,11 +3580,7 @@ static void mul_mat_q3_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
     for (int ibl = 0; ibl < nbl; ++ibl) { // Block of 256
         auto dl = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq3[ibl].d));
         auto d4 = _mm256_set_m128(dl, dl);
-#ifdef HAVE_FANCY_SIMD
-        for (int iy = 0; iy < nrc_y; ++iy) {
-            d4s[iy] = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(iy, ibl)));
-        }
-#else
+#ifndef HAVE_FANCY_SIMD
         if constexpr (nrc_y == 1) {
             d4 = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(0, ibl)));
         }
@@ -3603,6 +3604,12 @@ static void mul_mat_q3_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
             auto s2 = MM256_SET_M128I(_mm256_extracti128_si256(t3, 1), _mm256_extracti128_si256(t1, 1)); // blocks 2, 3, 10, 11
             auto s3 = MM256_SET_M128I(_mm256_extracti128_si256(t4, 0), _mm256_extracti128_si256(t2, 0)); // blocks 4, 5, 12, 13
             auto s4 = MM256_SET_M128I(_mm256_extracti128_si256(t4, 1), _mm256_extracti128_si256(t2, 1)); // blocks 6, 7, 14, 15
+#ifdef HAVE_FANCY_SIMD
+            s1 = _mm256_mullo_epi16(s1, _mm256_set1_epi16(-4));
+            s2 = _mm256_mullo_epi16(s2, _mm256_set1_epi16(-4));
+            s3 = _mm256_mullo_epi16(s3, _mm256_set1_epi16(-4));
+            s4 = _mm256_mullo_epi16(s4, _mm256_set1_epi16(-4));
+#endif
             for (int iy = 0; iy < nrc_y; ++iy) {
                 auto bsums = q8.load_bsums(iy, ibl);
                 auto sumi = _mm256_setzero_si256();
@@ -3611,7 +3618,7 @@ static void mul_mat_q3_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm256_dpwssd_epi32(sumi, s2, _mm256_shuffle_epi32(bsums, 0x55));
                 sumi = _mm256_dpwssd_epi32(sumi, s3, _mm256_shuffle_epi32(bsums, 0xaa));
                 sumi = _mm256_dpwssd_epi32(sumi, s4, _mm256_shuffle_epi32(bsums, 0xff));
-                acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(d4s[iy], _mm256_set1_ps(-4.f)), _mm256_cvtepi32_ps(sumi), acc[iy]);
+                isum[iy] = sumi;
 #else
                 sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(s1, _mm256_shuffle_epi32(bsums, 0x00)));
                 sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(s2, _mm256_shuffle_epi32(bsums, 0x55)));
@@ -3627,9 +3634,7 @@ static void mul_mat_q3_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
         }
         for (int ib = 0; ib < QK_K/32; ++ib) {
             auto iscales = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i *)(scales + 8*ib)));
-#ifdef HAVE_FANCY_SIMD
-            auto scales = _mm256_cvtepi32_ps(iscales);
-#else
+#ifndef HAVE_FANCY_SIMD
             auto scales = _mm256_mul_ps(d4, _mm256_cvtepi32_ps(iscales));
 #endif
             auto lb = _mm256_loadu_si256((const __m256i *)iq3[ibl].qs+ib);
@@ -3647,7 +3652,7 @@ static void mul_mat_q3_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm256_dpbusd_epi32(sumi, qx[1], _mm256_shuffle_epi32(y, 0x55));
                 sumi = _mm256_dpbusd_epi32(sumi, qx[2], _mm256_shuffle_epi32(y, 0xaa));
                 sumi = _mm256_dpbusd_epi32(sumi, qx[3], _mm256_shuffle_epi32(y, 0xff));
-                acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(scales, d4s[iy]), _mm256_cvtepi32_ps(sumi), acc[iy]);
+                isum[iy] = _mm256_add_epi32(isum[iy], _mm256_mullo_epi32(iscales, sumi));
 #else
                 auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x00)),
                                               _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0x55)));
@@ -3664,6 +3669,12 @@ static void mul_mat_q3_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
 #endif
             }
         }
+#ifdef HAVE_FANCY_SIMD
+        for (int iy = 0; iy < nrc_y; ++iy) {
+            auto d4y = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(iy, ibl)));
+            acc[iy] = _mm256_fmadd_ps(d4y, _mm256_cvtepi32_ps(isum[iy]), acc[iy]);
+        }
+#endif
     }
     for (int iy = 0; iy < nrc_y; ++iy) {
         auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
@@ -3682,7 +3693,7 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
     static const uint8_t k_shuff[32] = {0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15, 0, 1, 8, 9, 2, 3, 10, 11, 4, 5, 12, 13, 6, 7, 14, 15};
     auto shuff = _mm256_loadu_si256((const __m256i *)k_shuff);
 #ifdef HAVE_FANCY_SIMD
-    __m256  d4s[nrc_y];
+    __m256i isum[nrc_y];
 #else
     auto m1 = _mm256_set1_epi16(1);
 #endif
@@ -3694,11 +3705,7 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
     for (int ibl = 0; ibl < nbl; ++ibl) { // Block of 256
         auto dl = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq6[ibl].d));
         auto d4 = _mm256_set_m128(dl, dl);
-#ifdef HAVE_FANCY_SIMD
-        for (int iy = 0; iy < nrc_y; ++iy) {
-            d4s[iy] = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(iy, ibl)));
-        }
-#else
+#ifndef HAVE_FANCY_SIMD
         if constexpr (nrc_y == 1) {
             d4 = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(0, ibl)));
         }
@@ -3715,6 +3722,12 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
             auto s2 = MM256_SET_M128I(_mm256_extracti128_si256(t3, 1), _mm256_extracti128_si256(t1, 1)); // blocks 2, 3, 10, 11
             auto s3 = MM256_SET_M128I(_mm256_extracti128_si256(t4, 0), _mm256_extracti128_si256(t2, 0)); // blocks 4, 5, 12, 13
             auto s4 = MM256_SET_M128I(_mm256_extracti128_si256(t4, 1), _mm256_extracti128_si256(t2, 1)); // blocks 6, 7, 14, 15
+#ifdef HAVE_FANCY_SIMD
+            s1 = _mm256_mullo_epi16(s1, _mm256_set1_epi16(-32));
+            s2 = _mm256_mullo_epi16(s2, _mm256_set1_epi16(-32));
+            s3 = _mm256_mullo_epi16(s3, _mm256_set1_epi16(-32));
+            s4 = _mm256_mullo_epi16(s4, _mm256_set1_epi16(-32));
+#endif
             for (int iy = 0; iy < nrc_y; ++iy) {
                 auto bsums = q8.load_bsums(iy, ibl);
                 auto sumi = _mm256_setzero_si256();
@@ -3723,7 +3736,7 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm256_dpwssd_epi32(sumi, s2, _mm256_shuffle_epi32(bsums, 0x55));
                 sumi = _mm256_dpwssd_epi32(sumi, s3, _mm256_shuffle_epi32(bsums, 0xaa));
                 sumi = _mm256_dpwssd_epi32(sumi, s4, _mm256_shuffle_epi32(bsums, 0xff));
-                acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(d4s[iy], _mm256_set1_ps(-32.f)), _mm256_cvtepi32_ps(sumi), acc[iy]);
+                isum[iy] = sumi;
 #else
                 sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(s1, _mm256_shuffle_epi32(bsums, 0x00)));
                 sumi = _mm256_add_epi32(sumi, _mm256_madd_epi16(s2, _mm256_shuffle_epi32(bsums, 0x55)));
@@ -3740,9 +3753,7 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
         const uint32_t * scales = (const uint32_t *)iq6[ibl].scales;
         for (int ib = 0; ib < QK_K/32; ++ib) {
             auto iscales = _mm256_cvtepi8_epi32(_mm_loadl_epi64((const __m128i *)(scales + 2*ib)));
-#ifdef HAVE_FANCY_SIMD
-            auto scales = _mm256_cvtepi32_ps(iscales);
-#else
+#ifndef HAVE_FANCY_SIMD
             auto scales = _mm256_mul_ps(d4, _mm256_cvtepi32_ps(iscales));
 #endif
             auto lbits1 = _mm256_loadu_si256((const __m256i *)iq6[ibl].ql+2*ib+0);
@@ -3760,7 +3771,7 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
                 sumi = _mm256_dpbusd_epi32(sumi, qx[1], _mm256_shuffle_epi32(y, 0x55));
                 sumi = _mm256_dpbusd_epi32(sumi, qx[2], _mm256_shuffle_epi32(y, 0xaa));
                 sumi = _mm256_dpbusd_epi32(sumi, qx[3], _mm256_shuffle_epi32(y, 0xff));
-                acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(scales, d4s[iy]), _mm256_cvtepi32_ps(sumi), acc[iy]);
+                isum[iy] = _mm256_add_epi32(isum[iy], _mm256_mullo_epi32(iscales, sumi));
 #else
                 auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(qx[0], _mm256_shuffle_epi32(y, 0x00)),
                                               _mm256_maddubs_epi16(qx[1], _mm256_shuffle_epi32(y, 0x55)));
@@ -3776,6 +3787,12 @@ static void mul_mat_q6_k_r4_q8_k(int n, const void * vx, size_t bx, const DataIn
 #endif
             }
         }
+#ifdef HAVE_FANCY_SIMD
+        for (int iy = 0; iy < nrc_y; ++iy) {
+            auto d4y = _mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(iy, ibl)));
+            acc[iy] = _mm256_fmadd_ps(d4y, _mm256_cvtepi32_ps(isum[iy]), acc[iy]);
+        }
+#endif
     }
     for (int iy = 0; iy < nrc_y; ++iy) {
         auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
```
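The q3_k_r4 and q6_k_r4 paths need one extra step to keep everything in integers: the constant quant offset (-4 for q3_k, -32 for q6_k) used to enter through a float multiply (`_mm256_set1_ps(-4.f)` / `-32.f`) on the block-sum correction. The diff folds it into the int16 scale vectors with `_mm256_mullo_epi16` before the `_mm256_dpwssd_epi32` against the q8 block sums, so the correction lands directly in `isum[iy]`. A hedged sketch of that piece, with a hypothetical helper name and assumed shapes:

```cpp
// Sketch of the folded bias correction in the q3_k/q6_k HAVE_FANCY_SIMD
// paths (hypothetical free function, not the kernel's own). The quant
// offset is pre-multiplied into the int16 scales, so the dot product with
// the q8 block sums directly yields the int32 correction that seeds
// isum[iy], replacing a separate float FMA with -4.f / -32.f.
#include <immintrin.h>

__m256i seed_isum_with_bias(__m256i s1, __m256i s2, __m256i s3, __m256i s4,
                            __m256i bsums, short offset /* -4 or -32 */) {
    const __m256i k = _mm256_set1_epi16(offset);
    // fold the offset into the scales; |offset * scale| fits in int16
    s1 = _mm256_mullo_epi16(s1, k);
    s2 = _mm256_mullo_epi16(s2, k);
    s3 = _mm256_mullo_epi16(s3, k);
    s4 = _mm256_mullo_epi16(s4, k);
    // int16 x int16 -> int32 multiply-accumulate against the block sums
    __m256i sumi = _mm256_setzero_si256();
    sumi = _mm256_dpwssd_epi32(sumi, s1, _mm256_shuffle_epi32(bsums, 0x00));
    sumi = _mm256_dpwssd_epi32(sumi, s2, _mm256_shuffle_epi32(bsums, 0x55));
    sumi = _mm256_dpwssd_epi32(sumi, s3, _mm256_shuffle_epi32(bsums, 0xaa));
    sumi = _mm256_dpwssd_epi32(sumi, s4, _mm256_shuffle_epi32(bsums, 0xff));
    return sumi;   // becomes the initial isum[iy] for this super-block
}
```

In the actual diff the `_mm256_mullo_epi16` calls are hoisted outside the row loop, so the folded scales are computed once per super-block and reused for every `iy`; this is also why the q3_k/q6_k flush loops need no `isum[iy] = _mm256_setzero_si256()`, since `isum[iy] = sumi` overwrites the accumulator at the start of each super-block.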