author     Kawrakow <iwankawrakow@gmail.com>    2024-12-12 16:04:20 +0100
committer  GitHub <noreply@github.com>          2024-12-12 16:04:20 +0100
commit     2700d3af36e5ce11f6c2c9644130635f83bae6d5 (patch)
tree       7e3a65fd9c8318fd654f83f7431b906cc017b34a /ggml/src/iqk/iqk_quantize.cpp
parent     aecc95c0cabc6604642e7bc4a8c9e5cb5233ebc4 (diff)
IQ4_K_R4 (#138)
* iq4_k_r4: WIP
* iq4_k_r4: Zen4 and hopefully AVX2
On Zen4 we get PP-512(LLaMA-3.1-8B) = 232.6 t/s, up from 182.2 t/s
for iq4_k. Applying the extra shift costs a ~6% performance penalty.
* iq4_k_r4: AVX2
PP-512 = 227.60 t/s. The shifts are really costly.
* iq4_k_r4: NEON
We get PP-512(LLaMA-3.1-8B) = 108 t/s, up from 58.2 t/s for iq4_k.
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
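The "_r4" in the type name refers to row interleaving: four consecutive rows are quantized as plain iq4_k and then repacked so that each block_iq4_k_r4 super-block carries block ibl of all four rows, letting the matmul kernels load four rows' worth of data with contiguous reads. The toy sketch below illustrates only the block-level gathering idea; it is not the actual ik_llama.cpp code (the real block_iq4_k_r4 additionally interleaves nibbles, scales and the extra bits at sub-block granularity, as the diff further down shows), and the Block/BlockR4 stand-in types are illustrative.

#include <cstdint>

struct Block   { uint8_t data[16]; };   // stand-in for one quantized block of a single row
struct BlockR4 { Block   rows[4];   };  // stand-in for block_iq4_k_r4: block ibl of 4 rows, side by side

// Gather block ibl of rows row..row+3 into one R4 super-block, for every group of 4 rows.
static void repack_r4_toy(int nrows, int nblock, const Block * x, BlockR4 * y) {
    for (int row = 0; row < nrows; row += 4) {
        for (int ibl = 0; ibl < nblock; ++ibl)
            for (int k = 0; k < 4; ++k)
                y[ibl].rows[k] = x[k*nblock + ibl];
        x += 4*nblock;   // advance past the 4 source rows
        y += nblock;     // one row of R4 super-blocks replaces 4 plain rows
    }
}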
Diffstat (limited to 'ggml/src/iqk/iqk_quantize.cpp')
-rw-r--r--  ggml/src/iqk/iqk_quantize.cpp  114
1 file changed, 114 insertions, 0 deletions
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 49e2cf8e..438a277e 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -4552,3 +4552,117 @@ void vec_dot_q2_k_r4_q8_k(int n, float * s, size_t bs, const void * vx, size_t b
     GGML_UNUSED(by);
 }
+//
+// ========================================= iq4_k_r4
+//
+
+void quantize_row_iq4_k_r4_ref(const float * x, block_iq4_k_r4 * y, int64_t k) {
+    quantize_iq4_k_r4(x, (void *)y, 4, k/4, nullptr);
+}
+
+void quantize_row_iq4_k_r4(const float * x, void * y, int64_t k) {
+    quantize_iq4_k_r4(x, y, 4, k/4, nullptr);
+}
+
+static void repack_iq4_k(int nrows, int n_per_row, const block_iq4_k * x, block_iq4_k_r4 * y) {
+    GGML_ASSERT(nrows%4 == 0);
+    GGML_ASSERT(n_per_row%QK_K == 0);
+    int nblock = n_per_row/QK_K;
+    const block_iq4_k * x4[4];
+    for (int row = 0; row < nrows; row += 4) {
+        for (int k = 0; k < 4; ++k) x4[k] = x + nblock*k;
+        for (int ibl = 0; ibl < nblock; ++ibl) {
+            std::memset(y[ibl].extra, 0, 8);
+            std::memset(y[ibl].scales_l, 0, QK_K/8);
+            std::memset(y[ibl].scales_h, 0, QK_K/16);
+            for (int k = 0; k < 4; ++k) {
+                y[ibl].d[k] = x4[k][ibl].d;
+                auto extra = x4[k][ibl].extra;
+                for (int ib = 0; ib < QK_K/32; ++ib) {
+                    if (extra & 1) y[ibl].extra[k+0] |= (1 << ib);
+                    if (extra & 2) y[ibl].extra[k+4] |= (1 << ib);
+                    extra >>= 2;
+                    uint8_t sl1 = x4[k][ibl].scales_l[ib] & 0xf;
+                    uint8_t sl2 = x4[k][ibl].scales_l[ib] >> 4;
+                    uint8_t sh  = x4[k][ibl].scales_h[ib/2] >> 4*(ib%2);
+                    uint8_t sh1 = (sh >> 0) & 3;
+                    uint8_t sh2 = (sh >> 2) & 3;
+                    int i = 8*ib + k;
+                    y[ibl].scales_l[i%32] |= (sl1 << 4*(i/32));
+                    y[ibl].scales_h[i%16] |= (sh1 << 2*(i/16));
+                    i += 4;
+                    y[ibl].scales_l[i%32] |= (sl2 << 4*(i/32));
+                    y[ibl].scales_h[i%16] |= (sh2 << 2*(i/16));
+                }
+            }
+            for (int ib = 0; ib < QK_K/32; ++ib) {
+                for (int k = 0; k < 4; ++k) for (int i = 0; i < 4; ++i) {
+                    y[ibl].qs[64*ib+4*k+i+ 0] = (x4[k][ibl].qs[16*ib+i+0] & 0xf) | ((x4[k][ibl].qs[16*ib+i+ 8] & 0x0f) << 4);  //  0....3 +  8...11 from each row
+                    y[ibl].qs[64*ib+4*k+i+16] = (x4[k][ibl].qs[16*ib+i+0] >>  4) | ((x4[k][ibl].qs[16*ib+i+ 8] & 0xf0));       // 16...19 + 24...27 from each row
+                    y[ibl].qs[64*ib+4*k+i+32] = (x4[k][ibl].qs[16*ib+i+4] & 0xf) | ((x4[k][ibl].qs[16*ib+i+12] & 0x0f) << 4);  //  4....7 + 12...15 from each row
+                    y[ibl].qs[64*ib+4*k+i+48] = (x4[k][ibl].qs[16*ib+i+4] >>  4) | ((x4[k][ibl].qs[16*ib+i+12] & 0xf0));       // 20...23 + 28...31 from each row
+                }
+            }
+        }
+        x += 4*nblock;
+        y += nblock;
+    }
+}
+
+size_t quantize_iq4_k_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+    GGML_ASSERT(nrows%4 == 0);
+    GGML_ASSERT(n_per_row%QK_K == 0);
+    char * qcur = (char *)dst;
+    auto row_size = ggml_row_size(GGML_TYPE_IQ4_K, n_per_row);
+    std::vector<char> qtmp(4*row_size);
+    for (int row = 0; row < nrows; row += 4) {
+        quantize_iq4_k(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
+        repack_iq4_k(4, n_per_row, (const block_iq4_k *)qtmp.data(), (block_iq4_k_r4 *)qcur);
+        qcur += 4*row_size;
+        src += 4*n_per_row;
+    }
+    return nrows*row_size;
+}
+
+void dequantize_row_iq4_k_r4(const block_iq4_k_r4 * x, float * y, int64_t k) {
+    auto n_per_row = k/4;
+    float * y4[4] = {y, y + n_per_row, y + 2*n_per_row, y + 3*n_per_row};
+    int nblock = n_per_row/QK_K;
+    for (int ibl = 0; ibl < nblock; ++ibl) {
+        for (int k = 0; k < 4; ++k) {
+            const float d = GGML_FP16_TO_FP32(x[ibl].d[k]);
+            for (int ib = 0; ib < QK_K/32; ++ib) {
+                int is = 8*ib + k;
+                float dl1 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
+                is += 4;
+                float dl2 = d * ((((x[ibl].scales_l[is%32] >> 4*(is/32)) & 0xf) | (((x[ibl].scales_h[is%16] >> 2*(is/16)) & 3) << 4)) - 32);
+                auto values1 = iq4k_values + (x[ibl].extra[k+0] & (1 << ib) ? 16 : 0);
+                auto values2 = iq4k_values + (x[ibl].extra[k+4] & (1 << ib) ? 16 : 0);
+                for (int i = 0; i < 4; ++i) {
+                    y4[k][QK_K*ibl+32*ib+i+ 0] = dl1 * values1[x[ibl].qs[64*ib+4*k+i+ 0] & 0xf];
+                    y4[k][QK_K*ibl+32*ib+i+ 8] = dl1 * values1[x[ibl].qs[64*ib+4*k+i+ 0] >>  4];
+                    y4[k][QK_K*ibl+32*ib+i+16] = dl2 * values2[x[ibl].qs[64*ib+4*k+i+16] & 0xf];
+                    y4[k][QK_K*ibl+32*ib+i+24] = dl2 * values2[x[ibl].qs[64*ib+4*k+i+16] >>  4];
+                    y4[k][QK_K*ibl+32*ib+i+ 4] = dl1 * values1[x[ibl].qs[64*ib+4*k+i+32] & 0xf];
+                    y4[k][QK_K*ibl+32*ib+i+12] = dl1 * values1[x[ibl].qs[64*ib+4*k+i+32] >>  4];
+                    y4[k][QK_K*ibl+32*ib+i+20] = dl2 * values2[x[ibl].qs[64*ib+4*k+i+48] & 0xf];
+                    y4[k][QK_K*ibl+32*ib+i+28] = dl2 * values2[x[ibl].qs[64*ib+4*k+i+48] >>  4];
+                }
+            }
+        }
+    }
+}
+
+void vec_dot_iq4_k_r4_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+    if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ4_K_R4, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+        return;
+    }
+#endif
+    GGML_ASSERT(n%QK4_NL == 0);
+    GGML_ASSERT(nrc == 1);
+    GGML_UNUSED(bs);
+    GGML_UNUSED(bx);
+    GGML_UNUSED(by);
+}
+
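For reference, a minimal usage sketch of the entry points this patch adds, assuming the declarations from the iqk/ggml headers are available; NROWS, N_PER_ROW and roundtrip_example are illustrative names, not part of the patch.

#include <vector>

constexpr int64_t NROWS     = 8;      // must be a multiple of 4
constexpr int64_t N_PER_ROW = 4096;   // must be a multiple of QK_K (256)

void roundtrip_example(const float * src /* NROWS*N_PER_ROW floats */, float * out) {
    // Same per-row size as plain iq4_k, exactly as quantize_iq4_k_r4() above computes it.
    size_t row_size = ggml_row_size(GGML_TYPE_IQ4_K, N_PER_ROW);
    std::vector<char> packed(NROWS*row_size);

    // Quantize: rows are processed in groups of 4 and repacked into block_iq4_k_r4.
    quantize_iq4_k_r4(src, packed.data(), NROWS, N_PER_ROW, nullptr /* no importance matrix */);

    // Dequantize: one call handles one group of 4 interleaved rows; its last
    // argument counts the elements of all 4 rows in that group.
    for (int64_t row = 0; row < NROWS; row += 4) {
        auto x = (const block_iq4_k_r4 *)(packed.data() + row*row_size);
        dequantize_row_iq4_k_r4(x, out + row*N_PER_ROW, 4*N_PER_ROW);
    }
}

In normal use these functions are presumably reached through ggml's type-traits table rather than called directly; the sketch is only meant to make the calling convention (rows in multiples of 4, element counts spanning the whole 4-row group) concrete.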