Diffstat (limited to 'ggml')
-rw-r--r--  ggml/include/ggml.h            |   4
-rw-r--r--  ggml/src/ggml-common.h         |  14
-rw-r--r--  ggml/src/ggml-quants.c         |   1
-rw-r--r--  ggml/src/ggml.c                |  26
-rw-r--r--  ggml/src/iqk/iqk_mul_mat.cpp   | 189
-rw-r--r--  ggml/src/iqk/iqk_quantize.cpp  | 142
-rw-r--r--  ggml/src/iqk/iqk_quantize.h    |   6
7 files changed, 366 insertions(+), 16 deletions(-)
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 2358fb76..99c39b9c 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -408,8 +408,10 @@ extern "C" {
GGML_TYPE_IQ4_KSS = 146,
GGML_TYPE_Q4_0_R4 = 202,
+ GGML_TYPE_Q5_0_R4 = 206,
GGML_TYPE_Q8_0_R4 = 208,
GGML_TYPE_IQ4_NL_X4 = 220,
+ GGML_TYPE_Q6_0_R4 = 233,
GGML_TYPE_COUNT,
};
@@ -471,7 +473,9 @@ extern "C" {
//
GGML_FTYPE_MOSTLY_Q4_0_R4 = 202, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_0_R4 = 207, // except 1d tensors
+ GGML_FTYPE_MOSTLY_Q5_0_R4 = 208, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_NL_X4 = 219, // except 1d tensors
+ GGML_FTYPE_MOSTLY_Q6_0_R4 = 227, // except 1d tensors
};
// available tensor operations:
diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index 2af3323d..fb87a602 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -182,6 +182,13 @@ typedef struct {
} block_q5_0;
static_assert(sizeof(block_q5_0) == sizeof(ggml_half) + sizeof(uint32_t) + QK5_0 / 2, "wrong q5_0 block size/padding");
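+// q5_0_r4: four q5_0 rows interleaved block-by-block; d[k] is the scale of row k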
+typedef struct {
+ ggml_half d[4]; // deltas for the 4 interleaved rows
+ uint8_t qh[QK5_0/2]; // 5-th bit of quants
+ uint8_t qs[QK5_0*2]; // nibbles / quants
+} block_q5_0_r4;
+static_assert(sizeof(block_q5_0_r4) == 4*sizeof(ggml_half) + QK5_0*2 + QK5_0/2, "wrong q5_0_r4 block size/padding");
+
#define QK5_1 32
typedef struct {
GGML_SCALE_TYPE1(m, dm);
@@ -198,6 +205,13 @@ typedef struct {
} block_q6_0;
static_assert(sizeof(block_q6_0) == sizeof(ggml_half) + QK6_0/2 + QK6_0/4, "wrong q6_0 block size/padding");
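+// q6_0_r4: four q6_0 rows interleaved block-by-block; qh packs the two high bits of each quant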
+typedef struct {
+ ggml_half d[4]; // deltas for the 4 interleaved rows
+ uint8_t qh[QK6_0]; // 5+6-th bit of quants
+ uint8_t qs[QK6_0*2]; // nibbles / quants
+} block_q6_0_r4;
+static_assert(sizeof(block_q6_0_r4) == 4*sizeof(ggml_half) + QK6_0*2 + QK6_0, "wrong q6_0_r4 block size/padding");
+
#define QK8_0 32
typedef struct {
ggml_half d; // delta
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index 3140fc19..1953fb7e 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -15198,6 +15198,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
case GGML_TYPE_IQ4_KSS: break;
case GGML_TYPE_IQ4_NL_X4: break;
case GGML_TYPE_Q4_0_R4: break;
+ case GGML_TYPE_Q5_0_R4: break;
case GGML_TYPE_Q8_0_R4: break;
case GGML_TYPE_Q4_0_4_4:
case GGML_TYPE_Q4_0_4_8:
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index fd65ae67..0eb76a07 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1296,6 +1296,23 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 0,
},
+ [GGML_TYPE_Q5_0_R4] = {
+ .type_name = "q5_0_r4",
+ .blck_size = QK5_0,
+ .type_size = sizeof(block_q5_0),
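+ // block_q5_0_r4 packs 4 rows in exactly 4*sizeof(block_q5_0) bytes,
+ // so the per-row type_size is that of plain q5_0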
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_q5_0_r4,
+ .from_float = quantize_row_q5_0_r4,
+ .from_float_ref = (ggml_from_float_t)quantize_row_q5_0_r4_ref,
+ .vec_dot = vec_dot_q5_0_r4_q8_0,
+#if GGML_USE_IQK_MULMAT && defined __AVX2__
+ .vec_dot_type = GGML_TYPE_Q8_1,
+#else
+ .vec_dot_type = GGML_TYPE_Q8_0,
+#endif
+ .nrows = 1,
+ .row_meta_size = 0,
+ },
};
// For internal test use
@@ -3956,6 +3973,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
case GGML_FTYPE_MOSTLY_IQ4_NL_X4: wtype = GGML_TYPE_IQ4_NL_X4;break;
case GGML_FTYPE_MOSTLY_Q4_0_R4: wtype = GGML_TYPE_Q4_0_R4; break;
+ case GGML_FTYPE_MOSTLY_Q5_0_R4: wtype = GGML_TYPE_Q5_0_R4; break;
case GGML_FTYPE_MOSTLY_Q8_0_R4: wtype = GGML_TYPE_Q8_0_R4; break;
case GGML_FTYPE_MOSTLY_IQ4_XS: wtype = GGML_TYPE_IQ4_XS; break;
case GGML_FTYPE_MOSTLY_IQ4_KS: wtype = GGML_TYPE_IQ4_KS; break;
@@ -10482,6 +10500,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_Q4_0_R4:
+ case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -10927,6 +10946,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_Q4_0_R4:
+ case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -11069,6 +11089,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_Q4_0_R4:
+ case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -14257,6 +14278,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_Q4_0_R4:
+ case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -14639,6 +14661,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_Q4_0_R4:
+ case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -14915,6 +14938,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_Q4_0_R4:
+ case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -15518,6 +15542,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_NL_X4:
case GGML_TYPE_Q4_0_R4:
+ case GGML_TYPE_Q5_0_R4:
case GGML_TYPE_Q8_0_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -22347,6 +22372,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_NL_X4: result = quantize_iq4_nl_x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q4_0_R4: result = quantize_q4_0_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_Q5_0_R4: result = quantize_q5_0_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q8_0_R4: result = quantize_q8_0_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_KS: result = quantize_iq4_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index bbf7e379..4cdc1a08 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -2283,6 +2283,123 @@ static void mul_mat_q4_0_r4_q8_1(int n, const void * vx, size_t bx, const DataIn
}
#endif
+template <int nrc_y>
+static void mul_mat_q5_0_r4_q8_1_avx2(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ GGML_ASSERT(nrc_x%4 == 0);
+ Q8<nrc_y, block_q8_1_x4> q8(info);
+ auto m4 = _mm256_set1_epi8(0xf);
+ auto m5 = _mm256_set1_epi8(0x10);
+ auto m1 = _mm256_set1_epi16(1);
+ int nb = n / QK5_0;
+ GGML_ASSERT(nb%4 == 0);
+ __m256 acc[nrc_y] = {};
+ for (int ix = 0; ix < nrc_x; ix += 4) {
+ const block_q5_0_r4 * iq5 = (const block_q5_0_r4 *)((const char *)vx + ix*bx);
+ for (int ib4 = 0; ib4 < nb/4; ++ib4) {
+ for (int k = 0; k < 4; ++k) {
+ auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq5[4*ib4+k].d));
+ auto scales = _mm256_set_m128(scales128, scales128);
+ auto scales_m = _mm256_mul_ps(scales, _mm256_set1_ps(-16.f)); // q5_0 stores quants with a +16 offset
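+ // each block interleaves 4 rows: qs holds low nibbles in 16-byte groups of 4 bytes
+ // per row, qh holds the 5th bits (8 per byte). hb puts qh in the low 128 bits and
+ // qh>>1 in the high 128 bits so one shift per group aligns the right bit with m5 (0x10).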
+ auto bits1 = _mm256_loadu_si256((const __m256i *)iq5[4*ib4+k].qs+0);
+ auto bits2 = _mm256_loadu_si256((const __m256i *)iq5[4*ib4+k].qs+1);
+ auto hbits = _mm_loadu_si128((const __m128i *)iq5[4*ib4+k].qh);
+ auto hb = MM256_SET_M128I(_mm_srli_epi16(hbits, 1), hbits);
+ auto q1 = _mm256_and_si256(bits1, m4) | _mm256_and_si256(_mm256_slli_epi16(hb, 4), m5);
+ auto q2 = _mm256_and_si256(bits2, m4) | _mm256_and_si256(_mm256_slli_epi16(hb, 2), m5);
+ auto q3 = _mm256_and_si256(_mm256_srli_epi16(bits1, 4), m4) | _mm256_and_si256(hb, m5);
+ auto q4 = _mm256_and_si256(_mm256_srli_epi16(bits2, 4), m4) | _mm256_and_si256(_mm256_srli_epi16(hb, 2), m5);
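+ // q1..q4 hold unsigned quants for positions (0..3,16..19), (4..7,20..23),
+ // (8..11,24..27), (12..15,28..31) of the 4 rows; the shuffles below broadcast
+ // the matching 4 q8 bytes, and the +16 offset is corrected afterwards via
+ // scales_m and the q8_1 row sums stored in d[k+4]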
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
+ auto sumi1 = _mm256_add_epi16(_mm256_maddubs_epi16(q1, _mm256_shuffle_epi32(y, 0x00)),
+ _mm256_maddubs_epi16(q2, _mm256_shuffle_epi32(y, 0x55)));
+ auto sumi2 = _mm256_add_epi16(_mm256_maddubs_epi16(q3, _mm256_shuffle_epi32(y, 0xaa)),
+ _mm256_maddubs_epi16(q4, _mm256_shuffle_epi32(y, 0xff)));
+ auto sumi = _mm256_madd_epi16(m1, _mm256_add_epi16(sumi1, sumi2));
+ auto d4d8 = _mm256_mul_ps(scales, _mm256_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k])));
+ acc[iy] = _mm256_fmadd_ps(d4d8, _mm256_cvtepi32_ps(sumi), acc[iy]);
+ acc[iy] = _mm256_fmadd_ps(scales_m, _mm256_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k+4])), acc[iy]);
+ }
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
+ info.store(ix, iy, sum);
+ acc[iy] = _mm256_setzero_ps();
+ }
+ }
+}
+
+#ifdef HAVE_FANCY_SIMD
+template <int nrc_y>
+static void mul_mat_q5_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ if constexpr (nrc_y == 1) {
+ mul_mat_q5_0_r4_q8_1_avx2<1>(n, vx, bx, info, nrc_x);
+ } else {
+ GGML_ASSERT(nrc_x%8 == 0);
+ Q8<nrc_y, block_q8_1_x4> q8(info);
+ auto m4 = _mm512_set1_epi8(0xf);
+ auto m5 = _mm512_set1_epi8(0x10);
+ int nb = n / QK5_0;
+ GGML_ASSERT(nb%4 == 0);
+ __m512 acc[2*nrc_y] = {};
+ __m512i qx[4];
+ for (int ix = 0; ix < nrc_x; ix += 8) {
+ const block_q5_0_r4 * iq5l = (const block_q5_0_r4 *)((const char *)vx + (ix+0)*bx);
+ const block_q5_0_r4 * iq5h = (const block_q5_0_r4 *)((const char *)vx + (ix+4)*bx);
+ for (int ib4 = 0; ib4 < nb/4; ++ib4) {
+ for (int k = 0; k < 4; ++k) {
+ auto scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq5l[4*ib4+k].d));
+ auto scales1 = _mm256_set_m128(scales128, scales128);
+ scales128 = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq5h[4*ib4+k].d));
+ auto scales2 = _mm256_set_m128(scales128, scales128);
+ auto scales = _mm512_insertf32x8(_mm512_castps256_ps512(scales1), scales2, 1);
+ auto scales_m = _mm512_mul_ps(scales, _mm512_set1_ps(-16.f)); // q5_0 stores quants with a +16 offset
+ auto bits1 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq5l[4*ib4+k].qs+0)),
+ _mm256_loadu_si256((const __m256i *)iq5h[4*ib4+k].qs+0), 1);
+ auto bits2 = _mm512_inserti32x8(_mm512_castsi256_si512(_mm256_loadu_si256((const __m256i *)iq5l[4*ib4+k].qs+1)),
+ _mm256_loadu_si256((const __m256i *)iq5h[4*ib4+k].qs+1), 1);
+ auto hbits1 = _mm_loadu_si128((const __m128i *)iq5l[4*ib4+k].qh);
+ auto hbits2 = _mm_loadu_si128((const __m128i *)iq5h[4*ib4+k].qh);
+ auto hb1 = MM256_SET_M128I(_mm_srli_epi16(hbits1, 1), hbits1);
+ auto hb2 = MM256_SET_M128I(_mm_srli_epi16(hbits2, 1), hbits2);
+ auto hb = _mm512_inserti32x8(_mm512_castsi256_si512(hb1), hb2, 1);
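+ // rows ix..ix+3 occupy the low 256 bits, rows ix+4..ix+7 the high 256 bits;
+ // the decode below mirrors the AVX2 path, just on 8 rows at once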
+ qx[0] = _mm512_and_si512(bits1, m4) | _mm512_and_si512(_mm512_slli_epi16(hb, 4), m5);
+ qx[1] = _mm512_and_si512(bits2, m4) | _mm512_and_si512(_mm512_slli_epi16(hb, 2), m5);
+ qx[2] = _mm512_and_si512(_mm512_srli_epi16(bits1, 4), m4) | _mm512_and_si512(hb, m5);
+ qx[3] = _mm512_and_si512(_mm512_srli_epi16(bits2, 4), m4) | _mm512_and_si512(_mm512_srli_epi16(hb, 2), m5);
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y8 = _mm256_loadu_si256((const __m256i*)q8.y[iy][ib4].qs+k);
+ auto y = _mm512_inserti32x8(_mm512_castsi256_si512(y8), y8, 1);
+ auto sumi = _mm512_setzero_si512();
+ sumi = _mm512_dpbusd_epi32(sumi, qx[0], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x00)));
+ sumi = _mm512_dpbusd_epi32(sumi, qx[1], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0x55)));
+ sumi = _mm512_dpbusd_epi32(sumi, qx[2], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xaa)));
+ sumi = _mm512_dpbusd_epi32(sumi, qx[3], _mm512_shuffle_epi32(y, _MM_PERM_ENUM(0xff)));
+ auto dy = _mm512_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k]));
+ acc[2*iy+0] = _mm512_fmadd_ps(_mm512_mul_ps(scales, dy), _mm512_cvtepi32_ps(sumi), acc[2*iy+0]);
+ acc[2*iy+1] = _mm512_fmadd_ps(scales_m, _mm512_set1_ps(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k+4])), acc[2*iy+1]);
+ }
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto sum512 = _mm512_add_ps(acc[2*iy+0], acc[2*iy+1]);
+ acc[2*iy+0] = acc[2*iy+1] = _mm512_setzero_ps();
+ auto sum1 = _mm_add_ps(_mm512_extractf32x4_ps(sum512, 0), _mm512_extractf32x4_ps(sum512, 1));
+ auto sum2 = _mm_add_ps(_mm512_extractf32x4_ps(sum512, 2), _mm512_extractf32x4_ps(sum512, 3));
+ info.store(ix+0, iy, sum1);
+ info.store(ix+4, iy, sum2);
+ }
+ }
+ }
+}
+#else
+template <int nrc_y>
+static void mul_mat_q5_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ mul_mat_q5_0_r4_q8_1_avx2<nrc_y>(n, vx, bx, info, nrc_x);
+}
+#endif
+
#ifdef HAVE_FANCY_SIMD
template <int nrc_y>
static void mul_mat_q8_0_r4_q8_1(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
@@ -4398,6 +4515,18 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
mm.funcs[7] = mul_mat_q4_0_r4_q8_1<8>;
expected_typeB = GGML_TYPE_Q8_1;
break;
+ case GGML_TYPE_Q5_0_R4:
+ assert (ne00 % QK5_0 == 0);
+ mm.funcs[0] = mul_mat_q5_0_r4_q8_1<1>;
+ mm.funcs[1] = mul_mat_q5_0_r4_q8_1<2>;
+ mm.funcs[2] = mul_mat_q5_0_r4_q8_1<3>;
+ mm.funcs[3] = mul_mat_q5_0_r4_q8_1<4>;
+ mm.funcs[4] = mul_mat_q5_0_r4_q8_1<5>;
+ mm.funcs[5] = mul_mat_q5_0_r4_q8_1<6>;
+ mm.funcs[6] = mul_mat_q5_0_r4_q8_1<7>;
+ mm.funcs[7] = mul_mat_q5_0_r4_q8_1<8>;
+ expected_typeB = GGML_TYPE_Q8_1;
+ break;
case GGML_TYPE_Q8_0_R4:
assert (ne00 % QK4_NL == 0);
mm.funcs[0] = mul_mat_q8_0_r4_q8_1<1>;
@@ -6952,6 +7081,55 @@ void mul_mat_q4_0_r4_q8_0(int n, const void * vx, size_t bx, const DataInfo& inf
}
template <int nrc_y>
+void mul_mat_q5_0_r4_q8_0(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ GGML_ASSERT(nrc_x%4 == 0);
+ Q8<nrc_y, block_q8_0_x4> q8(info);
+ auto m4 = vdupq_n_u8(0x0f);
+ auto m5 = vdupq_n_u8(0x10);
+ auto m16 = vdupq_n_s8(-16);
+ int nb = n / QK5_0;
+ GGML_ASSERT(nb%4 == 0);
+ int8x16_t qx[8];
+ float32x4_t acc[nrc_y] = {};
+ for (int ix = 0; ix < nrc_x; ix += 4) {
+ const block_q5_0_r4 * iq5 = (const block_q5_0_r4 *)((const char *)vx + ix*bx);
+ for (int ib4 = 0; ib4 < nb/4; ++ib4) {
+ for (int k = 0; k < 4; ++k) {
+ auto scales = vcvt_f32_f16(vld1_f16((const float16_t *)iq5[4*ib4+k].d));
+ auto lbits = vld1q_u8_x4(iq5[4*ib4+k].qs);
+ auto hbits = vld1q_u8(iq5[4*ib4+k].qh);
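+ // lbits: 4 groups of 16 bytes (4 bytes per row); hbits byte 4*k+i holds the
+ // eight 5th bits of row k; adding m16 maps the unsigned 0..31 quants to -16..15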
+ qx[0] = vaddq_s8(vandq_u8(lbits.val[0], m4) | vandq_u8(vshlq_n_u8(hbits, 4), m5), m16); // 0...3
+ qx[1] = vaddq_s8(vandq_u8(lbits.val[1], m4) | vandq_u8(vshlq_n_u8(hbits, 3), m5), m16); // 16..19
+ qx[2] = vaddq_s8(vandq_u8(lbits.val[2], m4) | vandq_u8(vshlq_n_u8(hbits, 2), m5), m16); // 4...7
+ qx[3] = vaddq_s8(vandq_u8(lbits.val[3], m4) | vandq_u8(vshlq_n_u8(hbits, 1), m5), m16); // 20..23
+ qx[4] = vaddq_s8(vshrq_n_u8(lbits.val[0], 4)| vandq_u8(hbits, m5), m16); // 8..11
+ qx[5] = vaddq_s8(vshrq_n_u8(lbits.val[1], 4)| vandq_u8(vshrq_n_u8(hbits, 1), m5), m16); // 24..27
+ qx[6] = vaddq_s8(vshrq_n_u8(lbits.val[2], 4)| vandq_u8(vshrq_n_u8(hbits, 2), m5), m16); // 12..15
+ qx[7] = vaddq_s8(vshrq_n_u8(lbits.val[3], 4)| vandq_u8(vshrq_n_u8(hbits, 3), m5), m16); // 28..31
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = vld1q_s8_x2(q8.y[iy][ib4].qs+32*k);
+ auto sumi = vdupq_n_s32(0);
+ sumi = vdotq_laneq_s32(sumi, qx[0], y.val[0], 0);
+ sumi = vdotq_laneq_s32(sumi, qx[1], y.val[1], 0);
+ sumi = vdotq_laneq_s32(sumi, qx[2], y.val[0], 1);
+ sumi = vdotq_laneq_s32(sumi, qx[3], y.val[1], 1);
+ sumi = vdotq_laneq_s32(sumi, qx[4], y.val[0], 2);
+ sumi = vdotq_laneq_s32(sumi, qx[5], y.val[1], 2);
+ sumi = vdotq_laneq_s32(sumi, qx[6], y.val[0], 3);
+ sumi = vdotq_laneq_s32(sumi, qx[7], y.val[1], 3);
+ auto d4d8 = vmulq_f32(scales, vdupq_n_f32(GGML_FP16_TO_FP32(q8.y[iy][ib4].d[k])));
+ acc[iy] = vfmaq_f32(acc[iy], d4d8, vcvtq_f32_s32(sumi));
+ }
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ info.store(ix, iy, acc[iy]);
+ acc[iy] = vdupq_n_f32(0.f);
+ }
+ }
+}
+
+template <int nrc_y>
void mul_mat_q8_0_r4_q8_0(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
GGML_ASSERT(nrc_x%4 == 0);
Q8<nrc_y, block_q8_0_x4> q8(info);
@@ -7179,6 +7357,17 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
m.funcs[7] = mul_mat_q4_0_r4_q8_0<8>;
expected_Btype = GGML_TYPE_Q8_0;
break;
+ case GGML_TYPE_Q5_0_R4:
+ m.funcs[0] = mul_mat_q5_0_r4_q8_0<1>;
+ m.funcs[1] = mul_mat_q5_0_r4_q8_0<2>;
+ m.funcs[2] = mul_mat_q5_0_r4_q8_0<3>;
+ m.funcs[3] = mul_mat_q5_0_r4_q8_0<4>;
+ m.funcs[4] = mul_mat_q5_0_r4_q8_0<5>;
+ m.funcs[5] = mul_mat_q5_0_r4_q8_0<6>;
+ m.funcs[6] = mul_mat_q5_0_r4_q8_0<7>;
+ m.funcs[7] = mul_mat_q5_0_r4_q8_0<8>;
+ expected_Btype = GGML_TYPE_Q8_0;
+ break;
case GGML_TYPE_Q8_0_R4:
m.funcs[0] = mul_mat_q8_0_r4_q8_0<1>;
m.funcs[1] = mul_mat_q8_0_r4_q8_0<2>;
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 811a9fe9..eafb2887 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -3224,12 +3224,24 @@ static void repack_q4_0(int nrows, int n_per_row, const block_q4_0 * x, block_iq
for (int row = 0; row < nrows; row += 4) {
for (int k = 0; k < 4; ++k) x4[k] = x + nblock*k;
for (int ib = 0; ib < nblock; ++ib) {
- for (int k = 0; k < 4; ++k) y[ib].d[k] = x4[k][ib].d;
- for (int k = 0; k < 4; ++k) for (int i = 0; i < 4; ++i) {
- y[ib].qs[4*k+i+ 0] = (x4[k][ib].qs[i+0] & 0xf) | ((x4[k][ib].qs[i+ 8] & 0x0f) << 4); // 0....3 + 8...11 from each row
- y[ib].qs[4*k+i+16] = (x4[k][ib].qs[i+0] >> 4) | ((x4[k][ib].qs[i+ 8] & 0xf0)); // 16...19 + 24...27 from each row
- y[ib].qs[4*k+i+32] = (x4[k][ib].qs[i+4] & 0xf) | ((x4[k][ib].qs[i+12] & 0x0f) << 4); // 4....7 + 12...15 from each row
- y[ib].qs[4*k+i+48] = (x4[k][ib].qs[i+4] >> 4) | ((x4[k][ib].qs[i+12] & 0xf0)); // 20...23 + 28...31 from each row
+ for (int k = 0; k < 4; ++k) {
+ y[ib].d[k] = x4[k][ib].d;
+ for (int l = 0; l < 4; ++l) {
+ // group l reads source bytes 4*(l/2) and 4*(l/2)+8 with nibble shift 4*(l%2):
+ // l = 0 -> bytes 0, 8  with shift 0
+ // l = 1 -> bytes 0, 8  with shift 4
+ // l = 2 -> bytes 4, 12 with shift 0
+ // l = 3 -> bytes 4, 12 with shift 4
+ for (int i = 0; i < 4; ++i) {
+ y[ib].qs[4*k+i+16*l] = ((x4[k][ib].qs[i+4*(l/2)] >> 4*(l%2)) & 0xf) | (((x4[k][ib].qs[i+4*(l/2)+8] >> 4*(l%2)) & 0xf) << 4);
+ }
+ }
}
}
x += 4*nblock;
@@ -3254,21 +3266,18 @@ size_t quantize_q4_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_
void dequantize_row_q4_0_r4(const block_iq4_nl_x4 * x, float * y, int64_t k) {
// we assume we are called with 4 rows
int n_per_row = k/4;
- int nb = n_per_row/QK4_NL;
+ int nb = n_per_row/QK4_0;
float * yk[4];
for (int k = 0; k < 4; ++k) yk[k] = y + k*n_per_row;
for (int ib = 0; ib < nb; ++ib) {
for (int k = 0; k < 4; ++k) {
float scale = GGML_FP16_TO_FP32(x[ib].d[k]);
- for (int i = 0; i < 4; ++i) {
- yk[k][QK4_NL*ib+i+ 0] = scale * ((x[ib].qs[4*k+i+ 0] & 0xf) - 8);
- yk[k][QK4_NL*ib+i+ 8] = scale * ((x[ib].qs[4*k+i+ 0] >> 4) - 8);
- yk[k][QK4_NL*ib+i+16] = scale * ((x[ib].qs[4*k+i+16] & 0xf) - 8);
- yk[k][QK4_NL*ib+i+24] = scale * ((x[ib].qs[4*k+i+16] >> 4) - 8);
- yk[k][QK4_NL*ib+i+ 4] = scale * ((x[ib].qs[4*k+i+32] & 0xf) - 8);
- yk[k][QK4_NL*ib+i+12] = scale * ((x[ib].qs[4*k+i+32] >> 4) - 8);
- yk[k][QK4_NL*ib+i+20] = scale * ((x[ib].qs[4*k+i+48] & 0xf) - 8);
- yk[k][QK4_NL*ib+i+28] = scale * ((x[ib].qs[4*k+i+48] >> 4) - 8);
+ for (int l = 0; l < 4; ++l) {
+ int ll = 16*(l%2) + 4*(l/2);
+ for (int i = 0; i < 4; ++i) {
+ yk[k][QK4_0*ib+i+ll+0] = scale * ((x[ib].qs[4*k+i+16*l] & 0xf) - 8);
+ yk[k][QK4_0*ib+i+ll+8] = scale * ((x[ib].qs[4*k+i+16*l] >> 4) - 8);
+ }
}
}
}
@@ -3365,3 +3374,104 @@ void vec_dot_q8_0_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t b
GGML_UNUSED(bx);
GGML_UNUSED(by);
}
+
+//
+// ========================================= q5_0_r4
+//
+void quantize_row_q5_0_r4_ref(const float * x, block_q5_0_r4 * y, int64_t k) {
+ // we assume we are called with 4 rows
+ quantize_q5_0_r4(x, (void *)y, 4, k/4, nullptr);
+}
+
+void quantize_row_q5_0_r4(const float * x, void * y, int64_t k) {
+ // we assume we are called with 4 rows
+ quantize_q5_0_r4(x, y, 4, k/4, nullptr);
+}
+
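+// unpack one q5_0 block into 32 unsigned 5-bit values (0..31) in row order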
+static inline void convert_q5_0(const block_q5_0& x, uint8_t * L) {
+ uint32_t qh;
+ memcpy(&qh, x.qh, sizeof(qh));
+
+ for (int j = 0; j < QK5_0/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ L[j ] = (x.qs[j] & 0x0F) | xh_0;
+ L[j + QK5_0/2] = (x.qs[j] >> 4) | xh_1;
+ }
+}
+
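+// interleave 4 consecutive q5_0 rows: low nibbles use the same group layout as
+// repack_q4_0; the eight 5th bits of row k, position i are collected in qh[4*k+i]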
+static void repack_q5_0(int nrows, int n_per_row, const block_q5_0 * x, block_q5_0_r4 * y) {
+ GGML_ASSERT(nrows%4 == 0);
+ GGML_ASSERT(n_per_row%QK5_0 == 0);
+ int nblock = n_per_row/QK5_0;
+ const block_q5_0 * x4[4];
+ uint8_t L[QK5_0];
+ for (int row = 0; row < nrows; row += 4) {
+ for (int k = 0; k < 4; ++k) x4[k] = x + nblock*k;
+ for (int ib = 0; ib < nblock; ++ib) {
+ std::memset(y[ib].qh, 0, QK5_0/2);
+ for (int k = 0; k < 4; ++k) {
+ y[ib].d[k] = x4[k][ib].d;
+ convert_q5_0(x4[k][ib], L);
+ for (int l = 0; l < 4; ++l) {
+ int l1 = 4*(l/2) + 16*(l%2), l2 = l1 + 8;
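+ // group l holds values (l1..l1+3, l2..l2+3) of each row:
+ // l = 0 -> (0..3, 8..11),  l = 1 -> (16..19, 24..27)
+ // l = 2 -> (4..7, 12..15), l = 3 -> (20..23, 28..31)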
+ for (int i = 0; i < 4; ++i) {
+ y[ib].qs[4*k+i+16*l] = (L[i + l1] & 0xf) | ((L[i + l2] & 0xf) << 4);
+ y[ib].qh[4*k+i] |= ((L[i + l1] >> 4) | ((L[i + l2] >> 4) << 4)) << l;
+ }
+ }
+ }
+ }
+ x += 4*nblock;
+ y += nblock;
+ }
+}
+
+size_t quantize_q5_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+ GGML_ASSERT(nrows%4 == 0);
+ auto row_size_0 = ggml_row_size(GGML_TYPE_Q5_0, n_per_row);
+ std::vector<char> qtmp(4*row_size_0);
+ char * qrow = (char *)dst;
+ for (int row = 0; row < nrows; row += 4) {
+ quantize_q5_0(src, qtmp.data(), 4, n_per_row, imatrix);
+ repack_q5_0(4, n_per_row, (const block_q5_0 *)qtmp.data(), (block_q5_0_r4 *)qrow);
+ src += 4*n_per_row;
+ qrow += 4*row_size_0;
+ }
+ return nrows*row_size_0;
+}
+
+void dequantize_row_q5_0_r4(const block_q5_0_r4 * x, float * y, int64_t k) {
+ // we assume we are called with 4 rows
+ int n_per_row = k/4;
+ int nb = n_per_row/QK5_0;
+ float * yk[4];
+ for (int k = 0; k < 4; ++k) yk[k] = y + k*n_per_row;
+ for (int ib = 0; ib < nb; ++ib) {
+ for (int k = 0; k < 4; ++k) {
+ float d = GGML_FP16_TO_FP32(x[ib].d[k]);
+ float m = -16*d;
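+ // quants are stored unsigned (0..31); m restores the -16 offset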
+ for (int l = 0; l < 4; ++l) {
+ int ll = 16*(l%2) + 4*(l/2);
+ for (int i = 0; i < 4; ++i) {
+ yk[k][QK5_0*ib+i+ll+0] = d * ((x[ib].qs[4*k+i+16*l] & 0xf) | (((x[ib].qh[4*k+i] >> (l+0)) & 1) << 4)) + m;
+ yk[k][QK5_0*ib+i+ll+8] = d * ((x[ib].qs[4*k+i+16*l] >> 4) | (((x[ib].qh[4*k+i] >> (l+4)) & 1) << 4)) + m;
+ }
+ }
+ }
+ }
+}
+
+void vec_dot_q5_0_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_Q5_0_R4, vx, 0, GGML_TYPE_Q8_0, vy, 0, s, 0, 0, 1)) {
+ return;
+ }
+#endif
+ GGML_ASSERT(n%QK5_0 == 0);
+ GGML_ASSERT(nrc == 1);
+ GGML_UNUSED(bs);
+ GGML_UNUSED(bx);
+ GGML_UNUSED(by);
+}
diff --git a/ggml/src/iqk/iqk_quantize.h b/ggml/src/iqk/iqk_quantize.h
index 53caed4c..24c241a2 100644
--- a/ggml/src/iqk/iqk_quantize.h
+++ b/ggml/src/iqk/iqk_quantize.h
@@ -81,6 +81,12 @@ size_t quantize_q8_0_r4(const float * GGML_RESTRICT src, void * GGML_RESTRICT ds
void dequantize_row_q8_0_r4(const block_q8_0_x4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_q8_0_r4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void quantize_row_q5_0_r4_ref(const float * GGML_RESTRICT x, block_q5_0_r4 * GGML_RESTRICT y, int64_t k);
+void quantize_row_q5_0_r4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+size_t quantize_q5_0_r4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+void dequantize_row_q5_0_r4(const block_q5_0_r4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+void vec_dot_q5_0_r4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
#ifdef __cplusplus
}
#endif