-rw-r--r--  examples/quantize/quantize.cpp     1
-rw-r--r--  ggml/include/ggml.h                2
-rw-r--r--  ggml/src/ggml-common.h             9
-rw-r--r--  ggml/src/ggml-quants.c             1
-rw-r--r--  ggml/src/ggml.c                   23
-rw-r--r--  ggml/src/iqk/iqk_mul_mat.cpp     269
-rw-r--r--  ggml/src/iqk/iqk_quantize.cpp    108
-rw-r--r--  ggml/src/iqk/iqk_quantize.h        6
-rw-r--r--  include/llama.h                    1
-rw-r--r--  src/llama.cpp                     21
10 files changed, 394 insertions(+), 47 deletions(-)
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 1599405b..5ffdbc84 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -39,6 +39,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "IQ3_XXS", LLAMA_FTYPE_MOSTLY_IQ3_XXS, " 3.06 bpw quantization", },
{ "IQ3_XXS_R4",LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4,"IQ3_XXS repacked", },
{ "IQ3_S", LLAMA_FTYPE_MOSTLY_IQ3_S, " 3.44 bpw quantization", },
+ { "IQ3_S_R4", LLAMA_FTYPE_MOSTLY_IQ3_S_R4, "IQ3_S repacked", },
{ "IQ3_M", LLAMA_FTYPE_MOSTLY_IQ3_M, " 3.66 bpw quantization mix", },
{ "Q3_K", LLAMA_FTYPE_MOSTLY_Q3_K_M, "alias for Q3_K_M" },
{ "Q3_K_R4", LLAMA_FTYPE_MOSTLY_Q3_K_R4, "Q3_K_S repacked" },
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 002388cb..5fd8324d 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -422,6 +422,7 @@ extern "C" {
GGML_TYPE_IQ2_XS_R4 = 217,
GGML_TYPE_IQ3_XXS_R4= 218,
GGML_TYPE_IQ4_NL_R4 = 220,
+ GGML_TYPE_IQ3_S_R4 = 221,
GGML_TYPE_IQ2_S_R4 = 222,
GGML_TYPE_IQ4_XS_R4 = 223,
GGML_TYPE_BF16_R16 = 230,
@@ -504,6 +505,7 @@ extern "C" {
GGML_FTYPE_MOSTLY_IQ2_XS_R4 = 216, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ3_XXS_R4= 217, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_NL_R4 = 219, // except 1d tensors
+ GGML_FTYPE_MOSTLY_IQ3_S_R4 = 220, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ2_S_R4 = 221, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_XS_R4 = 222, // except 1d tensors
GGML_FTYPE_MOSTLY_BF16_R16 = 224, // except 1d tensors
diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index 6964f5e6..7f79b27b 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -465,6 +465,15 @@ typedef struct {
static_assert(sizeof(block_iq3_s) == sizeof(ggml_half) + 13*(QK_K/32) + IQ3S_N_SCALE, "wrong iq3_s block size/padding");
typedef struct {
+ ggml_half d[4];
+ uint8_t qs[QK_K];
+ uint8_t qh[QK_K/8];
+ uint8_t signs[QK_K/2];
+ uint8_t scales[4*IQ3S_N_SCALE];
+} block_iq3_s_r4;
+static_assert(sizeof(block_iq3_s_r4) == 4*sizeof(block_iq3_s), "wrong iq3_s_r4 block size/padding");
+
+typedef struct {
ggml_half d;
uint8_t qs[QK_K/8];
uint16_t qh[QK_K/32];
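
The static_assert above holds because the repacked block carries exactly the payload of four block_iq3_s blocks. A minimal standalone sketch of the byte accounting (illustrative only, not part of the patch; it assumes QK_K == 256, IQ3S_N_SCALE == QK_K/64 and a 2-byte ggml_half, as in ggml-common.h):

    /* Illustrative sketch: byte accounting behind the static_assert.
     * Assumes QK_K == 256, IQ3S_N_SCALE == QK_K/64, sizeof(ggml_half) == 2. */
    #include <stdio.h>

    #define QK_K         256
    #define IQ3S_N_SCALE (QK_K/64)

    int main(void) {
        /* block_iq3_s: d + qs[QK_K/4] + qh[QK_K/32] + signs[QK_K/8] + scales[IQ3S_N_SCALE] */
        int iq3_s    = 2 + QK_K/4 + QK_K/32 + QK_K/8 + IQ3S_N_SCALE;      /* 110 bytes */
        /* block_iq3_s_r4: d[4] + qs[QK_K] + qh[QK_K/8] + signs[QK_K/2] + scales[4*IQ3S_N_SCALE] */
        int iq3_s_r4 = 4*2 + QK_K + QK_K/8 + QK_K/2 + 4*IQ3S_N_SCALE;     /* 440 bytes */
        printf("4*sizeof(block_iq3_s) = %d, sizeof(block_iq3_s_r4) = %d\n", 4*iq3_s, iq3_s_r4);
        return 0;
    }
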
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index bf028c0c..27d3f5c4 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -15201,6 +15201,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
case GGML_TYPE_IQ2_XXS_R4: break;
case GGML_TYPE_IQ2_XS_R4: break;
case GGML_TYPE_IQ3_XXS_R4: break;
+ case GGML_TYPE_IQ3_S_R4: break;
case GGML_TYPE_IQ2_S_R4: break;
case GGML_TYPE_Q4_0_R4: break;
case GGML_TYPE_Q5_0_R4: break;
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 2cece547..71d84c94 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1083,6 +1083,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 0,
},
+ [GGML_TYPE_IQ3_S_R4] = {
+ .type_name = "iq3_s_r4",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq3_s),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq3_s_r4,
+ .from_float = quantize_row_iq3_s_r4,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq3_s_r4_ref,
+ .vec_dot = vec_dot_iq3_s_r4_q8_k,
+ .vec_dot_type = GGML_TYPE_Q8_K,
+ .nrows = 1,
+ .row_meta_size = 0,
+ },
[GGML_TYPE_IQ2_S] = {
.type_name = "iq2_s",
.blck_size = QK_K,
@@ -4282,6 +4295,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ5_K_R4: wtype = GGML_TYPE_IQ5_K_R4; break;
case GGML_FTYPE_MOSTLY_IQ6_K: wtype = GGML_TYPE_IQ6_K; break;
case GGML_FTYPE_MOSTLY_IQ3_S: wtype = GGML_TYPE_IQ3_S; break;
+ case GGML_FTYPE_MOSTLY_IQ3_S_R4: wtype = GGML_TYPE_IQ3_S_R4; break;
case GGML_FTYPE_MOSTLY_IQ2_S: wtype = GGML_TYPE_IQ2_S; break;
case GGML_FTYPE_MOSTLY_IQ2_S_R4: wtype = GGML_TYPE_IQ2_S_R4; break;
case GGML_FTYPE_MOSTLY_Q4_0_4_4: wtype = GGML_TYPE_Q4_0_4_4; break;
@@ -10827,6 +10841,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ5_K_R4:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ2_S_R4:
case GGML_TYPE_Q4_0_4_4:
@@ -11291,6 +11306,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ5_K_R4:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ2_S_R4:
case GGML_TYPE_Q4_0_4_4:
@@ -11452,6 +11468,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ5_K_R4:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ2_S_R4:
case GGML_TYPE_Q4_0_4_4:
@@ -14659,6 +14676,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ5_K_R4:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ2_S_R4:
case GGML_TYPE_Q4_0_4_4:
@@ -15060,6 +15078,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ5_K_R4:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ2_S_R4:
case GGML_TYPE_Q4_0_4_4:
@@ -15355,6 +15374,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ5_K_R4:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ2_S_R4:
case GGML_TYPE_Q4_0_4_4:
@@ -15979,6 +15999,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ5_K_R4:
case GGML_TYPE_IQ6_K:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ2_S_R4:
case GGML_TYPE_Q8_K:
@@ -22731,6 +22752,7 @@ void ggml_quantize_init(enum ggml_type type) {
case GGML_TYPE_IQ1_M: iq2xs_init_impl(type); break;
case GGML_TYPE_IQ3_XXS_R4:
case GGML_TYPE_IQ3_XXS: iq3xs_init_impl(256); break;
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ3_S: iq3xs_init_impl(512); break;
default: // nothing
break;
@@ -22807,6 +22829,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ3_XXS: result = quantize_iq3_xxs(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ3_XXS_R4:result = quantize_iq3_xxs_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ3_S: result = quantize_iq3_s (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ3_S_R4:result = quantize_iq3_s_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_S: result = quantize_iq2_s (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_S_R4:result = quantize_iq2_s_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ1_S: result = quantize_iq1_s (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
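
With the type traits and the ggml_quantize_chunk dispatch registered above, the new format is reachable through the generic quantization entry points like any other _R4 type. A usage sketch (an illustration, not part of the patch; it assumes the ggml_quantize_init / ggml_row_size / ggml_quantize_chunk signatures from ggml.h and a row count that is a multiple of 4, as required by quantize_iq3_s_r4):

    // Illustrative usage sketch. Assumes nrows % 4 == 0 and n_per_row % QK_K == 0.
    #include <cstdint>
    #include <vector>
    #include "ggml.h"

    std::vector<char> quantize_to_iq3_s_r4(const float * data, int64_t nrows, int64_t n_per_row) {
        ggml_quantize_init(GGML_TYPE_IQ3_S_R4);   // sets up the iq3s grid (iq3xs_init_impl(512))
        std::vector<char> buf(nrows * ggml_row_size(GGML_TYPE_IQ3_S_R4, n_per_row));
        ggml_quantize_chunk(GGML_TYPE_IQ3_S_R4, data, buf.data(),
                            /*start=*/0, nrows, n_per_row, /*imatrix=*/nullptr);
        return buf;
    }
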
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index cf4bd7ab..ca75e0fd 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -204,6 +204,7 @@ struct MulMat {
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ2_XXS_R4:
case GGML_TYPE_IQ3_XXS_R4:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ2_BN_R4: return 4;
case GGML_TYPE_Q8_K_R8: return 8;
case GGML_TYPE_BF16_R16: return 16;
@@ -3981,6 +3982,136 @@ static void mul_mat_iq3_xxs_r4_q8_k(int n, const void * vx, size_t bx, const Dat
}
}
+#ifdef HAVE_FANCY_SIMD
+// Strangely enough, the following implementation makes PP ~6% slower and TG ~6% faster
+// compared to the vanilla AVX2 version below.
+struct IndexHelperIQ3S {
+ union index_t {
+ __m256i vec;
+ uint16_t val[16];
+ };
+ inline void make2(const uint8_t * qs, const uint8_t * qh, __m256i * values) const {
+ auto idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs));
+ const __mmask16 * m16 = (const __mmask16 *)qh;
+ index_t idx;
+ idx.vec = _mm256_mask_add_epi16(idx_l, m16[0], idx_l, offset);
+ values[0] = _mm256_set_epi32(iq3s_grid[idx.val[ 7]], iq3s_grid[idx.val[ 6]], iq3s_grid[idx.val[ 5]], iq3s_grid[idx.val[ 4]],
+ iq3s_grid[idx.val[ 3]], iq3s_grid[idx.val[ 2]], iq3s_grid[idx.val[ 1]], iq3s_grid[idx.val[ 0]]);
+ values[1] = _mm256_set_epi32(iq3s_grid[idx.val[15]], iq3s_grid[idx.val[14]], iq3s_grid[idx.val[13]], iq3s_grid[idx.val[12]],
+ iq3s_grid[idx.val[11]], iq3s_grid[idx.val[10]], iq3s_grid[idx.val[ 9]], iq3s_grid[idx.val[ 8]]);
+ }
+ const __m256i offset = _mm256_set1_epi16(256);
+};
+#else
+struct IndexHelperIQ3S {
+ union index_t {
+ __m256i vec;
+ uint32_t val[8];
+ };
+ inline void make2(const uint8_t * qs, const uint8_t * qh, __m256i * values) const {
+ index_t idx;
+ auto idx_l = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)qs));
+ auto idx_h = _mm256_and_si256(_mm256_sllv_epi32(_mm256_set1_epi32(qh[0]), idx_shift), idx_mask);
+ idx.vec = _mm256_or_si256(idx_h, idx_l);
+ values[0] = _mm256_set_epi32(iq3s_grid[idx.val[7]], iq3s_grid[idx.val[6]], iq3s_grid[idx.val[5]], iq3s_grid[idx.val[4]],
+ iq3s_grid[idx.val[3]], iq3s_grid[idx.val[2]], iq3s_grid[idx.val[1]], iq3s_grid[idx.val[0]]);
+ idx_l = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)(qs+8)));
+ idx_h = _mm256_and_si256(_mm256_sllv_epi32(_mm256_set1_epi32(qh[1]), idx_shift), idx_mask);
+ idx.vec = _mm256_or_si256(idx_h, idx_l);
+ values[1] = _mm256_set_epi32(iq3s_grid[idx.val[7]], iq3s_grid[idx.val[6]], iq3s_grid[idx.val[5]], iq3s_grid[idx.val[4]],
+ iq3s_grid[idx.val[3]], iq3s_grid[idx.val[2]], iq3s_grid[idx.val[1]], iq3s_grid[idx.val[0]]);
+ }
+ const __m256i idx_mask = _mm256_set1_epi32(256);
+ const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8);
+};
+#endif
+
+template <int nrc_y>
+static void mul_mat_iq3_s_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ GGML_ASSERT(nrc_x%4 == 0);
+ Q8<nrc_y, block_q8_K> q8(info);
+ int nbl = n / QK_K;
+ auto smask = _mm256_set1_epi8(1);
+ union { __m256i vec; uint32_t val[8]; } helper;
+ union { __m128i vec; uint16_t val[8]; } hidx;
+ __m256 acc[nrc_y] = {};
+ __m256i isum[nrc_y] = {};
+ __m256i qx[4];
+#ifdef HAVE_FANCY_SIMD
+ __mmask32 mask[4];
+#endif
+ for (int ix = 0; ix < nrc_x; ix += 4) {
+ auto iq3 = (const block_iq3_s_r4 *)((const char *)vx + (ix+0)*bx);
+ for (int ibl = 0; ibl < nbl; ++ibl) { // Block of 256
+ auto dl = _mm_cvtph_ps(_mm_loadl_epi64((const __m128i *)iq3[ibl].d));
+ auto d4 = _mm256_set_m128(dl, dl);
+ auto qs = iq3[ibl].qs;
+ auto qh = iq3[ibl].qh;
+ auto scale_bits = _mm_loadu_si128((const __m128i *)iq3[ibl].scales);
+ auto scales8 = MM256_SET_M128I(_mm_srli_epi16(scale_bits, 4), scale_bits);
+ helper.vec = _mm256_or_si256(_mm256_slli_epi16(_mm256_and_si256(scales8, _mm256_set1_epi8(0xf)), 1), _mm256_set1_epi8(1));
+ for (int ib = 0; ib < QK_K/32; ++ib) {
+ auto qh32 = (const uint32_t *)qh;
+ auto idx_h = _mm_sllv_epi64(_mm_cvtepu8_epi16(_mm_set1_epi32(qh32[0])), _mm_set_epi64x(4, 8));
+ for (int i = 0; i < 4; ++i) {
+ auto idx_l = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)(qs + 8*i)));
+ hidx.vec = _mm_or_si128(idx_l, _mm_and_si128(idx_h, _mm_set1_epi16(0x100))); idx_h = _mm_srli_epi16(idx_h, 1);
+ qx[i] = _mm256_set_epi32(iq3s_grid[hidx.val[7]], iq3s_grid[hidx.val[6]], iq3s_grid[hidx.val[5]], iq3s_grid[hidx.val[4]],
+ iq3s_grid[hidx.val[3]], iq3s_grid[hidx.val[2]], iq3s_grid[hidx.val[1]], iq3s_grid[hidx.val[0]]);
+ }
+ qs += 32; qh += 4;
+ auto signs128 = _mm_loadu_si128((const __m128i*)iq3[ibl].signs + ib);
+ auto signs = MM256_SET_M128I(_mm_srli_epi16(signs128, 4), signs128);
+#ifdef HAVE_FANCY_SIMD
+ auto scales = _mm256_cvtepi8_epi32(_mm_set1_epi32(helper.val[ib]));
+ mask[0] = _mm256_cmpeq_epi8_mask(_mm256_and_si256(signs, smask), smask); signs = _mm256_srli_epi16(signs, 1);
+ mask[1] = _mm256_cmpeq_epi8_mask(_mm256_and_si256(signs, smask), smask); signs = _mm256_srli_epi16(signs, 1);
+ mask[2] = _mm256_cmpeq_epi8_mask(_mm256_and_si256(signs, smask), smask); signs = _mm256_srli_epi16(signs, 1);
+ mask[3] = _mm256_cmpeq_epi8_mask(_mm256_and_si256(signs, smask), smask);
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = _mm256_loadu_si256((const __m256i *)q8.y[iy][ibl].qs + ib);
+ auto sumi = _mm256_setzero_si256();
+ auto ys = _mm256_shuffle_epi32(y, 0x00);
+ sumi = _mm256_dpbusd_epi32(sumi, qx[0], _mm256_mask_sub_epi8(ys, mask[0], _mm256_setzero_si256(), ys));
+ ys = _mm256_shuffle_epi32(y, 0x55);
+ sumi = _mm256_dpbusd_epi32(sumi, qx[1], _mm256_mask_sub_epi8(ys, mask[1], _mm256_setzero_si256(), ys));
+ ys = _mm256_shuffle_epi32(y, 0xaa);
+ sumi = _mm256_dpbusd_epi32(sumi, qx[2], _mm256_mask_sub_epi8(ys, mask[2], _mm256_setzero_si256(), ys));
+ ys = _mm256_shuffle_epi32(y, 0xff);
+ sumi = _mm256_dpbusd_epi32(sumi, qx[3], _mm256_mask_sub_epi8(ys, mask[3], _mm256_setzero_si256(), ys));
+ isum[iy] = _mm256_add_epi32(isum[iy], _mm256_mullo_epi32(sumi, scales));
+ }
+#else
+ auto scales16 = _mm256_cvtepi8_epi16(_mm_set1_epi32(helper.val[ib]));
+ auto scales = _mm256_unpacklo_epi16(scales16, scales16);
+ auto s1 = _mm256_or_si256(_mm256_cmpeq_epi8(_mm256_and_si256(signs, smask), smask), smask); signs = _mm256_srli_epi16(signs, 1);
+ auto s2 = _mm256_or_si256(_mm256_cmpeq_epi8(_mm256_and_si256(signs, smask), smask), smask); signs = _mm256_srli_epi16(signs, 1);
+ auto s3 = _mm256_or_si256(_mm256_cmpeq_epi8(_mm256_and_si256(signs, smask), smask), smask); signs = _mm256_srli_epi16(signs, 1);
+ auto s4 = _mm256_or_si256(_mm256_cmpeq_epi8(_mm256_and_si256(signs, smask), smask), smask);
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = _mm256_loadu_si256((const __m256i *)q8.y[iy][ibl].qs + ib);
+ auto sumi = _mm256_setzero_si256();
+ sumi = _mm256_add_epi16(sumi, _mm256_maddubs_epi16(qx[0], _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x00), s1)));
+ sumi = _mm256_add_epi16(sumi, _mm256_maddubs_epi16(qx[1], _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0x55), s2)));
+ sumi = _mm256_add_epi16(sumi, _mm256_maddubs_epi16(qx[2], _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xaa), s3)));
+ sumi = _mm256_add_epi16(sumi, _mm256_maddubs_epi16(qx[3], _mm256_sign_epi8(_mm256_shuffle_epi32(y, 0xff), s4)));
+ isum[iy] = _mm256_add_epi32(isum[iy], _mm256_madd_epi16(scales, sumi));
+ }
+#endif
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ acc[iy] = _mm256_fmadd_ps(_mm256_mul_ps(d4, _mm256_set1_ps(q8.scale(iy, ibl))), _mm256_cvtepi32_ps(isum[iy]), acc[iy]);
+ isum[iy] = _mm256_setzero_si256();
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto sum = _mm_add_ps(_mm256_castps256_ps128(acc[iy]), _mm256_extractf128_ps(acc[iy], 1));
+ info.store(ix, iy, sum);
+ acc[iy] = _mm256_setzero_ps();
+ }
+ }
+}
+
template <int nrc_y>
static void mul_mat_q4_k_r4_q8_k_avx2(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
GGML_ASSERT(nrc_x%4 == 0);
@@ -5785,50 +5916,6 @@ static void mul_mat_qX_K_q8_K_IQ(int n, const void * vx, size_t bx, const DataIn
#endif
}
-//#ifdef HAVE_FANCY_SIMD
-// Strangely enough, the following implementation makes PP ~6% slower and TG ~6% faster
-// compared to the vanilla AVX2 version below.
-//struct IndexHelperIQ3S {
-// union index_t {
-// __m256i vec;
-// uint16_t val[16];
-// };
-// inline void make2(const uint8_t * qs, const uint8_t * qh, __m256i * values) const {
-// auto idx_l = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i *)qs));
-// const __mmask16 * m16 = (const __mmask16 *)qh;
-// index_t idx;
-// idx.vec = _mm256_mask_add_epi16(idx_l, m16[0], idx_l, offset);
-// values[0] = _mm256_set_epi32(iq3s_grid[idx.val[ 7]], iq3s_grid[idx.val[ 6]], iq3s_grid[idx.val[ 5]], iq3s_grid[idx.val[ 4]],
-// iq3s_grid[idx.val[ 3]], iq3s_grid[idx.val[ 2]], iq3s_grid[idx.val[ 1]], iq3s_grid[idx.val[ 0]]);
-// values[1] = _mm256_set_epi32(iq3s_grid[idx.val[15]], iq3s_grid[idx.val[14]], iq3s_grid[idx.val[13]], iq3s_grid[idx.val[12]],
-// iq3s_grid[idx.val[11]], iq3s_grid[idx.val[10]], iq3s_grid[idx.val[ 9]], iq3s_grid[idx.val[ 8]]);
-// }
-// const __m256i offset = _mm256_set1_epi16(256);
-//};
-//#else
-struct IndexHelperIQ3S {
- union index_t {
- __m256i vec;
- uint32_t val[8];
- };
- inline void make2(const uint8_t * qs, const uint8_t * qh, __m256i * values) const {
- index_t idx;
- auto idx_l = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)qs));
- auto idx_h = _mm256_and_si256(_mm256_sllv_epi32(_mm256_set1_epi32(qh[0]), idx_shift), idx_mask);
- idx.vec = _mm256_or_si256(idx_h, idx_l);
- values[0] = _mm256_set_epi32(iq3s_grid[idx.val[7]], iq3s_grid[idx.val[6]], iq3s_grid[idx.val[5]], iq3s_grid[idx.val[4]],
- iq3s_grid[idx.val[3]], iq3s_grid[idx.val[2]], iq3s_grid[idx.val[1]], iq3s_grid[idx.val[0]]);
- idx_l = _mm256_cvtepu8_epi32(_mm_loadl_epi64((const __m128i *)(qs+8)));
- idx_h = _mm256_and_si256(_mm256_sllv_epi32(_mm256_set1_epi32(qh[1]), idx_shift), idx_mask);
- idx.vec = _mm256_or_si256(idx_h, idx_l);
- values[1] = _mm256_set_epi32(iq3s_grid[idx.val[7]], iq3s_grid[idx.val[6]], iq3s_grid[idx.val[5]], iq3s_grid[idx.val[4]],
- iq3s_grid[idx.val[3]], iq3s_grid[idx.val[2]], iq3s_grid[idx.val[1]], iq3s_grid[idx.val[0]]);
- }
- const __m256i idx_mask = _mm256_set1_epi32(256);
- const __m256i idx_shift = _mm256_set_epi32(1, 2, 3, 4, 5, 6, 7, 8);
-};
-//#endif
-
struct DequantizerIQ3S final : public BaseDequantizer<block_iq3_s> {
DequantizerIQ3S(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {}
@@ -7438,6 +7525,19 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
mm.func16 = mul_mat_iq3_xxs_r4_q8_k<16>;
expected_typeB = GGML_TYPE_Q8_K;
break;
+ case GGML_TYPE_IQ3_S_R4:
+ assert (ne00 % QK_K == 0);
+ mm.funcs[0] = mul_mat_iq3_s_r4_q8_k<1>;
+ mm.funcs[1] = mul_mat_iq3_s_r4_q8_k<2>;
+ mm.funcs[2] = mul_mat_iq3_s_r4_q8_k<3>;
+ mm.funcs[3] = mul_mat_iq3_s_r4_q8_k<4>;
+ mm.funcs[4] = mul_mat_iq3_s_r4_q8_k<5>;
+ mm.funcs[5] = mul_mat_iq3_s_r4_q8_k<6>;
+ mm.funcs[6] = mul_mat_iq3_s_r4_q8_k<7>;
+ mm.funcs[7] = mul_mat_iq3_s_r4_q8_k<8>;
+ mm.func16 = mul_mat_iq3_s_r4_q8_k<16>;
+ expected_typeB = GGML_TYPE_Q8_K;
+ break;
case GGML_TYPE_Q2_K_R4:
assert (ne00 % QK_K == 0);
mm.funcs[0] = mul_mat_q2_k_r4_q8_k<1>;
@@ -10547,6 +10647,82 @@ static void mul_mat_iq3_xxs_r4_q8_k(int n, const void * vx, size_t bx, const Dat
}
}
+template <int nrc_y>
+static void mul_mat_iq3_s_r4_q8_k(int n, const void * vx, size_t bx, const DataInfo& info, int nrc_x) {
+ GGML_ASSERT(nrc_x%4 == 0);
+ Q8<nrc_y, block_q8_K> q8(info);
+ int nbl = n / QK_K;
+ float32x4_t acc[nrc_y] = {};
+ int32x4_t isum[nrc_y] = {};
+ int8x16_t qx[8];
+ auto m1 = vdupq_n_u8(1);
+ auto shuff = vreinterpretq_u8_u32(uint32x4_t{0xffffff00, 0xffffff01, 0xffffff02, 0xffffff03});
+ uint32_t stored_scales[8];
+ for (int ix = 0; ix < nrc_x; ix += 4) {
+ auto iq3 = (const block_iq3_s_r4 *)((const char *)vx + (ix+0)*bx);
+ for (int ibl = 0; ibl < nbl; ++ibl) { // Block of 256
+ auto d4 = vcvt_f32_f16(vld1_f16((const float16_t *)iq3[ibl].d));
+ auto qs = iq3[ibl].qs;
+ auto qh = iq3[ibl].qh;
+ auto scale_bits = vld1q_u8(iq3[ibl].scales);
+ uint8x16x2_t scales8 = { vandq_u8(scale_bits, vdupq_n_u8(0xf)), vshrq_n_u8(scale_bits, 4) };
+ scales8.val[0] = vorrq_u8(vshlq_n_u8(scales8.val[0], 1), m1);
+ scales8.val[1] = vorrq_u8(vshlq_n_u8(scales8.val[1], 1), m1);
+ vst1q_u8_x2((uint8_t *)stored_scales, scales8);
+ for (int ib = 0; ib < QK_K/32; ++ib) {
+ auto signs128 = vld1q_u8(iq3[ibl].signs+16*ib);
+ if constexpr (nrc_y == 1) {
+ auto qh32 = (const uint32_t *)qh;
+ auto idx_h = vreinterpretq_u16_u64(vshlq_u64(vreinterpretq_u64_u16(vmovl_u8(vreinterpret_u8_u32(vdup_n_u32(qh32[0])))), int64x2_t{8, 4}));
+ union { uint16x8_t vec; uint16_t val[8]; } hidx;
+ for (int i = 0; i < 4; ++i) {
+ auto idx_l = vmovl_u8(vld1_u8(qs));
+ hidx.vec = vorrq_u16(idx_l, vandq_u16(idx_h, vdupq_n_u16(0x100))); idx_h = vshrq_n_u16(idx_h, 1);
+ qx[2*i+0] = vreinterpretq_s8_u32(uint32x4_t{iq3s_grid[hidx.val[0]], iq3s_grid[hidx.val[1]], iq3s_grid[hidx.val[2]], iq3s_grid[hidx.val[3]]});
+ auto signs = vreinterpretq_s8_u8(vorrq_u8(vceqq_u8(vandq_u8(signs128, m1), m1), m1));
+ qx[2*i+0] = vmulq_s8(qx[2*i+0], signs);
+ qx[2*i+1] = vreinterpretq_s8_u32(uint32x4_t{iq3s_grid[hidx.val[4]], iq3s_grid[hidx.val[5]], iq3s_grid[hidx.val[6]], iq3s_grid[hidx.val[7]]});
+ signs = vreinterpretq_s8_u8(vorrq_u8(vceqq_u8(vandq_u8(vshrq_n_u8(signs128, 4), m1), m1), m1));
+ qx[2*i+1] = vmulq_s8(qx[2*i+1], signs);
+ signs128 = vshrq_n_u8(signs128, 1);
+ qs += 8;
+ }
+ } else {
+ for (int i = 0; i < 4; ++i) {
+ qx[2*i+0] = vreinterpretq_s8_u32(uint32x4_t{iq3s_grid[qs[0] | ((qh[0] << (8-i)) & 0x100)], iq3s_grid[qs[1] | ((qh[1] << (8-i)) & 0x100)],
+ iq3s_grid[qs[2] | ((qh[2] << (8-i)) & 0x100)], iq3s_grid[qs[3] | ((qh[3] << (8-i)) & 0x100)]});
+ auto signs = vreinterpretq_s8_u8(vorrq_u8(vceqq_u8(vandq_u8(signs128, m1), m1), m1));
+ qx[2*i+0] = vmulq_s8(qx[2*i+0], signs);
+
+ qx[2*i+1] = vreinterpretq_s8_u32(uint32x4_t{iq3s_grid[qs[4] | ((qh[0] << (4-i)) & 0x100)], iq3s_grid[qs[5] | ((qh[1] << (4-i)) & 0x100)],
+ iq3s_grid[qs[6] | ((qh[2] << (4-i)) & 0x100)], iq3s_grid[qs[7] | ((qh[3] << (4-i)) & 0x100)]});
+ signs = vreinterpretq_s8_u8(vorrq_u8(vceqq_u8(vandq_u8(vshrq_n_u8(signs128, 4), m1), m1), m1));
+ qx[2*i+1] = vmulq_s8(qx[2*i+1], signs);
+
+ qs += 8;
+ signs128 = vshrq_n_u8(signs128, 1);
+ }
+ }
+ auto scales = vreinterpretq_s32_u8(vqtbl1q_u8(vreinterpretq_u8_u32(vdupq_n_u32(stored_scales[ib])), shuff));
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ auto y = vld1q_s8_x2(q8.y[iy][ibl].qs + 32*ib);
+ auto sumi = interleaved_dotq(qx, y);
+ isum[iy] = vmlaq_s32(isum[iy], scales, sumi);
+ }
+ qh += 4;
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ acc[iy] = vfmaq_f32(acc[iy], vmulq_f32(d4, vdupq_n_f32(q8.scale(iy, ibl))), vcvtq_f32_s32(isum[iy]));
+ isum[iy] = vdupq_n_s32(0);
+ }
+ }
+ for (int iy = 0; iy < nrc_y; ++iy) {
+ info.store(ix, iy, acc[iy]);
+ acc[iy] = vdupq_n_f32(0.f);
+ }
+ }
+}
+
template <int nrc_y, int k_shift>
inline void iq3_4_add_shift(int ibl, const Q8<nrc_y, block_q8_K>& q8, const int8x16x4_t& i8scales, uint8x16_t extra,
int32x4_t * isum) {
@@ -11864,6 +12040,11 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
m.func16 = mul_mat_iq3_xxs_r4_q8_k<16>;
expected_Btype = GGML_TYPE_Q8_K;
break;
+ case GGML_TYPE_IQ3_S_R4:
+ SET_MUL_MAT_FUNCTIONS(m, mul_mat_iq3_s_r4_q8_k);
+ m.func16 = mul_mat_iq3_s_r4_q8_k<16>;
+ expected_Btype = GGML_TYPE_Q8_K;
+ break;
case GGML_TYPE_Q2_K_R4:
SET_MUL_MAT_FUNCTIONS(m, mul_mat_q2_k_r4_q8_k);
expected_Btype = GGML_TYPE_Q8_K;
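
One detail shared by the AVX2/AVX512 and NEON kernels above: the packed 4-bit sub-block scales are expanded on the fly to the odd integers 2*s + 1 used by IQ3_S (the `(s << 1) | 1` in the helper/scales8 setup), so the effective multiplier matches the scalar d * (1 + 2*ls). A scalar sketch of that lookup (illustrative only), assuming the block_iq3_s_r4 scale layout produced by repack_iq3_s below:

    /* Illustrative scalar equivalent of the SIMD scale expansion above.
     * scales points at the 16-byte scale area of one block_iq3_s_r4;
     * ib is the 32-value sub-block (0..7), k the interleaved row (0..3). */
    #include <stdint.h>

    static inline int iq3_s_r4_sub_block_scale(const uint8_t * scales, int ib, int k) {
        const int l      = 4*ib + k;
        const int nibble = (scales[l % 16] >> 4*(l / 16)) & 0xf;
        return 2*nibble + 1;   /* same value as ((nibble << 1) | 1) in the vector code */
    }
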
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 90417899..095235cd 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -5696,6 +5696,109 @@ void vec_dot_iq3_xxs_r4_q8_k(int n, float * s, size_t bs, const void * vx, size_
GGML_UNUSED(by);
}
+//
+// ========================================= iq3_s_r4
+//
+
+void quantize_row_iq3_s_r4_ref(const float * x, block_iq3_s_r4 * y, int64_t k) {
+ quantize_iq3_s_r4(x, (void *)y, 4, k/4, nullptr);
+}
+
+void quantize_row_iq3_s_r4(const float * x, void * y, int64_t k) {
+ quantize_iq3_s_r4(x, y, 4, k/4, nullptr);
+}
+
+static void repack_iq3_s(int nrows, int n_per_row, const block_iq3_s * x, block_iq3_s_r4 * y) {
+ GGML_ASSERT(nrows%4 == 0);
+ GGML_ASSERT(n_per_row%QK_K == 0);
+ int nblock = n_per_row/QK_K;
+ const block_iq3_s * x4[4];
+ for (int row = 0; row < nrows; row += 4) {
+ for (int k = 0; k < 4; ++k) x4[k] = x + nblock*k;
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+ std::memset(y[ibl].scales, 0, QK_K/16);
+ std::memset(y[ibl].signs, 0, QK_K/2);
+ std::memset(y[ibl].qh, 0, QK_K/8);
+ for (int k = 0; k < 4; ++k) {
+ y[ibl].d[k] = x4[k][ibl].d;
+ for (int ib = 0; ib < QK_K/64; ++ib) {
+ int j = 8*ib + k;
+ y[ibl].scales[(j+0)%16] |= ((x4[k][ibl].scales[ib] & 0xf) << 4*((j+0)/16));
+ y[ibl].scales[(j+4)%16] |= ((x4[k][ibl].scales[ib] >> 4) << 4*((j+4)/16));
+ }
+ for (int ib = 0; ib < QK_K/32; ++ib) {
+ y[ibl].qh[4*ib+k] = x4[k][ibl].qh[ib]; // leave it like this?
+ for (int i = 0; i < 4; ++i) {
+ y[ibl].qs[32*ib+k+8*i+0] = x4[k][ibl].qs[8*ib+i+0];
+ y[ibl].qs[32*ib+k+8*i+4] = x4[k][ibl].qs[8*ib+i+4];
+ }
+ for (int i = 0; i < 4; ++i) {
+ y[ibl].signs[16*ib+4*k+i] = (((x4[k][ibl].signs[4*ib+0] >> i) & 1) << 0) | (((x4[k][ibl].signs[4*ib+0] >> (4+i)) & 1) << 1) |
+ (((x4[k][ibl].signs[4*ib+1] >> i) & 1) << 2) | (((x4[k][ibl].signs[4*ib+1] >> (4+i)) & 1) << 3) |
+ (((x4[k][ibl].signs[4*ib+2] >> i) & 1) << 4) | (((x4[k][ibl].signs[4*ib+2] >> (4+i)) & 1) << 5) |
+ (((x4[k][ibl].signs[4*ib+3] >> i) & 1) << 6) | (((x4[k][ibl].signs[4*ib+3] >> (4+i)) & 1) << 7);
+ }
+ }
+ }
+ }
+ x += 4*nblock;
+ y += nblock;
+ }
+}
+
+size_t quantize_iq3_s_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+ GGML_ASSERT(nrows%4 == 0);
+ GGML_ASSERT(n_per_row%QK_K == 0);
+ char * qcur = (char *)dst;
+ auto row_size = ggml_row_size(GGML_TYPE_IQ3_S, n_per_row);
+ std::vector<char> qtmp(4*row_size);
+ for (int row = 0; row < nrows; row += 4) {
+ quantize_iq3_s(src, (void *)qtmp.data(), 4, n_per_row, imatrix);
+ repack_iq3_s(4, n_per_row, (const block_iq3_s *)qtmp.data(), (block_iq3_s_r4 *)qcur);
+ qcur += 4*row_size;
+ src += 4*n_per_row;
+ }
+ return nrows*row_size;
+}
+
+void dequantize_row_iq3_s_r4(const block_iq3_s_r4 * x, float * y, int64_t k) {
+ auto n_per_row = k/4;
+ float * y4[4] = {y, y + n_per_row, y + 2*n_per_row, y + 3*n_per_row};
+ int nblock = n_per_row/QK_K;
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+ for (int k = 0; k < 4; ++k) {
+ const float d = GGML_FP16_TO_FP32(x[ibl].d[k]);
+ for (int ib = 0; ib < QK_K/32; ++ib) {
+ int l = 4*ib + k;
+ float dl = d * (1 + 2*((x[ibl].scales[l%16] >> 4*(l/16)) & 0xf));
+ for (int i = 0; i < 4; ++i) {
+ auto grid1 = (const uint8_t *)(iq3s_grid + x[ibl].qs[32*ib+k+8*i+0] + ((x[ibl].qh[4*ib+k] << (8-i)) & 0x100));
+ auto grid2 = (const uint8_t *)(iq3s_grid + x[ibl].qs[32*ib+k+8*i+4] + ((x[ibl].qh[4*ib+k] << (4-i)) & 0x100));
+ for (int j = 0; j < 4; ++j) {
+ y4[k][QK_K*ibl+32*ib+4*i+ 0+j] = dl * grid1[j] * (x[ibl].signs[16*ib+4*k+j] & (1 << (i+0)) ? -1 : 1);
+ y4[k][QK_K*ibl+32*ib+4*i+16+j] = dl * grid2[j] * (x[ibl].signs[16*ib+4*k+j] & (1 << (i+4)) ? -1 : 1);
+ }
+ }
+ }
+ }
+ }
+}
+
+void vec_dot_iq3_s_r4_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ3_S_R4, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+ return;
+ }
+#endif
+ GGML_ASSERT(n%QK4_NL == 0);
+ GGML_ASSERT(nrc == 1);
+ GGML_UNUSED(bs);
+ GGML_UNUSED(bx);
+ GGML_UNUSED(by);
+}
+
+//================================================
+
void iqk_repack_tensor(struct ggml_tensor * tensor) {
constexpr int kChunk = 8;
if (!tensor) return;
@@ -5711,6 +5814,11 @@ void iqk_repack_tensor(struct ggml_tensor * tensor) {
{ GGML_TYPE_IQ4_KS, { GGML_TYPE_IQ4_KS_R4, 4, (Repack::repack_func)repack_iq4_ks} },
{ GGML_TYPE_IQ4_NL, { GGML_TYPE_IQ4_NL_R4, 4, (Repack::repack_func)repack_iq4_nl} },
{ GGML_TYPE_IQ2_BN, { GGML_TYPE_IQ2_BN_R4, 4, (Repack::repack_func)repack_iq2_bn} },
+ { GGML_TYPE_IQ2_XXS,{ GGML_TYPE_IQ2_XXS_R4,4, (Repack::repack_func)repack_iq2_xxs} },
+ { GGML_TYPE_IQ2_XS, { GGML_TYPE_IQ2_XS_R4, 4, (Repack::repack_func)repack_iq2_xs} },
+ { GGML_TYPE_IQ2_S, { GGML_TYPE_IQ2_S_R4, 4, (Repack::repack_func)repack_iq2_s} },
+ { GGML_TYPE_IQ3_XXS,{ GGML_TYPE_IQ3_XXS_R4,4, (Repack::repack_func)repack_iq3_xxs} },
+ { GGML_TYPE_IQ3_S, { GGML_TYPE_IQ3_S_R4, 4, (Repack::repack_func)repack_iq3_s} },
{ GGML_TYPE_Q2_K, { GGML_TYPE_Q2_K_R4, 4, (Repack::repack_func)repack_q2_k} },
{ GGML_TYPE_Q3_K, { GGML_TYPE_Q3_K_R4, 4, (Repack::repack_func)repack_q3_k} },
{ GGML_TYPE_Q4_K, { GGML_TYPE_Q4_K_R4, 4, (Repack::repack_func)repack_q4_k} },
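
The scale index arithmetic in repack_iq3_s and dequantize_row_iq3_s_r4 above is intended to be a pack/unpack pair. A small self-contained check of that round trip (illustrative only, not part of the patch; it assumes QK_K == 256, i.e. 4 scale bytes per source row and 16 per repacked block):

    /* Illustrative check: the scale packing in repack_iq3_s and the unpacking in
     * dequantize_row_iq3_s_r4 are inverse operations (assuming QK_K == 256). */
    #include <assert.h>
    #include <stdint.h>

    int main(void) {
        uint8_t src[4][4];       /* per-row iq3_s scales: 4 bytes, two 4-bit sub-block scales each */
        uint8_t packed[16] = {0};
        for (int k = 0; k < 4; ++k)
            for (int ib = 0; ib < 4; ++ib) src[k][ib] = (uint8_t)(17*k + 3*ib + 1); /* arbitrary test data */
        /* pack: same index math as repack_iq3_s */
        for (int k = 0; k < 4; ++k) {
            for (int ib = 0; ib < 4; ++ib) {
                int j = 8*ib + k;
                packed[(j+0)%16] |= (uint8_t)((src[k][ib] & 0xf) << 4*((j+0)/16));
                packed[(j+4)%16] |= (uint8_t)((src[k][ib] >>  4) << 4*((j+4)/16));
            }
        }
        /* unpack: same index math as dequantize_row_iq3_s_r4 (l = 4*ib + k over 8 sub-blocks) */
        for (int k = 0; k < 4; ++k) {
            for (int ib = 0; ib < 8; ++ib) {
                int l        = 4*ib + k;
                int got      = (packed[l%16] >> 4*(l/16)) & 0xf;
                int expected = (src[k][ib/2] >> 4*(ib%2)) & 0xf;
                assert(got == expected);
            }
        }
        return 0;
    }
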
diff --git a/ggml/src/iqk/iqk_quantize.h b/ggml/src/iqk/iqk_quantize.h
index f62f055a..70df2dcd 100644
--- a/ggml/src/iqk/iqk_quantize.h
+++ b/ggml/src/iqk/iqk_quantize.h
@@ -193,6 +193,12 @@ size_t quantize_iq3_xxs_r4(const float * GGML_RESTRICT src, void * GGML_RESTRICT
void dequantize_row_iq3_xxs_r4(const block_iq3_xxs_r4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_iq3_xxs_r4_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void quantize_row_iq3_s_r4_ref(const float * GGML_RESTRICT x, block_iq3_s_r4 * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq3_s_r4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+size_t quantize_iq3_s_r4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+void dequantize_row_iq3_s_r4(const block_iq3_s_r4 * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+void vec_dot_iq3_s_r4_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
void quantize_row_q8_k_r8_ref(const float * GGML_RESTRICT x, block_q8_k_r8 * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_k_r8(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
size_t quantize_q8_k_r8(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
diff --git a/include/llama.h b/include/llama.h
index b7822307..f5f3b8bf 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -192,6 +192,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_IQ2_XS_R4 = 220, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 = 223, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_NL_R4 = 225, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ3_S_R4 = 226, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_M_R4 = 229, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_XS_R4 = 230, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q6_0_R4 = 335, // except 1d tensors
diff --git a/src/llama.cpp b/src/llama.cpp
index 42193411..37653478 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3884,6 +3884,7 @@ struct llama_model_loader {
case GGML_TYPE_IQ5_K_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ5_K_R4;break;
case GGML_TYPE_IQ6_K: ftype = LLAMA_FTYPE_MOSTLY_IQ6_K; break;
case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break;
+ case GGML_TYPE_IQ3_S_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ3_S_R4;break;
case GGML_TYPE_Q4_0_4_4: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_4; break;
case GGML_TYPE_Q4_0_4_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_8; break;
case GGML_TYPE_Q4_0_8_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_8_8; break;
@@ -4618,6 +4619,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
case LLAMA_FTYPE_MOSTLY_IQ2_BN: return "IQ2_BN - 2.00 bpw Bitnet";
case LLAMA_FTYPE_MOSTLY_IQ2_BN_R4:return "IQ2_BN_R4 - 2.00 bpw Bitnet";
case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
+ case LLAMA_FTYPE_MOSTLY_IQ3_S_R4: return "IQ3_S_R4 - 3.4375 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";
case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4";
case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: return "Q4_0_4_8";
@@ -15807,7 +15809,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS_R4) {
new_type = !qs.has_output ? GGML_TYPE_IQ4_K_R4 : GGML_TYPE_Q5_K_R4;
}
- else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_S || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS ||
+ else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_S || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S_R4 ||
ftype == LLAMA_FTYPE_MOSTLY_IQ4_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_KSS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_KS_R4) && !qs.has_output) {
new_type = GGML_TYPE_IQ5_K;
}
@@ -15871,6 +15873,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
else if (new_type == GGML_TYPE_IQ3_K_R4) {
new_type = GGML_TYPE_IQ3_K;
}
+ else if (new_type == GGML_TYPE_IQ3_S_R4) {
+ new_type = GGML_TYPE_IQ3_S;
+ }
else if (new_type == GGML_TYPE_IQ4_K_R4) {
new_type = GGML_TYPE_IQ4_K;
}
@@ -15955,6 +15960,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S) && qs.model.hparams.n_gqa() >= 2) {
new_type = GGML_TYPE_IQ4_K;
}
+ else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_S_R4 && qs.model.hparams.n_gqa() >= 2) {
+ new_type = GGML_TYPE_IQ4_K_R4;
+ }
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_K && qs.model.hparams.n_gqa() >= 2) {
new_type = GGML_TYPE_IQ4_K;
}
@@ -16008,6 +16016,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
if (new_type == GGML_TYPE_Q2_K || new_type == GGML_TYPE_IQ3_XXS) new_type = GGML_TYPE_IQ3_S;
else if (new_type == GGML_TYPE_Q2_K_R4 || new_type == GGML_TYPE_IQ3_XXS_R4) new_type = GGML_TYPE_IQ3_K_R4;
else if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_IQ3_S ) new_type = GGML_TYPE_Q4_K;
+ else if (new_type == GGML_TYPE_IQ3_S_R4) new_type = GGML_TYPE_Q4_K_R4;
else if (new_type == GGML_TYPE_Q3_K_R4) new_type = GGML_TYPE_Q4_K_R4;
else if (new_type == GGML_TYPE_Q4_K || new_type == GGML_TYPE_IQ4_XS) new_type = GGML_TYPE_Q5_K;
else if (new_type == GGML_TYPE_IQ4_NL) new_type = GGML_TYPE_Q5_K;
@@ -16119,7 +16128,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K || ftype == LLAMA_FTYPE_MOSTLY_Q4_K_R4 ||
ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS_R4 || ftype == LLAMA_FTYPE_MOSTLY_Q3_K_R4 ||
ftype == LLAMA_FTYPE_MOSTLY_Q2_K_R4|| ftype == LLAMA_FTYPE_MOSTLY_IQ4_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4) {
+ ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S_R4) {
new_type = GGML_TYPE_Q5_K;
}
} else {
@@ -16195,7 +16204,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
new_type == GGML_TYPE_IQ4_K_R4|| new_type == GGML_TYPE_Q8_K_R8 || new_type == GGML_TYPE_IQ3_K_R4||
new_type == GGML_TYPE_IQ2_K_R4|| new_type == GGML_TYPE_IQ5_K_R4|| new_type == GGML_TYPE_IQ4_KS_R4 ||
new_type == GGML_TYPE_IQ3_XXS_R4 || new_type == GGML_TYPE_IQ2_XXS_R4 || new_type == GGML_TYPE_IQ2_XS_R4 ||
- new_type == GGML_TYPE_IQ2_S_R4) {
+ new_type == GGML_TYPE_IQ2_S_R4|| new_type == GGML_TYPE_IQ3_S_R4) {
int nx = tensor->ne[0];
int ny = tensor->ne[1];
if (nx % QK_K != 0) {
@@ -16223,6 +16232,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ3_XXS_R4:
case GGML_TYPE_IQ3_S:
+ case GGML_TYPE_IQ3_S_R4:
case GGML_TYPE_IQ1_S:
case GGML_TYPE_IQ1_M:
case GGML_TYPE_Q2_K:
@@ -16384,6 +16394,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_IQ5_K_R4:default_type = GGML_TYPE_IQ5_K_R4;break;
case LLAMA_FTYPE_MOSTLY_IQ6_K: default_type = GGML_TYPE_IQ6_K; break;
case LLAMA_FTYPE_MOSTLY_IQ3_S: default_type = GGML_TYPE_IQ3_S; break;
+ case LLAMA_FTYPE_MOSTLY_IQ3_S_R4:default_type = GGML_TYPE_IQ3_S_R4;break;
case LLAMA_FTYPE_MOSTLY_IQ3_M: default_type = GGML_TYPE_IQ3_S; break;
case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: default_type = GGML_TYPE_Q4_0_4_4; break;
case LLAMA_FTYPE_MOSTLY_Q4_0_4_8: default_type = GGML_TYPE_Q4_0_4_8; break;
@@ -16825,6 +16836,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
if (tensor->ne[1] % 4 != 0) new_type = GGML_TYPE_IQ3_XXS;
else chunk_size_multiplier = 4;
}
+ else if (new_type == GGML_TYPE_IQ3_S_R4) {
+ if (tensor->ne[1] % 4 != 0) new_type = GGML_TYPE_IQ3_S;
+ else chunk_size_multiplier = 4;
+ }
else if (new_type == GGML_TYPE_BF16_R16) {
if (tensor->ne[1] % 16 != 0) new_type = GGML_TYPE_BF16;
else chunk_size_multiplier = 16;