 examples/quantize/quantize.cpp                                |   1
 ggml/include/ggml.h                                           |   2
 ggml/src/ggml-common.h                                        |   8
 ggml/src/ggml-cuda.cu                                         |   1
 ggml/src/ggml-cuda/common.cuh                                 |   7
 ggml/src/ggml-cuda/convert.cu                                 |  59
 ggml/src/ggml-cuda/iqk_mmvq.cu                                |  75
 ggml/src/ggml-cuda/iqk_mmvq.cuh                               |   5
 ggml/src/ggml-cuda/mmq.cu                                     |   4
 ggml/src/ggml-cuda/mmq.cuh                                    | 152
 ggml/src/ggml-cuda/mmvq.cu                                    |   4
 ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_ks.cu  |   5
 ggml/src/ggml-metal.m                                         |  38
 ggml/src/ggml-metal.metal                                     | 180
 ggml/src/ggml-quants.c                                        |   1
 ggml/src/ggml.c                                               |  22
 ggml/src/iqk/iqk_gemm_iqk_quants.cpp                          | 284
 ggml/src/iqk/iqk_mul_mat.cpp                                  |   5
 ggml/src/iqk/iqk_quantize.cpp                                 | 231
 ggml/src/iqk/iqk_quantize.h                                   |   6
 include/llama.h                                               |   1
 src/llama.cpp                                                 |  14
 22 files changed, 1040 insertions(+), 65 deletions(-)
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 85ceabfd..fd657373 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -76,6 +76,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "IQ2_K_R4", LLAMA_FTYPE_MOSTLY_IQ2_K_R4, "IQ2_K repacked",},
{ "IQ2_KS", LLAMA_FTYPE_MOSTLY_IQ2_KS, " 2.1875 bpw non-linear quantization",},
{ "IQ2_KT", LLAMA_FTYPE_MOSTLY_IQ2_KT, " 2.125 bpw trellis quantization", },
+ { "IQ3_KS", LLAMA_FTYPE_MOSTLY_IQ3_KS, " 3.19 bpw non-linear quantization", },
{ "IQ3_K", LLAMA_FTYPE_MOSTLY_IQ3_K, " 3.44 bpw non-linear quantization", },
{ "IQ3_K_R4", LLAMA_FTYPE_MOSTLY_IQ3_K_R4, "IQ3_K repacked", },
{ "IQ3_KL", LLAMA_FTYPE_MOSTLY_IQ3_KL, " 4 bpw non-linear quantization mix",},
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 67551eb2..aea0fac5 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -429,6 +429,7 @@ extern "C" {
GGML_TYPE_IQ2_KT = 153,
GGML_TYPE_IQ3_KT = 154,
GGML_TYPE_IQ4_KT = 155,
+ GGML_TYPE_IQ3_KS = 156,
GGML_TYPE_Q4_0_R8 = 202,
GGML_TYPE_Q5_0_R4 = 206,
@@ -521,6 +522,7 @@ extern "C" {
GGML_FTYPE_MOSTLY_IQ2_KT = 142, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ3_KT = 143, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_KT = 144, // except 1d tensors
+ GGML_FTYPE_MOSTLY_IQ3_KS = 145, // except 1d tensors
//
GGML_FTYPE_MOSTLY_Q4_0_R8 = 202, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_0_R8 = 207, // except 1d tensors
diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index 2bfe5d39..a1f97911 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -650,6 +650,14 @@ typedef struct {
static_assert(sizeof(block_iq3_k) == sizeof(ggml_half) + 2*sizeof(uint16_t) + QK_K/32 + QK_K/4 + QK_K/8, "wrong iq3_k block size/padding");
typedef struct {
+ uint16_t extra;
+ uint8_t scales[QK_K/64];
+ uint8_t qs[QK_K/4];
+ uint8_t qh[QK_K/8];
+} block_iq3_ks;
+static_assert(sizeof(block_iq3_ks) == sizeof(uint16_t) + QK_K/64 + QK_K/4 + QK_K/8, "wrong iq3_ks block size/padding");
+
+typedef struct {
ggml_half d[4];
uint8_t extra[8];
uint8_t scales_h[QK_K/32];
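
For reference, the block_iq3_ks layout above is where the "3.19 bpw" figure quoted in quantize.cpp comes from. A minimal back-of-envelope check, assuming the usual QK_K = 256 super-block plus the 2-byte per-row fp16 scale (row_meta_size = 2 in the ggml.c traits later in this diff); this is a hypothetical standalone program, not part of the change:

#include <cstdio>

int main() {
    const int QK_K   = 256;        // quants per super-block
    const int extra  = 2;          // uint16_t extra: 8 scale high bits + 8 table-select bits
    const int scales = QK_K / 64;  // 4 bytes = 8 packed 4-bit scale nibbles
    const int qs     = QK_K / 4;   // 2 low bits per quant
    const int qh     = QK_K / 8;   // 1 high bit per quant
    const int bytes  = extra + scales + qs + qh;  // 102 bytes, matching the static_assert
    printf("%d bytes/block -> %.4f bpw\n", bytes, 8.0 * bytes / QK_K);  // 3.1875 bpw
    return 0;                      // plus 2 bytes per row for the fp16 scale
}
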
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index ae7a55c6..e0035c7a 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -3465,6 +3465,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KSS:
case GGML_TYPE_IQ5_KS:
diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index e2690cb3..973af2b8 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -600,6 +600,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K> {
};
template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ3_KS> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR4_XS;
+ static constexpr int qi = QI4_XS;
+};
+
+template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ4_K> {
static constexpr int qk = QK_K;
static constexpr int qr = QR4_XS;
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index b40079a3..61c09481 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -1333,6 +1333,51 @@ static __global__ void dequantize_block_iq3_k(const void * __restrict__ vx, dst_
}
}
+template<typename dst_t>
+static __global__ void dequantize_block_iq3_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+ int64_t ii = blockIdx.x;
+ int64_t row = (QK_K * ii) / n_per_row;
+ const char * cx = (const char *)vx + row * row_size;
+ float scale = *(const ggml_half *)cx;
+ const block_iq3_ks * x = (const block_iq3_ks *)(cx + sizeof(ggml_half));
+ const int64_t i = ii - (row*n_per_row)/QK_K;
+
+ const int64_t tid = threadIdx.x;
+ const int64_t is = tid/16;
+ const int64_t il = tid%16;
+ dst_t * y = yy + ii*QK_K + 128*is + 2*il;
+ const uint8_t * qs = x[i].qs + 32*is + 2*il;
+ const uint8_t * qh = x[i].qh + 2*il;
+ uint16_t extra = x[i].extra >> 4*is;
+ const float d0 = scale * (int(((x[i].scales[0] >> 4*is) & 0xf) | ((extra << 4) & 0x10)) - 16);
+ const float d1 = scale * (int(((x[i].scales[1] >> 4*is) & 0xf) | ((extra << 3) & 0x10)) - 16);
+ const float d2 = scale * (int(((x[i].scales[2] >> 4*is) & 0xf) | ((extra << 2) & 0x10)) - 16);
+ const float d3 = scale * (int(((x[i].scales[3] >> 4*is) & 0xf) | ((extra << 1) & 0x10)) - 16);
+ extra >>= 8;
+ const int8_t * values0 = iq3nl_values + ((extra & 1) << 3);
+ const int8_t * values1 = iq3nl_values + ((extra & 2) << 2);
+ const int8_t * values2 = iq3nl_values + ((extra & 4) << 1);
+ const int8_t * values3 = iq3nl_values + ((extra & 8) << 0);
+ if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+ for (int j = 0; j < 2; ++j) {
+ uint8_t h = qh[j] >> 4*is;
+ y[j+ 0] = __float2bfloat16(d0 * values0[((qs[j] >> 0) & 3) | ((h << 2) & 4)]);
+ y[j+32] = __float2bfloat16(d1 * values1[((qs[j] >> 2) & 3) | ((h << 1) & 4)]);
+ y[j+64] = __float2bfloat16(d2 * values2[((qs[j] >> 4) & 3) | ((h >> 0) & 4)]);
+ y[j+96] = __float2bfloat16(d3 * values3[((qs[j] >> 6) & 3) | ((h >> 1) & 4)]);
+ }
+ } else {
+ for (int j = 0; j < 2; ++j) {
+ uint8_t h = qh[j] >> 4*is;
+ y[j+ 0] = d0 * values0[((qs[j] >> 0) & 3) | ((h << 2) & 4)];
+ y[j+32] = d1 * values1[((qs[j] >> 2) & 3) | ((h << 1) & 4)];
+ y[j+64] = d2 * values2[((qs[j] >> 4) & 3) | ((h >> 0) & 4)];
+ y[j+96] = d3 * values3[((qs[j] >> 6) & 3) | ((h >> 1) & 4)];
+ }
+ }
+}
+
template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
static void dequantize_block_cuda(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
@@ -1574,6 +1619,14 @@ static void dequantize_row_iq3_k_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq3_ks_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+ const int64_t k = nrows * n_per_row;
+ const int64_t row_size = ggml_row_size(GGML_TYPE_IQ3_KS, n_per_row);
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq3_ks<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
+template<typename dst_t>
static void dequantize_row_iq3_k_r4_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
const int64_t row_size = ggml_row_size(GGML_TYPE_IQ3_K, n_per_row);
@@ -1719,6 +1772,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) {
return dequantize_row_iq2_k_cuda<nv_bfloat16>;
case GGML_TYPE_IQ3_K:
return dequantize_row_iq3_k_cuda<nv_bfloat16>;
+ case GGML_TYPE_IQ3_KS:
+ return dequantize_row_iq3_ks_cuda<nv_bfloat16>;
case GGML_TYPE_IQ4_KSS:
return dequantize_row_iq4_kss_cuda<nv_bfloat16>;
case GGML_TYPE_IQ4_KS:
@@ -1821,6 +1876,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_K:
return dequantize_row_iq3_k_cuda;
+ case GGML_TYPE_IQ3_KS:
+ return dequantize_row_iq3_ks_cuda;
case GGML_TYPE_IQ4_K:
return dequantize_row_iq4_k_cuda;
case GGML_TYPE_IQ5_K:
@@ -1916,6 +1973,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_K:
return dequantize_row_iq3_k_cuda;
+ case GGML_TYPE_IQ3_KS:
+ return dequantize_row_iq3_ks_cuda;
case GGML_TYPE_IQ4_K:
return dequantize_row_iq4_k_cuda;
case GGML_TYPE_IQ5_K:
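
The bit-twiddling in dequantize_block_iq3_ks above is easier to follow in scalar form. Below is a minimal reference decoder for one block, assuming QK_K = 256; the 16-entry value table is inferred from the packed iq3k_table added to mmq.cuh later in this diff (the contents of the device-side iq3nl_values are not shown here, so treat it as an assumption):

#include <cstdint>

// Inferred from iq3k_table in mmq.cuh below: 8 base values plus a shifted
// variant, selected per 32-quant sub-block by the high byte of `extra`.
static const int8_t iq3nl_values[16] = {
    -63, -40, -23, -10, 1, 13, 28, 47,   // table-select bit = 0
    -59, -36, -19,  -6, 5, 17, 32, 51,   // table-select bit = 1
};

struct block_iq3_ks { uint16_t extra; uint8_t scales[4]; uint8_t qs[64]; uint8_t qh[32]; };

// Dequantize one 256-quant block; d_row is the per-row fp16 scale, already
// converted to float by the caller.
static void dequant_iq3_ks_block(const block_iq3_ks * b, float d_row, float * y) {
    for (int ib32 = 0; ib32 < 8; ++ib32) {   // 8 sub-blocks of 32 quants
        const int is = ib32 / 4;             // which 128-quant half
        const int k  = ib32 % 4;             // sub-block within the half
        // 4-bit scale nibble, 1 high bit from the low byte of `extra`, bias -16
        const int sc = (((b->scales[k] >> 4*is) & 0xf) | (((b->extra >> ib32) & 1) << 4)) - 16;
        const float d = d_row * sc;
        // high byte of `extra` picks which 8-value table this sub-block uses
        const int8_t * values = iq3nl_values + 8*((b->extra >> (8 + ib32)) & 1);
        for (int q = 0; q < 32; ++q) {
            const int lo = (b->qs[32*is + q] >> 2*k) & 3;   // 2 low bits
            const int hi = (b->qh[q] >> (4*is + k)) & 1;    // 1 high bit
            y[32*ib32 + q] = d * values[lo | (hi << 2)];
        }
    }
}
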
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu
index e69bcc4a..3b1f6acb 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cu
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cu
@@ -1104,6 +1104,73 @@ __device__ __forceinline__ void vec_dot_iq3_k_q8_1(
}
+__device__ __forceinline__ void vec_dot_iq3_ks_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iiqs, float * result) {
+
+ float d = __half2float(*(const half *)vbq);
+ const block_iq3_ks * bq3 = (const block_iq3_ks *)((const char *)vbq + sizeof(half)) + kbx;
+
+ int iqs = iiqs/4;
+ const int ib128 = iqs/4; // 0 or 1. 0 works on quants 0...127, 1 on quants 128...255
+ // Each thread processes 8 quants in each of the 4 32-blocks
+ const int il8 = iqs%4; // 0...3. 0 works on quants 0...7, 1 on quants 8...15, 2 on 16...23, 3 on 24...31
+ const int shift = 4*(il8/2);
+
+ const uint16_t * ql = (const uint16_t *)bq3->qs + 16*ib128 + 4*il8;
+ const uint16_t * qh = (const uint16_t *)bq3->qh + 4*il8;
+
+ int32_t aux32;
+ const uint8_t * aux8 = (const uint8_t *)&aux32;
+
+ uint16_t extra = bq3->extra >> 4*ib128;
+ uint16_t extra_v = extra >> 8;
+
+ const uint16_t * values1 = iq3k_table + ((extra_v << 6) & 0x40);
+ const uint16_t * values2 = iq3k_table + ((extra_v << 5) & 0x40);
+ const uint16_t * values3 = iq3k_table + ((extra_v << 4) & 0x40);
+ const uint16_t * values4 = iq3k_table + ((extra_v << 3) & 0x40);
+
+ const int * q8;
+ int sumi[4] = {0, 0, 0, 0};
+ int v;
+ for (int i = 0; i < 2; ++i) {
+ uint32_t vl = ql[2*i+0] | (ql[2*i+1] << 16);
+ uint32_t vh = ((qh[2*i+0] | (qh[2*i+1] << 16)) >> 4*ib128) << 2;
+
+ q8 = (const int *)bq8_1[4*ib128+0].qs + 2*il8;
+ aux32 = (vl & 0x03030303) | (vh & 0x04040404);
+ v = int_from_table_2(aux8, values1);
+ sumi[0] = ggml_cuda_dp4a(v, q8[i], sumi[0]);
+ vl >>= 2; vh >>= 1;
+
+ q8 += sizeof(block_q8_1)/4;
+ aux32 = (vl & 0x03030303) | (vh & 0x04040404);
+ v = int_from_table_2(aux8, values2);
+ sumi[1] = ggml_cuda_dp4a(v, q8[i], sumi[1]);
+ vl >>= 2; vh >>= 1;
+
+ q8 += sizeof(block_q8_1)/4;
+ aux32 = (vl & 0x03030303) | (vh & 0x04040404);
+ v = int_from_table_2(aux8, values3);
+ sumi[2] = ggml_cuda_dp4a(v, q8[i], sumi[2]);
+ vl >>= 2; vh >>= 1;
+
+ q8 += sizeof(block_q8_1)/4;
+ aux32 = (vl & 0x03030303) | (vh & 0x04040404);
+ v = int_from_table_2(aux8, values4);
+ sumi[3] = ggml_cuda_dp4a(v, q8[i], sumi[3]);
+
+ }
+ const uint16_t * sl16 = (const uint16_t *)bq3->scales;
+ aux32 = __vsub4(((sl16[0] | (sl16[1] << 16)) >> 4*ib128) & 0x0f0f0f0f, 0x10101010);
+ const int8_t * a8 = (const int8_t *)&aux32;
+ *result += d * (__low2float(bq8_1[4*ib128+0].ds) * (a8[0] + ((extra << 4) & 0x10)) * sumi[0] +
+ __low2float(bq8_1[4*ib128+1].ds) * (a8[1] + ((extra << 3) & 0x10)) * sumi[1] +
+ __low2float(bq8_1[4*ib128+2].ds) * (a8[2] + ((extra << 2) & 0x10)) * sumi[2] +
+ __low2float(bq8_1[4*ib128+3].ds) * (a8[3] + ((extra << 1) & 0x10)) * sumi[3]);
+
+}
+
__device__ __forceinline__ void vec_dot_iq1_bn_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs, float * result) {
@@ -1302,6 +1369,14 @@ void mul_mat_vec_iq4_ks_q8_1_cuda(
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_KS, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq4_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
}
+void mul_mat_vec_iq3_ks_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
+
+ iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ3_KS, VDR_IQ3_K_Q8_1_MMVQ, vec_dot_iq3_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+}
+
void mul_mat_vec_iq4_kt_q8_1_cuda(
const void * vx, const void * vy, float * dst, const char * ids_data,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
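
For orientation: per the comments in vec_dot_iq3_ks_q8_1 above, each thread owns the same 8-quant column in all four 32-quant sub-blocks of one 128-quant half, accumulating one sumi per sub-block. A small sketch of that index mapping (hypothetical helper, not part of the diff):

// Which quants a thread with vector-dot offset iiqs touches (QK_K = 256).
struct iq3_ks_thread_span { int ib128; int first; };
static inline iq3_ks_thread_span iq3_ks_span(int iiqs) {
    const int iqs   = iiqs / 4;
    const int ib128 = iqs / 4;   // 0: quants 0..127, 1: quants 128..255
    const int il8   = iqs % 4;   // which 8-quant column inside each sub-block
    // reads quants first..first+7 of sub-blocks 4*ib128 .. 4*ib128+3
    return { ib128, 8 * il8 };
}
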
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh
index e7c6e1d2..c2416b1e 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cuh
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh
@@ -16,6 +16,11 @@ void mul_mat_vec_iq3_k_q8_1_cuda(
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+void mul_mat_vec_iq3_ks_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+
void mul_mat_vec_iq4_k_q8_1_cuda(
const void * vx, const void * vy, float * dst, const char * ids_data,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu
index deac4d8c..9c206e50 100644
--- a/ggml/src/ggml-cuda/mmq.cu
+++ b/ggml/src/ggml-cuda/mmq.cu
@@ -94,6 +94,9 @@ void ggml_cuda_op_mul_mat_q(
case GGML_TYPE_IQ4_NL:
mul_mat_q_case<GGML_TYPE_IQ4_NL>(ctx, args, stream);
break;
+ case GGML_TYPE_IQ3_KS:
+ mul_mat_q_case<GGML_TYPE_IQ3_KS>(ctx, args, stream);
+ break;
case GGML_TYPE_IQ4_KS:
mul_mat_q_case<GGML_TYPE_IQ4_KS>(ctx, args, stream);
break;
@@ -196,6 +199,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
case GGML_TYPE_IQ1_S_R4:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_NL:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ5_KS:
diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh
index 6def49ef..d6f4cf3a 100644
--- a/ggml/src/ggml-cuda/mmq.cuh
+++ b/ggml/src/ggml-cuda/mmq.cuh
@@ -87,6 +87,7 @@ static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) {
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
@@ -199,6 +200,7 @@ static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml
case GGML_TYPE_IQ1_S_R4: return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ4_XS : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ4_NL : return MMQ_DP4A_TXS_Q8_0;
+ case GGML_TYPE_IQ3_KS : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ4_KS : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ4_KS_R4 : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ5_KS : return MMQ_DP4A_TXS_Q8_0;
@@ -254,6 +256,7 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) {
case GGML_TYPE_IQ1_S_R4: return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ4_XS : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ4_NL : return MMQ_MMA_TILE_X_K_Q8_0;
+ case GGML_TYPE_IQ3_KS : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ4_KS : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ4_KS_R4 : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ5_KS : return MMQ_MMA_TILE_X_K_Q8_0;
@@ -2700,63 +2703,90 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
}
}
-//template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_ks(
-// const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
-//
-//#ifdef INT8_MMA_AVAILABLE
-// int * x_qs = (int *) x_tile;
-// float * x_df = (float *) (x_qs + WARP_SIZE*2);
-//#else
-// constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ4_XS, mmq_y);
-// int * x_qs = (int *) x_tile;
-// float * x_df = (float *) (x_qs + txs.qs);
-//#endif // INT8_MMA_AVAILABLE
-//
-// const int kbx = 0; // threadIdx.x / QI4_XS
-// const int kqsx = threadIdx.x; // threadIdx.x % QI4_XS
-//
-//#pragma unroll
-// for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
-// int i = i0 + threadIdx.y;
-//
-// if (need_check) {
-// i = min(i, i_max);
-// }
-//
-// const block_iq4_ks * bxi = (const block_iq4_ks *)(x + i*stride + sizeof(float)) + kbx0 + kbx;
-//
-// auto values = iq4k_values + ((bxi->scales[kqsx/4] & 1) << 4);
-// const int aux_q4 = get_int_b4(bxi->qs, kqsx);
-// const int2 v = get_int_from_table_16(aux_q4, values);
-// const int k0 = 8 * (threadIdx.x / 4) + threadIdx.x % 4;
-//#ifdef INT8_MMA_AVAILABLE
-// x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 0] = v.x;
-// x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + k0 + 4] = v.y;
-//#else
-// x_qs[i*(2*WARP_SIZE + 1) + k0 + 0] = v.x;
-// x_qs[i*(2*WARP_SIZE + 1) + k0 + 4] = v.y;
-//#endif // INT8_MMA_AVAILABLE
-// }
-//
-//#pragma unroll
-// for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
-// int i = i0 + threadIdx.y * 4 + threadIdx.x / (WARP_SIZE/4);
-//
-// if (need_check) {
-// i = min(i, i_max);
-// }
-//
-// const float * dptr = (const float *)(x + i*stride);
-// const block_iq4_ks * bxi = (const block_iq4_ks *)(dptr + 1) + kbx0;
-// const int ls = (bxi->scales[threadIdx.x % 8] & 254) - 127;
-//
-//#ifdef INT8_MMA_AVAILABLE
-// x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + threadIdx.x % 8] = dptr[0] * ls;
-//#else
-// x_df[i*(WARP_SIZE/4) + i/4 + threadIdx.x % 8] = dptr[0] * ls;
-//#endif // INT8_MMA_AVAILABLE
-// }
-//}
+static const __device__ uint16_t iq3k_table[128] = {
+ 0xc1c1, 0xc1d8, 0xc1e9, 0xc1f6, 0xc101, 0xc10d, 0xc11c, 0xc12f, 0xd8c1, 0xd8d8, 0xd8e9, 0xd8f6, 0xd801, 0xd80d, 0xd81c, 0xd82f,
+ 0xe9c1, 0xe9d8, 0xe9e9, 0xe9f6, 0xe901, 0xe90d, 0xe91c, 0xe92f, 0xf6c1, 0xf6d8, 0xf6e9, 0xf6f6, 0xf601, 0xf60d, 0xf61c, 0xf62f,
+ 0x01c1, 0x01d8, 0x01e9, 0x01f6, 0x0101, 0x010d, 0x011c, 0x012f, 0x0dc1, 0x0dd8, 0x0de9, 0x0df6, 0x0d01, 0x0d0d, 0x0d1c, 0x0d2f,
+ 0x1cc1, 0x1cd8, 0x1ce9, 0x1cf6, 0x1c01, 0x1c0d, 0x1c1c, 0x1c2f, 0x2fc1, 0x2fd8, 0x2fe9, 0x2ff6, 0x2f01, 0x2f0d, 0x2f1c, 0x2f2f,
+ 0xc5c5, 0xc5dc, 0xc5ed, 0xc5fa, 0xc505, 0xc511, 0xc520, 0xc533, 0xdcc5, 0xdcdc, 0xdced, 0xdcfa, 0xdc05, 0xdc11, 0xdc20, 0xdc33,
+ 0xedc5, 0xeddc, 0xeded, 0xedfa, 0xed05, 0xed11, 0xed20, 0xed33, 0xfac5, 0xfadc, 0xfaed, 0xfafa, 0xfa05, 0xfa11, 0xfa20, 0xfa33,
+ 0x05c5, 0x05dc, 0x05ed, 0x05fa, 0x0505, 0x0511, 0x0520, 0x0533, 0x11c5, 0x11dc, 0x11ed, 0x11fa, 0x1105, 0x1111, 0x1120, 0x1133,
+ 0x20c5, 0x20dc, 0x20ed, 0x20fa, 0x2005, 0x2011, 0x2020, 0x2033, 0x33c5, 0x33dc, 0x33ed, 0x33fa, 0x3305, 0x3311, 0x3320, 0x3333,
+};
+
+__device__ __forceinline__ int int_from_table_2(const uint8_t * a8, const uint16_t * values) {
+ return values[a8[0] | (a8[1] << 3)] | (values[a8[2] | (a8[3] << 3)] << 16);
+}
+
+template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq3_ks(
+ const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
+
+#ifdef INT8_MMA_AVAILABLE
+ int * x_qs = (int *) x_tile;
+ float * x_df = (float *) (x_qs + WARP_SIZE*2);
+#else
+ constexpr tile_x_sizes txs = MMQ_DP4A_TXS_Q8_0_16;
+ int * x_qs = (int *) x_tile;
+ float * x_df = (float *) (x_qs + txs.qs);
+#endif // INT8_MMA_AVAILABLE
+
+ constexpr int qstep = 8;
+ const int kqsx = threadIdx.x % qstep;
+
+ uint32_t aux32[4];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/qstep) {
+ int i = i0 + threadIdx.y*(WARP_SIZE/qstep) + threadIdx.x/qstep;
+
+ if (need_check) {
+ i = min(i, i_max);
+ }
+
+ const half * dptr = (const half *)(x + i*stride);
+ const float d = __half2float(dptr[0]);
+ const block_iq3_ks * bxi = (const block_iq3_ks *)(dptr + 1) + kbx0;
+
+ uint16_t extra = bxi->extra >> 8;
+ int qh = get_int_b2(bxi->qh, kqsx);
+
+ #pragma unroll
+ for (int l = 0; l < qstep/4; ++l) {
+
+ const int ql = get_int_b2(bxi->qs, kqsx + qstep*l);
+ aux32[0] = ((ql >> 0) & 0x03030303) | ((qh << 2) & 0x04040404);
+ aux32[1] = ((ql >> 2) & 0x03030303) | ((qh << 1) & 0x04040404);
+ aux32[2] = ((ql >> 4) & 0x03030303) | ((qh >> 0) & 0x04040404);
+ aux32[3] = ((ql >> 6) & 0x03030303) | ((qh >> 1) & 0x04040404);
+
+ const int val0 = int_from_table_2(aux8+ 0, iq3k_table + ((extra << 6) & 0x40));
+ const int val1 = int_from_table_2(aux8+ 4, iq3k_table + ((extra << 5) & 0x40));
+ const int val2 = int_from_table_2(aux8+ 8, iq3k_table + ((extra << 4) & 0x40));
+ const int val3 = int_from_table_2(aux8+12, iq3k_table + ((extra << 3) & 0x40));
+
+ extra >>= 4;
+ qh >>= 4;
+
+#ifdef INT8_MMA_AVAILABLE
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx + 32*l + 0] = val0;
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx + 32*l + 8] = val1;
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx + 32*l + 16] = val2;
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx + 32*l + 24] = val3;
+#else
+ x_qs[i*(2*WARP_SIZE + 1) + kqsx + 32*l + 0] = val0;
+ x_qs[i*(2*WARP_SIZE + 1) + kqsx + 32*l + 8] = val1;
+ x_qs[i*(2*WARP_SIZE + 1) + kqsx + 32*l + 16] = val2;
+ x_qs[i*(2*WARP_SIZE + 1) + kqsx + 32*l + 24] = val3;
+#endif // INT8_MMA_AVAILABLE
+ }
+
+#ifdef INT8_MMA_AVAILABLE
+ x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = d * (int(((bxi->scales[kqsx%4] >> 4*(kqsx/4)) & 0xf) | (((bxi->extra >> kqsx) & 1) << 4)) - 16);
+#else
+ x_df[i*(WARP_SIZE/4) + i/4 + kqsx] = d * (int(((bxi->scales[kqsx%4] >> 4*(kqsx/4)) & 0xf) | (((bxi->extra >> kqsx) & 1) << 4)) - 16);
+#endif // INT8_MMA_AVAILABLE
+ }
+}
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq4_ks(
const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
@@ -3658,6 +3688,13 @@ struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ2_KS> {
};
template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ3_KS> {
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq3_ks<mmq_y, nwarps, need_check>;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, nwarps, MMQ_Q8_1_DS_LAYOUT_D4>;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ4_KS> {
static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq4_ks<mmq_y, nwarps, need_check>;
static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, nwarps, MMQ_Q8_1_DS_LAYOUT_D4>;
@@ -4142,6 +4179,7 @@ extern DECL_MMQ_CASE(GGML_TYPE_IQ3_S);
extern DECL_MMQ_CASE(GGML_TYPE_IQ1_S);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_NL);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_XS);
+extern DECL_MMQ_CASE(GGML_TYPE_IQ3_KS);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_KS);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_KS_R4);
extern DECL_MMQ_CASE(GGML_TYPE_IQ5_KS_R4);
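
A note on iq3k_table and int_from_table_2 above: each 16-bit entry packs a pair of non-linear values, indexed by two 3-bit codes, so a single lookup yields two dequantized int8 values and two lookups assemble a full 32-bit lane for dp4a. A sketch of how the table could be generated; the 16-value list is read back from the low bytes of entries 0..7 and 64..71 of the table itself, so this program reproduces it exactly:

#include <cstdint>
#include <cstdio>

int main() {
    const int8_t v[16] = { -63, -40, -23, -10, 1, 13, 28, 47,    // entries 0..7, low byte
                           -59, -36, -19,  -6, 5, 17, 32, 51 };  // entries 64..71, low byte
    for (int half = 0; half < 2; ++half)    // 64 entries per value set
        for (int j = 0; j < 8; ++j)         // code of the high output byte (index bits 3..5)
            for (int i = 0; i < 8; ++i) {   // code of the low output byte (index bits 0..2)
                const uint16_t e = (uint8_t)v[8*half + i] | ((uint16_t)(uint8_t)v[8*half + j] << 8);
                printf("0x%04x,%c", e, i == 7 ? '\n' : ' ');
            }
    return 0;
}
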
diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu
index 6412be30..2b619f67 100644
--- a/ggml/src/ggml-cuda/mmvq.cu
+++ b/ggml/src/ggml-cuda/mmvq.cu
@@ -518,6 +518,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm
case GGML_TYPE_IQ3_K:
mul_mat_vec_iq3_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
+ case GGML_TYPE_IQ3_KS:
+ mul_mat_vec_iq3_ks_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+ break;
case GGML_TYPE_IQ4_K:
mul_mat_vec_iq4_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
@@ -679,6 +682,7 @@ bool ggml_cuda_mmvq_type_supported(ggml_type src0_type) {
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ4_KS:
diff --git a/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_ks.cu b/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_ks.cu
new file mode 100644
index 00000000..b8afe899
--- /dev/null
+++ b/ggml/src/ggml-cuda/template-instances/mmq-instance-iq3_ks.cu
@@ -0,0 +1,5 @@
+// This file has been autogenerated by generate_cu_files.py, do not edit manually.
+
+#include "../mmq.cuh"
+
+DECL_MMQ_CASE(GGML_TYPE_IQ3_KS);
diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
index ff68ee19..e1e49fcb 100644
--- a/ggml/src/ggml-metal.m
+++ b/ggml/src/ggml-metal.m
@@ -106,6 +106,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_BN,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,
+ GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_KS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_KS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KSS,
@@ -152,6 +153,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_BN_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KSS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ5_KS_F32,
@@ -192,6 +194,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_BN_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KSS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ5_KS_F32,
@@ -229,6 +232,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_BN_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F32,
@@ -266,6 +270,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_BN_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F16,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_KS_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F16,
@@ -303,6 +308,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_BN_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KSS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_KS_F32,
@@ -756,6 +762,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_BN, get_rows_iq2_bn, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL, get_rows_iq4_nl, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_KS, get_rows_iq3_ks, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS, get_rows_iq4_ks, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KSS, get_rows_iq4_kss, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_KS, get_rows_iq5_ks, true);
@@ -802,6 +809,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_BN_F32, mul_mv_iq2_bn_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_NL_F32, mul_mv_iq4_nl_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_KS_F32, mul_mv_iq3_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KS_F32, mul_mv_iq4_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KSS_F32, mul_mv_iq4_kss_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ5_KS_F32, mul_mv_iq5_ks_f32, ctx->support_simdgroup_reduction);
@@ -842,6 +850,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_BN_F32, mul_mv_id_iq2_bn_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_NL_F32, mul_mv_id_iq4_nl_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_KS_F32, mul_mv_id_iq3_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KS_F32, mul_mv_id_iq4_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KSS_F32, mul_mv_id_iq4_kss_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ5_KS_F32, mul_mv_id_iq5_ks_f32, ctx->support_simdgroup_reduction);
@@ -879,6 +888,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_BN_F32, mul_mm_iq2_bn_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32, mul_mm_iq4_nl_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_KS_F32, mul_mm_iq3_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32, mul_mm_iq4_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F32, mul_mm_iq4_kss_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F32, mul_mm_iq5_ks_f32, ctx->support_simdgroup_mm);
@@ -916,6 +926,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_BN_F16, mul_mm_iq2_bn_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F16, mul_mm_iq4_nl_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F16, mul_mm_iq4_xs_f16, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_KS_F16, mul_mm_iq3_ks_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F16, mul_mm_iq4_ks_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F16, mul_mm_iq4_kss_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F16, mul_mm_iq5_ks_f16, ctx->support_simdgroup_mm);
@@ -953,6 +964,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_BN_F32, mul_mm_id_iq2_bn_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32, mul_mm_id_iq4_nl_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_KS_F32, mul_mm_id_iq3_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32, mul_mm_id_iq4_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KSS_F32, mul_mm_id_iq4_kss_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_KS_F32, mul_mm_id_iq5_ks_f32, ctx->support_simdgroup_mm);
@@ -2169,6 +2181,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ2_BN: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_BN_F32 ].pipeline; break;
case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F32 ].pipeline; break;
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break;
+ case GGML_TYPE_IQ3_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F32].pipeline; break;
case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F32 ].pipeline; break;
@@ -2211,6 +2224,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ2_BN: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_BN_F16 ].pipeline; break;
case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_NL_F16 ].pipeline; break;
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F16 ].pipeline; break;
+ case GGML_TYPE_IQ3_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_KS_F16 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F16 ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F16].pipeline; break;
case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F16 ].pipeline; break;
@@ -2428,6 +2442,12 @@ static void ggml_metal_encode_node(
nth1 = 16;
pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32].pipeline;
} break;
+ case GGML_TYPE_IQ3_KS:
+ {
+ nth0 = 4;
+ nth1 = 16;
+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_KS_F32].pipeline;
+ } break;
case GGML_TYPE_IQ4_KS:
{
nth0 = 4;
@@ -2535,8 +2555,9 @@ static void ggml_metal_encode_node(
src0t == GGML_TYPE_IQ2_KT|| src0t == GGML_TYPE_IQ3_KT) { //|| src0t == GGML_TYPE_IQ4_KT) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
- else if (src0t == GGML_TYPE_IQ2_KS || src0t == GGML_TYPE_IQ2_K || src0t == GGML_TYPE_IQ3_K) {
- const int mem_size = src0t == GGML_TYPE_IQ2_KS ? 64*sizeof(float) : src0t == GGML_TYPE_IQ3_K ? 32*sizeof(float) : 16*sizeof(float);
+ else if (src0t == GGML_TYPE_IQ2_KS || src0t == GGML_TYPE_IQ2_K || src0t == GGML_TYPE_IQ3_K || src0t == GGML_TYPE_IQ3_KS) {
+ const int mem_size = src0t == GGML_TYPE_IQ2_KS ? 64*sizeof(float)
+ : src0t == GGML_TYPE_IQ3_K || src0t == GGML_TYPE_IQ3_KS ? 32*sizeof(float) : 16*sizeof(float);
[encoder setThreadgroupMemoryLength:mem_size atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
@@ -2648,6 +2669,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ2_BN: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_BN_F32 ].pipeline; break;
case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_NL_F32 ].pipeline; break;
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32 ].pipeline; break;
+ case GGML_TYPE_IQ3_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KSS_F32].pipeline; break;
case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_KS_F32 ].pipeline; break;
@@ -2849,6 +2871,12 @@ static void ggml_metal_encode_node(
nth1 = 2;
pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32].pipeline;
} break;
+ case GGML_TYPE_IQ3_KS:
+ {
+ nth0 = 4;
+ nth1 = 16;
+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_KS_F32].pipeline;
+ } break;
case GGML_TYPE_IQ4_KS:
{
nth0 = 4;
@@ -2967,8 +2995,9 @@ static void ggml_metal_encode_node(
src0t == GGML_TYPE_IQ2_KT|| src0t == GGML_TYPE_IQ3_KT) { //|| src0t == GGML_TYPE_IQ4_KT) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
- else if (src0t == GGML_TYPE_IQ2_KS || src0t == GGML_TYPE_IQ2_K || src0t == GGML_TYPE_IQ3_K) {
- const int mem_size = src0t == GGML_TYPE_IQ2_KS ? 64*sizeof(float) : src0t == GGML_TYPE_IQ3_K ? 32*sizeof(float) : 16*sizeof(float);
+ else if (src0t == GGML_TYPE_IQ2_KS || src0t == GGML_TYPE_IQ2_K || src0t == GGML_TYPE_IQ3_K || src0t == GGML_TYPE_IQ3_KS) {
+ const int mem_size = src0t == GGML_TYPE_IQ2_KS ? 64*sizeof(float)
+ : src0t == GGML_TYPE_IQ3_K || src0t == GGML_TYPE_IQ3_KS ? 32*sizeof(float) : 16*sizeof(float);
[encoder setThreadgroupMemoryLength:mem_size atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
@@ -3036,6 +3065,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ2_BN: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_BN ].pipeline; break;
case GGML_TYPE_IQ4_NL: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL ].pipeline; break;
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS ].pipeline; break;
+ case GGML_TYPE_IQ3_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_KS ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KSS].pipeline; break;
case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_KS ].pipeline; break;
diff --git a/ggml/src/ggml-metal.metal b/ggml/src/ggml-metal.metal
index e3bd070d..baaac407 100644
--- a/ggml/src/ggml-metal.metal
+++ b/ggml/src/ggml-metal.metal
@@ -7371,6 +7371,158 @@ kernel void kernel_mul_mv_iq3_k_f32(
kernel_mul_mv_iq3_k_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg);
}
+// TODO
+void kernel_mul_mv_iq3_ks_f32_impl(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ int64_t ne00,
+ int64_t ne01,
+ int64_t ne02,
+ int64_t ne10,
+ int64_t ne12,
+ int64_t ne0,
+ int64_t ne1,
+ uint r2,
+ uint r3,
+ threadgroup int8_t * shared_values,
+ uint3 tgpig,
+ uint tiisg,
+ uint sgitg) {
+
+ const int nb = ne00/QK_K;
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
+
+ const uint i12 = im%ne12;
+ const uint i13 = im/ne12;
+
+ const uint offset0 = (i12/r2)*(nb*ne01) + (i13/r3)*(nb*ne01*ne02);
+
+ const uint row_size = sizeof(half) + nb*sizeof(block_iq3_ks);
+ device const char * cx = (device const char *)src0 + (first_row + offset0)*row_size;
+ device const float * y = (device const float *)src1 + r1*ne10 + im*ne00*ne1;
+
+ threadgroup float * all_values = (threadgroup float *)shared_values + 16*sgitg;
+ {
+ if (tiisg < 16) all_values[tiisg] = kvalues_iq3k_f[tiisg];
+ simdgroup_barrier(mem_flags::mem_none);
+ }
+
+ float yl[32];
+ float sumf[N_DST]={0.f};
+ float d[N_DST];
+
+ const int ix = tiisg/8; // 0...3
+ const int it = tiisg%8; // 0...7
+ const int iq = it/4; // 0 or 1
+ const int ir = it%4; // 0...3
+
+ device const half * dptr = (device const half *)cx;
+ d[0] = (float)dptr[0];
+ for (int i = 1; i < N_DST; ++i) {
+ dptr += row_size/2;
+ d[i] = (float)dptr[0];
+ }
+
+ device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir;
+
+ uint32_t vl[2], vh[2];
+ uint32_t aux32[2];
+ thread const uint8_t * aux8 = (thread const uint8_t *)aux32;
+
+ for (int ib = ix; ib < nb; ib += 4) {
+
+ for (int i = 0; i < 8; ++i) {
+ yl[i+ 0] = y4[i+ 0];
+ yl[i+ 8] = y4[i+32];
+ yl[i+16] = y4[i+64];
+ yl[i+24] = y4[i+96];
+ }
+
+ for (int row = 0; row < N_DST; row++) {
+
+ device const block_iq3_ks * x = (device const block_iq3_ks *)(cx + row_size*row + sizeof(half));
+ device const block_iq3_ks & xb = x[ib];
+ device const uint16_t * ql16 = (device const uint16_t *)xb.qs + 16*iq + 4*ir;
+ device const uint16_t * qh16 = (device const uint16_t *)xb.qh + 4*ir;
+ device const uint16_t * sc16 = (device const uint16_t *)xb.scales;
+
+ uint8_t extra_s = (xb.extra & 0xff) >> 4*iq;
+ uint8_t extra_v = xb.extra >> (8 + 4*iq);
+
+ uint32_t scales32 = sc16[0] | (sc16[1] << 16);
+ scales32 = (scales32 >> 4*iq) & 0x0f0f0f0f;
+ thread int8_t * s8 = (thread int8_t *)&scales32;
+ s8[0] += ((extra_s << 4) & 0x10) - 16;
+ s8[1] += ((extra_s << 3) & 0x10) - 16;
+ s8[2] += ((extra_s << 2) & 0x10) - 16;
+ s8[3] += ((extra_s << 1) & 0x10) - 16;
+
+ vl[0] = ql16[0] | ql16[1] << 16;
+ vl[1] = ql16[2] | ql16[3] << 16;
+ vh[0] = ((qh16[0] | (qh16[1] << 16)) << 4*(1-iq)) >> 2;
+ vh[1] = ((qh16[2] | (qh16[3] << 16)) << 4*(1-iq)) >> 2;
+
+ float4 acc = {0.f};
+ for (int l = 0; l < 4; ++l) {
+ threadgroup const float * values = all_values + ((extra_v & 1) << 3);
+ aux32[0] = (vl[0] & 0x03030303) | (vh[0] & 0x04040404);
+ aux32[1] = (vl[1] & 0x03030303) | (vh[1] & 0x04040404);
+ for (int j = 0; j < 8; ++j) acc[l] += yl[8*l+j] * values[aux8[j]];
+ vl[0] >>= 2; vl[1] >>= 2;
+ vh[0] >>= 1; vh[1] >>= 1;
+ extra_v >>= 1;
+ }
+
+ sumf[row] += d[row] * (acc[0] * s8[0] + acc[1] * s8[1] + acc[2] * s8[2] + acc[3] * s8[3]);
+
+ }
+
+ y4 += 4 * QK_K;
+ }
+
+ for (int row = 0; row < N_DST; row += 2) {
+ float2 tmp{sumf[row], sumf[row+1]};
+ tmp = simd_sum(tmp);
+ if (tiisg < 2) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + row + tiisg] = tmp[tiisg];
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq3_ks_f32")]]
+kernel void kernel_mul_mv_iq3_ks_f32(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01,
+ constant int64_t & ne02,
+ constant uint64_t & nb00,
+ constant uint64_t & nb01,
+ constant uint64_t & nb02,
+ constant int64_t & ne10,
+ constant int64_t & ne11,
+ constant int64_t & ne12,
+ constant uint64_t & nb10,
+ constant uint64_t & nb11,
+ constant uint64_t & nb12,
+ constant int64_t & ne0,
+ constant int64_t & ne1,
+ constant uint & r2,
+ constant uint & r3,
+ threadgroup int8_t * shared_values [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq3_ks_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg);
+}
+
void kernel_mul_mv_iq4_k_f32_impl(
device const void * src0,
device const float * src1,
@@ -8689,6 +8841,29 @@ void dequantize_iq3_k(device const block_iq3_k * xb, short il, thread type4x4 &
}
template <typename type4x4>
+void dequantize_iq3_ks(device const block_iq3_ks * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256
+ int ib32 = il/2;
+ device const uint16_t * q16l = (device const uint16_t *)xb->qs + 16*(il/8) + 8*(il&1);
+ device const uint16_t * q16h = (device const uint16_t *)xb->qh + 8*(il&1);
+
+ int8_t ls = int8_t(((xb->scales[ib32%4] >> 4*(ib32/4)) & 0xf) | (((xb->extra >> ib32) & 1) << 4)) - 16;
+ half d = ls;
+
+ constant half * values = kvalues_iq3k_h + 8*((xb->extra >> (8+ib32)) & 1);
+
+ const int shift = 2*((il%8)/2);
+ uint32_t aux32;
+ thread const uint8_t * aux8 = (thread const uint8_t *)&aux32;
+ for (int i = 0; i < 4; ++i) {
+ uint32_t vl = q16l[2*i+0] | (q16l[2*i+1] << 16);
+ uint32_t vh = q16h[2*i+0] | (q16h[2*i+1] << 16);
+ aux32 = ((vl >> shift) & 0x03030303) | (((vh >> ((il/2)%8)) << 2) & 0x04040404);
+ for (int j = 0; j < 4; ++j) reg[i][j] = d * values[aux8[j]];
+ }
+}
+
+template <typename type4x4>
void dequantize_iq4_k(device const block_iq4_k * xb, short il, thread type4x4 & reg) {
// il is 0...15 for QK_K = 256 => index of block of 32 is il/2
const int ib32 = il/2;
@@ -9416,6 +9591,7 @@ template [[host_name("kernel_get_rows_iq5_k")]] kernel get_rows_q_t kernel_get
template [[host_name("kernel_get_rows_iq6_k")]] kernel get_rows_q_t kernel_get_rows_q<block_iq6_k, QK_NL, dequantize_iq6_k>;
template [[host_name("kernel_get_rows_iq1_bn")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRSBN<float4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>>;
template [[host_name("kernel_get_rows_iq2_bn")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRSBN<float4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>>;
+template [[host_name("kernel_get_rows_iq3_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq3_ks, half, 16, dequantize_iq3_ks>>;
template [[host_name("kernel_get_rows_iq4_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>>;
template [[host_name("kernel_get_rows_iq5_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>>;
template [[host_name("kernel_get_rows_iq4_kss")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>>;
@@ -9463,6 +9639,7 @@ template [[host_name("kernel_mul_mm_iq5_k_f32")]] kernel mat_mm_t kernel_mul_m
template [[host_name("kernel_mul_mm_iq6_k_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DD<block_iq6_k, QK_NL, dequantize_iq6_k>, float>;
template [[host_name("kernel_mul_mm_iq1_bn_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>, float>;
template [[host_name("kernel_mul_mm_iq2_bn_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>, float>;
+template [[host_name("kernel_mul_mm_iq3_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq3_ks, half, 16, dequantize_iq3_ks>, float>;
template [[host_name("kernel_mul_mm_iq4_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>, float>;
template [[host_name("kernel_mul_mm_iq5_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>, float>;
template [[host_name("kernel_mul_mm_iq4_kss_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>, float>;
@@ -9501,6 +9678,7 @@ template [[host_name("kernel_mul_mm_iq5_k_f16")]] kernel mat_mm_t kernel_mul_m
template [[host_name("kernel_mul_mm_iq6_k_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DD<block_iq6_k, QK_NL, dequantize_iq6_k>, half>;
template [[host_name("kernel_mul_mm_iq1_bn_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>, half>;
template [[host_name("kernel_mul_mm_iq2_bn_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>, half>;
+template [[host_name("kernel_mul_mm_iq3_ks_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq3_ks, half, 16, dequantize_iq3_ks>, half>;
template [[host_name("kernel_mul_mm_iq4_ks_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>, half>;
template [[host_name("kernel_mul_mm_iq5_ks_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>, half>;
template [[host_name("kernel_mul_mm_iq4_kss_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>, half>;
@@ -9546,6 +9724,7 @@ template [[host_name("kernel_mul_mm_id_iq5_k_f32")]] kernel mat_mm_id_t kernel
template [[host_name("kernel_mul_mm_id_iq6_k_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DD<block_iq6_k, QK_NL, dequantize_iq6_k>>;
template [[host_name("kernel_mul_mm_id_iq1_bn_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRSBN<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>>;
template [[host_name("kernel_mul_mm_id_iq2_bn_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRSBN<half4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>>;
+template [[host_name("kernel_mul_mm_id_iq3_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq3_ks, half, 16, dequantize_iq3_ks>>;
template [[host_name("kernel_mul_mm_id_iq4_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>>;
template [[host_name("kernel_mul_mm_id_iq5_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>>;
template [[host_name("kernel_mul_mm_id_iq4_kss_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>>;
@@ -9766,6 +9945,7 @@ template [[host_name("kernel_mul_mv_id_iq3_s_f32")]] kernel kernel_mul_mv_id_t
template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_s_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
+template [[host_name("kernel_mul_mv_id_iq3_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq3_ks_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_ks_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq5_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq5_ks_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_kss_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_kss_f32_impl>>;
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index 220c0c99..96242727 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -15425,6 +15425,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
case GGML_TYPE_IQ3_KT: break;
case GGML_TYPE_IQ4_KT: break;
case GGML_TYPE_IQ3_K: break;
+ case GGML_TYPE_IQ3_KS: break;
case GGML_TYPE_IQ4_K: break;
case GGML_TYPE_IQ5_K: break;
case GGML_TYPE_IQ6_K: break;
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 97b5bff7..2e6983df 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1656,6 +1656,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 0,
},
+ [GGML_TYPE_IQ3_KS] = {
+ .type_name = "iq3_ks",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq3_ks),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq3_ks,
+ .from_float = quantize_row_iq3_ks,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq3_ks_ref,
+ .vec_dot = vec_dot_iq3_ks_q8_k,
+ .vec_dot_type = GGML_TYPE_Q8_K,
+ .nrows = 1,
+ .row_meta_size = 2,
+ },
[GGML_TYPE_IQ4_K] = {
.type_name = "iq4_k",
.blck_size = QK_K,
@@ -4578,6 +4591,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ3_KT: wtype = GGML_TYPE_IQ3_KT; break;
case GGML_FTYPE_MOSTLY_IQ4_KT: wtype = GGML_TYPE_IQ4_KT; break;
case GGML_FTYPE_MOSTLY_IQ3_K: wtype = GGML_TYPE_IQ3_K; break;
+ case GGML_FTYPE_MOSTLY_IQ3_KS: wtype = GGML_TYPE_IQ3_KS; break;
case GGML_FTYPE_MOSTLY_IQ4_K: wtype = GGML_TYPE_IQ4_K; break;
case GGML_FTYPE_MOSTLY_IQ3_K_R4: wtype = GGML_TYPE_IQ3_K_R4; break;
case GGML_FTYPE_MOSTLY_IQ4_K_R4: wtype = GGML_TYPE_IQ4_K_R4; break;
@@ -11347,6 +11361,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_K_R4:
@@ -11824,6 +11839,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_K_R4:
@@ -11998,6 +12014,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_K_R4:
@@ -15499,6 +15516,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_K_R4:
@@ -15913,6 +15931,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_K_R4:
@@ -16233,6 +16252,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_K_R4:
@@ -16870,6 +16890,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ3_KT:
case GGML_TYPE_IQ4_KT:
case GGML_TYPE_IQ3_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_K_R4:
@@ -23943,6 +23964,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ3_KT: result = quantize_iq3_kt (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_KT: result = quantize_iq4_kt (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ3_K: result = quantize_iq3_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ3_KS: result = quantize_iq3_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_K: result = quantize_iq4_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ3_K_R4:result = quantize_iq3_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_K_R4:result = quantize_iq4_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
diff --git a/ggml/src/iqk/iqk_gemm_iqk_quants.cpp b/ggml/src/iqk/iqk_gemm_iqk_quants.cpp
index 2105ac00..57038d0c 100644
--- a/ggml/src/iqk/iqk_gemm_iqk_quants.cpp
+++ b/ggml/src/iqk/iqk_gemm_iqk_quants.cpp
@@ -1,4 +1,5 @@
#include "iqk_gemm_iqk_quants.h"
+#include <cstring>
#ifdef IQK_IMPLEMENT
@@ -214,6 +215,68 @@ struct DequantizerIQ3K final : public BaseDequantizer<block_iq3_k> {
constexpr static uint8_t k_shuff[16] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
};
+struct DequantizerIQ3KS final : public BaseDequantizer<block_iq3_ks, true, true> {
+ DequantizerIQ3KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_values()) {}
+ template <typename Q8>
+ inline void compute_block(int i, const Q8& q8, __m512 * acc) {
+ uint32_t aux32; std::memcpy(&aux32, x[i].scales, 4);
+ auto scl = _mm_srlv_epi32(_mm_set1_epi32(aux32), _mm_set_epi32(0, 0, 4, 0));
+ auto scales128 = _mm_cvtepu8_epi16(_mm_and_si128(scl, _mm_set1_epi8(0xf)));
+ scales128 = _mm_mask_add_epi16(scales128, __mmask8(x[i].extra & 0xff), scales128, _mm_set1_epi16(16));
+ scales128 = _mm_sub_epi16(scales128, _mm_set1_epi16(16));
+ auto shifts = _mm_mask_add_epi16(m64, __mmask8(x[i].extra >> 8), m64, _mm_set1_epi16(4));
+ auto mins128 = _mm_mullo_epi16(scales128, shifts);
+ auto mins = MM256_SET_M128I(_mm_shuffle_epi8(mins128, s8k.shuffles[1]), _mm_shuffle_epi8(mins128, s8k.shuffles[0]));
+ auto scales256 = MM256_SET_M128I(scales128, scales128);
+ auto all_scales = _mm512_inserti32x8(_mm512_castsi256_si512(scales256), scales256, 1);
+ __m512i scales[4];
+ for (int k = 0; k < 4; ++k) scales[k] = _mm512_shuffle_epi8(all_scales, shuffles[k]);
+ prepare(x[i].qs, x[i].qh);
+ for (int iy = 0; iy < Q8::nrc_y; ++iy) {
+ auto q8s = q8.load_bsums(iy, i);
+ auto prod = _mm256_madd_epi16(mins, q8s);
+ auto sumi = _mm512_inserti32x8(_mm512_setzero_si512(), prod, 0);
+ for (int k = 0; k < 4; ++k) {
+ auto p = _mm512_maddubs_epi16(bits.values[k], q8.load_quants64(iy, i, k));
+ sumi = _mm512_dpwssd_epi32(sumi, p, scales[k]);
+ }
+ acc[iy] = _mm512_fmadd_ps(_mm512_set1_ps(d*q8.scale(iy, i)), _mm512_cvtepi32_ps(sumi), acc[iy]);
+ }
+ }
+ inline void prepare(const uint8_t * q2, const uint8_t * qh) {
+ bits.prepare(q2);
+ auto h256 = _mm256_loadu_si256((const __m256i *)qh);
+ auto hbits = _mm512_inserti32x8(_mm512_castsi256_si512(h256), _mm256_srli_epi16(h256, 1), 1);
+ bits.values[0] = _mm512_or_si512(bits.values[0], _mm512_and_si512(_mm512_slli_epi16(hbits, 2), hmask));
+ bits.values[1] = _mm512_or_si512(bits.values[1], _mm512_and_si512(hbits, hmask));
+ bits.values[2] = _mm512_or_si512(bits.values[2], _mm512_and_si512(_mm512_srli_epi16(hbits, 2), hmask));
+ bits.values[3] = _mm512_or_si512(bits.values[3], _mm512_and_si512(_mm512_srli_epi16(hbits, 4), hmask));
+ bits.values[0] = _mm512_shuffle_epi8(values, bits.values[0]);
+ bits.values[1] = _mm512_shuffle_epi8(values, bits.values[1]);
+ bits.values[2] = _mm512_shuffle_epi8(values, bits.values[2]);
+ bits.values[3] = _mm512_shuffle_epi8(values, bits.values[3]);
+ }
+ static inline __m512i load_values() {
+ static const uint8_t kvalues_iq3nl[16] = {1, 24, 41, 54, 65, 77, 92, 111, 5, 28, 45, 58, 69, 81, 96, 115};
+ auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq3nl);
+ auto val256 = MM256_SET_M128I(val128, val128);
+ return _mm512_inserti32x8(_mm512_castsi256_si512(val256), val256, 1);
+ }
+
+ Q2Bits bits;
+ Scales8KBase s8k;
+
+ const __m128i m64 = _mm_set1_epi16(-64);
+ const __m512i values;
+ const __m512i hmask = _mm512_set1_epi8(4);
+ const __m512i shuffles[4] = {
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0100), _mm256_set1_epi16(0x0302), 1),
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0504), _mm256_set1_epi16(0x0706), 1),
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0908), _mm256_set1_epi16(0x0b0a), 1),
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0d0c), _mm256_set1_epi16(0x0f0e), 1),
+ };
+};
+
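A minimal scalar sketch of the scale decode the mask-adds above implement (it mirrors dequantize_row_iq3_ks further down in this diff; iq3_ks_block_scale is an illustrative helper, not part of the patch):

    // Each super-block holds eight 32-weight blocks. The low 4 scale bits sit in
    // the packed nibbles of scales[4]; the 5th bit comes from extra bits 0..7.
    static inline int iq3_ks_block_scale(const block_iq3_ks * b, int ib) { // ib = 0..7
        int lo = ib < 4 ? (b->scales[ib] & 0xf) : (b->scales[ib - 4] >> 4);
        int hi = (b->extra >> ib) & 1;
        return ((hi << 4) | lo) - 16; // signed block scale in [-16, 15]
    }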
struct DequantizerIQ4KSS final : public BaseDequantizer<block_iq4_kss, true> {
DequantizerIQ4KSS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_512()) {}
template <typename Q8>
@@ -922,6 +985,40 @@ struct DequantizerIQ3K final : public BaseDequantizer<block_iq3_k> {
constexpr static uint8_t k_shuff[16] = {0, 4, 8, 12, 1, 5, 9, 13, 2, 6, 10, 14, 3, 7, 11, 15};
};
+struct DequantizerIQ3KS final : public BaseDequantizer<block_iq3_ks, true, true> {
+ DequantizerIQ3KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_values()) {}
+ template <typename Q8>
+ inline __m256i new_block(int i, [[maybe_unused]] const Q8& q8, [[maybe_unused]] __m256 * accd) {
+ uint32_t aux32; std::memcpy(&aux32, x[i].scales, 4);
+ auto scl = _mm_cvtepi8_epi16(_mm_and_si128(_mm_srlv_epi32(_mm_set1_epi32(aux32), _mm_set_epi32(0, 0, 4, 0)), _mm_set1_epi8(0xf)));
+ auto sch = _mm_cmpeq_epi16(_mm_and_si128(_mm_set1_epi16(x[i].extra), mask), mask);
+ auto scales128 = _mm_add_epi16(scl, _mm_and_si128(sch, _mm_set1_epi16(16)));
+ scales128 = _mm_sub_epi16(scales128, _mm_set1_epi16(16));
+ return MM256_SET_M128I(scales128, scales128);
+ }
+ inline void prepare(int i, int j) {
+ uint8_t extra = x[i].extra >> (8 + 4*j);
+ hbits = j == 0 ? _mm256_loadu_si256((const __m256i *)x[i].qh) : _mm256_srli_epi16(hbits, 4);
+ bits.prepare(x[i].qs, j);
+ bits.values[0] = _mm256_add_epi8(_mm256_set1_epi8((extra << 3) & 8), _mm256_or_si256(bits.values[0], _mm256_and_si256(_mm256_slli_epi16(hbits, 2), mh)));
+ bits.values[1] = _mm256_add_epi8(_mm256_set1_epi8((extra << 2) & 8), _mm256_or_si256(bits.values[1], _mm256_and_si256(_mm256_slli_epi16(hbits, 1), mh)));
+ bits.values[2] = _mm256_add_epi8(_mm256_set1_epi8((extra << 1) & 8), _mm256_or_si256(bits.values[2], _mm256_and_si256(hbits, mh)));
+ bits.values[3] = _mm256_add_epi8(_mm256_set1_epi8((extra << 0) & 8), _mm256_or_si256(bits.values[3], _mm256_and_si256(_mm256_srli_epi16(hbits, 1), mh)));
+ for (int k = 0; k < 4; ++k) bits.values[k] = _mm256_shuffle_epi8(values, bits.values[k]);
+ }
+    static inline __m256i load_values() {
+ auto v = _mm_loadu_si128((const __m128i *)iq3nl_values);
+ return MM256_SET_M128I(v, v);
+ }
+
+ Q2Bits bits;
+ __m256i hbits;
+ const __m256i values;
+ const __m256i mh = _mm256_set1_epi8(4);
+ const __m128i mask = _mm_setr_epi16(0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80);
+};
+
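The per-weight lookup that the shifts and masks above implement, again as a scalar sketch (kb indexes the eight 32-weight blocks, j the weight within a block; iq3_ks_weight is illustrative):

    static inline int8_t iq3_ks_weight(const block_iq3_ks * b, int kb, int j) {
        int idx = ((b->qs[32*(kb/4) + j] >> 2*(kb%4)) & 3)  // 2 low bits
                | (((b->qh[j] >> kb) & 1) << 2);            // 1 high bit
        int shift = (b->extra >> (8 + kb)) & 1;             // per-block table select
        return iq3nl_values[idx + 8*shift];                 // 8-entry non-linear grid
    }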
struct DequantizerIQ4KSS final : public BaseDequantizer<block_iq4_kss, true> {
DequantizerIQ4KSS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_256()) {}
template <typename Q8>
@@ -1286,7 +1383,7 @@ static void mul_mat_qX_K_q8_K_T(int n, const void * vx, size_t bx, const DataInf
set_scales_8(all_scales, j, scales);
- if constexpr (std::is_same_v<Dequantizer, DequantizerIQ4KS>) {
+ if constexpr (std::is_same_v<Dequantizer, DequantizerIQ4KS> || std::is_same_v<Dequantizer, DequantizerIQ3KS>) {
multiply_add_avx2(deq.bits, scales, j, i, q8, sumi);
} else {
multiply_add(deq.bits, scales, j, i, q8, sumi);
@@ -2030,6 +2127,7 @@ static void mul_mat_iq5_ks_r4_q8_k(int n, const void * vx, size_t bx, const Data
template <typename Dequantizer> void set_functions(std::array<mul_mat_t, IQK_MAX_NY>& funcs) {
#ifdef HAVE_FANCY_SIMD
if constexpr (std::is_same_v<Dequantizer, DequantizerIQ2KS> ||
+ std::is_same_v<Dequantizer, DequantizerIQ3KS> ||
std::is_same_v<Dequantizer, DequantizerIQ4KS> ||
std::is_same_v<Dequantizer, DequantizerIQ5KS>) {
IQK_SET_MUL_MAT_FUNCTIONS_T(mul_mat_iqX_k_q8_K_AVX512_new, Dequantizer, funcs)
@@ -2307,6 +2405,65 @@ void iqk_convert_iq3_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
}
}
+void iqk_convert_iq3_ks_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
+ GGML_ASSERT(n%QK_K == 0);
+ GGML_ASSERT(nrc_x%8 == 0);
+
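+    // Convert 8 rows at a time: decode each super-block into int8 values xv[]
+    // with per-block scales ls[], then let convert_to_q8_k_r8() repack them
+    // into the interleaved q8_k_r8 layout; row scales are folded into y[i].d.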
+ int nb = n/QK_K;
+
+ const block_iq3_ks * x8[8];
+
+ block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
+
+ __m256i values;
+ {
+ auto v = _mm_loadu_si128((const __m128i *)iq3nl_values);
+ values = MM256_SET_M128I(v, v);
+ }
+
+ ggml_half drow[8];
+ float dnew[8];
+ int16_t ls[16];
+
+ __m256i xv[8];
+ uint32_t block[8];
+
+ for (int ix = 0; ix < nrc_x; ix += 8) {
+ for (int k = 0; k < 8; ++k) {
+ const ggml_half * dptr = (const ggml_half *)((const char *)vx + (ix + k)*bx);
+ drow[k] = dptr[0];
+ x8[k] = (const block_iq3_ks *)(dptr + 1);
+ }
+ auto vd = _mm256_cvtph_ps(_mm_loadu_si128((const __m128i *)drow));
+ for (int i = 0; i < nb; ++i) {
+ for (int k = 0; k < 8; ++k) {
+ auto hbits = _mm256_loadu_si256((const __m256i *)x8[k][i].qh);
+ auto extra = x8[k][i].extra;
+ uint8_t extra_v = extra >> 8;
+ for (int j = 0; j < 4; ++j) {
+ ls[2*j+0] = ls[2*j+1] = ((x8[k][i].scales[j] & 0xf) | ((extra << 4) & 0x10)) - 16;
+ ls[2*j+8] = ls[2*j+9] = ((x8[k][i].scales[j] >> 4) | ((extra << 0) & 0x10)) - 16;
+ extra >>= 1;
+ }
+ for (int i128 = 0; i128 < QK_K/128; ++i128) {
+ auto lbits = _mm256_loadu_si256((const __m256i *)x8[k][i].qs + i128);
+ for (int j = 0; j < 4; ++j) {
+ xv[4*i128+j] = _mm256_or_si256(_mm256_and_si256(lbits, _mm256_set1_epi8(3)), _mm256_and_si256(_mm256_slli_epi16(hbits, 2), _mm256_set1_epi8(4)));
+ xv[4*i128+j] = _mm256_add_epi8(xv[4*i128+j], _mm256_set1_epi8((extra_v & 1) << 3));
+ xv[4*i128+j] = _mm256_shuffle_epi8(values, xv[4*i128+j]);
+ extra_v >>= 1;
+ lbits = _mm256_srli_epi16(lbits, 2);
+ hbits = _mm256_srli_epi16(hbits, 1);
+ }
+ }
+ dnew[k] = convert_to_q8_k_r8(k, 1.f/127, xv, ls, block, y[i].qs);
+ }
+ _mm_storeu_si128((__m128i *)y[i].d, _mm256_cvtps_ph(_mm256_mul_ps(vd, _mm256_loadu_ps(dnew)), _MM_ROUND_NEAREST));
+ }
+ y += nb;
+ }
+}
+
void iqk_convert_iq4_ks_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
GGML_ASSERT(n%QK_K == 0);
GGML_ASSERT(nrc_x%8 == 0);
@@ -2730,6 +2887,7 @@ bool iqk_convert_iqk_quants_q80_r8(int type, int n, const void * vx, size_t bx,
switch (ggml_type(type)) {
case GGML_TYPE_IQ2_KS : iqk_convert_iq2_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ2_K : iqk_convert_iq2_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
+ case GGML_TYPE_IQ3_KS : iqk_convert_iq3_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ3_K : iqk_convert_iq3_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ4_KS : iqk_convert_iq4_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ4_K : iqk_convert_iq4_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
@@ -2758,6 +2916,9 @@ bool iqk_set_kernels_iqk_quants(int ne00, int typeA, int typeB, std::array<mul_m
case GGML_TYPE_IQ2_K:
set_functions<DequantizerIQ2K>(kernels);
break;
+ case GGML_TYPE_IQ3_KS:
+ set_functions<DequantizerIQ3KS>(kernels);
+ break;
case GGML_TYPE_IQ3_K:
set_functions<DequantizerIQ3K>(kernels);
break;
@@ -3080,6 +3241,58 @@ struct DequantizerIQ3K final : public BaseDequantizer<block_iq3_k> {
};
+struct DequantizerIQ3KS final : public BaseDequantizer<block_iq3_ks, true, true> {
+
+ DequantizerIQ3KS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc), values(load_values()) {}
+
+ constexpr static int num_blocks() { return 8; }
+ constexpr static bool should_scale_quants() { return false; }
+
+ template <typename Q8>
+ inline int32x4x2_t new_block(int i, const Q8& q8, float32x4_t * acc) {
+ (void)q8;
+ (void)acc;
+ uint32_t aux32; std::memcpy(&aux32, x[i].scales, 4);
+ auto scl8 = vand_s8(vreinterpret_s8_u32(uint32x2_t{aux32, aux32 >> 4}), vdup_n_s8(0xf));
+ auto sch8 = vdup_n_u8(x[i].extra & 0xff);
+ sch8 = vand_u8(vceq_u8(vand_u8(sch8, shmask), shmask), vdup_n_u8(16));
+ scl8 = vsub_s8(vadd_s8(scl8, vreinterpret_s8_u8(sch8)), vdup_n_s8(16));
+ auto scales16 = vmovl_s8(scl8);
+ int32x4x2_t scales = {vmovl_s16(vget_low_s16(scales16)), vmovl_s16(vget_high_s16(scales16))};
+ return scales;
+ }
+ inline void prepare(int i, int j) {
+ bits.prepare(x[i].qs+32*j);
+ if (j == 0) {
+ hbits = vld1q_u8_x2(x[i].qh);
+ }
+ else {
+ hbits.val[0] = vshrq_n_u8(hbits.val[0], 4);
+ hbits.val[1] = vshrq_n_u8(hbits.val[1], 4);
+ }
+ uint8_t extra = x[i].extra >> (8 + 4*j);
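+        // Merge each block's high bit into its 2-bit quants, then map the 3-bit
+        // index through the base or shifted 8-entry table (values.val[extra & 1]).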
+ bits.b1.val[0] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b1.val[0], vandq_u8(vshlq_n_u8(hbits.val[0], 2), hmask)));
+ bits.b1.val[1] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b1.val[1], vandq_u8(vshlq_n_u8(hbits.val[1], 2), hmask))); extra >>= 1;
+ bits.b1.val[2] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b1.val[2], vandq_u8(vshlq_n_u8(hbits.val[0], 1), hmask)));
+ bits.b1.val[3] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b1.val[3], vandq_u8(vshlq_n_u8(hbits.val[1], 1), hmask))); extra >>= 1;
+ bits.b2.val[0] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b2.val[0], vandq_u8(hbits.val[0], hmask)));
+ bits.b2.val[1] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b2.val[1], vandq_u8(hbits.val[1], hmask))); extra >>= 1;
+ bits.b2.val[2] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b2.val[2], vandq_u8(vshrq_n_u8(hbits.val[0], 1), hmask)));
+ bits.b2.val[3] = vqtbl1q_s8(values.val[extra & 1], vorrq_u8(bits.b2.val[3], vandq_u8(vshrq_n_u8(hbits.val[1], 1), hmask)));
+ }
+ static int8x16x2_t load_values() {
+ auto v1 = vld1_s8(iq3nl_values + 0);
+ auto v2 = vld1_s8(iq3nl_values + 8);
+ return { vcombine_s8(v1, v1), vcombine_s8(v2, v2) };
+ }
+
+ Q2bits bits;
+ uint8x16x2_t hbits;
+ const int8x16x2_t values;
+ const uint8x16_t hmask = vdupq_n_u8(4);
+ const uint8x8_t shmask = vreinterpret_u8_u64(vdup_n_u64(0x8040201008040201));
+};
+
struct DequantizerIQ4KS final : public BaseDequantizer<block_iq4_ks, true> {
DequantizerIQ4KS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc), values(vld1q_s8_x2(iq4k_values)) {}
@@ -4196,6 +4409,71 @@ void iqk_convert_iq2_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int
}
}
+void iqk_convert_iq3_ks_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
+ GGML_ASSERT(n%QK_K == 0);
+ GGML_ASSERT(nrc_x%8 == 0);
+
+ int nb = n/QK_K;
+
+ const block_iq3_ks * x8[8];
+
+ block_q8_k_r8 * y = (block_q8_k_r8 *)vy;
+
+ int8x16x2_t values;
+ {
+ auto v1 = vld1_s8(iq3nl_values+0);
+ auto v2 = vld1_s8(iq3nl_values+8);
+ values.val[0] = vcombine_s8(v1, v1);
+ values.val[1] = vcombine_s8(v2, v2);
+ }
+
+ ggml_half dh[8];
+ int8x16x2_t xv[8];
+ uint32_t block[8];
+ int8_t ls[16];
+
+ auto ml = vdupq_n_u8(0x03);
+ auto mh = vdupq_n_u8(0x04);
+
+ for (int ix = 0; ix < nrc_x; ix += 8) {
+ for (int k = 0; k < 8; ++k) {
+ auto dptr = (const ggml_half *)((const char *)vx + (ix+k)*bx);
+ dh[k] = dptr[0];
+ x8[k] = (const block_iq3_ks *)(dptr + 1);
+ }
+ for (int i = 0; i < nb; ++i) {
+ for (int k = 0; k < 8; ++k) {
+ auto extra = x8[k][i].extra;
+ auto extra_v = extra >> 8;
+ auto hbits = vld1q_u8_x2(x8[k][i].qh);
+ for (int i128 = 0; i128 < 2; ++i128) {
+ ls[8*i128+0] = ls[8*i128+1] = int8_t(((x8[k][i].scales[0] >> 4*i128) & 0xf) | ((extra << 4) & 0x10)) - 16;
+ ls[8*i128+2] = ls[8*i128+3] = int8_t(((x8[k][i].scales[1] >> 4*i128) & 0xf) | ((extra << 3) & 0x10)) - 16;
+ ls[8*i128+4] = ls[8*i128+5] = int8_t(((x8[k][i].scales[2] >> 4*i128) & 0xf) | ((extra << 2) & 0x10)) - 16;
+ ls[8*i128+6] = ls[8*i128+7] = int8_t(((x8[k][i].scales[3] >> 4*i128) & 0xf) | ((extra << 1) & 0x10)) - 16;
+
+ auto bits = vld1q_u8_x2(x8[k][i].qs+32*i128);
+ xv[4*i128+0].val[0] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vandq_u8(bits.val[0], ml), vandq_u8(vshlq_n_u8(hbits.val[0], 2), mh)));
+ xv[4*i128+0].val[1] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vandq_u8(bits.val[1], ml), vandq_u8(vshlq_n_u8(hbits.val[1], 2), mh))); extra_v >>= 1;
+ xv[4*i128+1].val[0] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vandq_u8(vshrq_n_u8(bits.val[0], 2), ml), vandq_u8(vshlq_n_u8(hbits.val[0], 1), mh)));
+ xv[4*i128+1].val[1] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vandq_u8(vshrq_n_u8(bits.val[1], 2), ml), vandq_u8(vshlq_n_u8(hbits.val[1], 1), mh))); extra_v >>= 1;
+ xv[4*i128+2].val[0] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vandq_u8(vshrq_n_u8(bits.val[0], 4), ml), vandq_u8(hbits.val[0], mh)));
+ xv[4*i128+2].val[1] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vandq_u8(vshrq_n_u8(bits.val[1], 4), ml), vandq_u8(hbits.val[1], mh))); extra_v >>= 1;
+ xv[4*i128+3].val[0] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vshrq_n_u8(bits.val[0], 6), vandq_u8(vshrq_n_u8(hbits.val[0], 1), mh)));
+ xv[4*i128+3].val[1] = vqtbl1q_s8(values.val[extra_v & 1], vorrq_u8(vshrq_n_u8(bits.val[1], 6), vandq_u8(vshrq_n_u8(hbits.val[1], 1), mh))); extra_v >>= 1;
+ hbits.val[0] = vshrq_n_u8(hbits.val[0], 4);
+ hbits.val[1] = vshrq_n_u8(hbits.val[1], 4);
+ extra >>= 4;
+ }
+ float dnew = convert_to_q8_k_r8(1.f/127, xv, ls, block, (uint32_t *)y[i].qs + k);
+ y[i].d[k] = GGML_FP32_TO_FP16(GGML_FP16_TO_FP32(dh[k])*dnew);
+ }
+ }
+ y += nb;
+ }
+}
+
void iqk_convert_iq3_k_q8_k_r8(int n, const void * vx, size_t bx, void * vy, int nrc_x) {
GGML_ASSERT(n%QK_K == 0);
GGML_ASSERT(nrc_x%8 == 0);
@@ -4405,6 +4683,7 @@ bool iqk_convert_iqk_quants_q80_r8(int type, int n, const void * vx, size_t bx,
switch (ggml_type(type)) {
case GGML_TYPE_IQ2_KS : iqk_convert_iq2_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ2_K : iqk_convert_iq2_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
+ case GGML_TYPE_IQ3_KS : iqk_convert_iq3_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ3_K : iqk_convert_iq3_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ4_KS : iqk_convert_iq4_ks_q8_k_r8(n, vx, bx, vy, nrc_x); break;
case GGML_TYPE_IQ4_K : iqk_convert_iq4_k_q8_k_r8 (n, vx, bx, vy, nrc_x); break;
@@ -4431,6 +4710,9 @@ bool iqk_set_kernels_iqk_quants(int ne00, int typeA, int typeB, std::array<mul_m
case GGML_TYPE_IQ2_K:
IQK_SET_MUL_MAT_FUNCTIONS_T(mul_mat_qX_K_q8_K_T, DequantizerIQ2K, kernels);
break;
+ case GGML_TYPE_IQ3_KS:
+ IQK_SET_MUL_MAT_FUNCTIONS_T(mul_mat_qX_K_q8_K_T, DequantizerIQ3KS, kernels);
+ break;
case GGML_TYPE_IQ3_K:
IQK_SET_MUL_MAT_FUNCTIONS_T(mul_mat_qX_K_q8_K_T, DequantizerIQ3K, kernels);
break;
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 1e015fb5..0054f6cb 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -251,6 +251,7 @@ struct MulMat {
case GGML_TYPE_Q6_K : return nrc_y >= 64 ? GGML_TYPE_Q8_0_R8 : type;
case GGML_TYPE_IQ2_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
case GGML_TYPE_IQ2_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ3_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
case GGML_TYPE_IQ3_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
case GGML_TYPE_IQ4_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
case GGML_TYPE_IQ4_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
@@ -295,6 +296,7 @@ struct MulMat {
case GGML_TYPE_IQ3_KT : return nrc_y >= 32 ? GGML_TYPE_Q8_0_R8 : type;
case GGML_TYPE_IQ4_KT : return nrc_y >= 32 ? GGML_TYPE_Q8_0_R8 : type;
case GGML_TYPE_IQ2_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
+ case GGML_TYPE_IQ3_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
case GGML_TYPE_IQ4_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
case GGML_TYPE_IQ5_KS : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
case GGML_TYPE_IQ2_K : return nrc_y >= 32 ? GGML_TYPE_Q8_K_R8 : type;
@@ -422,6 +424,7 @@ bool iqk_convert_repack(int typeA, int n, const void * vx, size_t bx, void * vy,
return iqk_convert_iquants_q80_r8(typeA, n, vx, bx, vy, nrc_x);
case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_KSS:
case GGML_TYPE_IQ4_KS:
@@ -824,6 +827,7 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
case GGML_TYPE_IQ3_XXS_R4:
case GGML_TYPE_IQ3_S_R4:
return iqk_set_kernels_iquants(ne00, typeA, typeB, mm.funcs, mm.func16);
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ4_KSS:
@@ -905,6 +909,7 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
return iqk_set_kernels_kquants(ne00, typeA, typeB, m.funcs, m.func16);
case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_KSS:
case GGML_TYPE_IQ4_KS:
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 0384e49a..9095cda4 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -1820,6 +1820,237 @@ void vec_dot_iq3_k_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx,
}
//
+// ============================================== iq3_ks
+//
+namespace {
+static void quantize_row_iq3_ks_impl(const int super_block_size, const int block_size,
+ int n_per_row, const float * x, char * cy,
+ float * all_scales, float * weight,
+ const int8_t * values,
+ const float * quant_weights,
+ const int ntry) {
+
+ ggml_half * dptr = (ggml_half *)cy;
+ block_iq3_ks * y = (block_iq3_ks *)(dptr + 1);
+
+ const int8_t * shifted_values = values + 8;
+
+ float amax_scale = 0;
+ float max_scale = 0;
+
+ for (int ibl = 0; ibl < n_per_row/super_block_size; ++ibl) {
+ memset(&y[ibl], 0, sizeof(block_iq3_ks));
+ const float * xbl = x + ibl*super_block_size;
+ auto scales = all_scales + ibl*(super_block_size/block_size);
+ float sigma2 = 0;
+ for (int j = 0; j < super_block_size; ++j) sigma2 += xbl[j]*xbl[j];
+ sigma2 *= 2.f/super_block_size;
+ for (int ib = 0; ib < super_block_size/block_size; ++ib) {
+ const float * xb = xbl + ib*block_size;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*super_block_size + ib*block_size;
+ for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
+ }
+ float amax = 0, max = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float ax = fabsf(xb[j]);
+ if (ax > amax) {
+ amax = ax; max = xb[j];
+ }
+ }
+ if (!amax) {
+ scales[ib] = 0;
+ continue;
+ }
+ float d = ntry > 0 ? -max/values[0] : max/values[0];
+ float id = 1/d;
+ float sumqx_p = 0, sumq2_p = 0;
+ float sumqx_m = 0, sumq2_m = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq3nl(values, al);
+ float q = values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq3nl(values, -al);
+ q = values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ d = sumqx_p/sumq2_p;
+ bool is_shifted = false;
+ float best = d*sumqx_p;
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d*sumqx_m;
+ }
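+            // Grid search: probe inverse scales that send the block max near the
+            // smallest grid value (within +/- ntry steps), for both value tables.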
+ for (int itry = -ntry; itry <= ntry; ++itry) {
+ id = (itry + values[0])/max;
+ sumqx_p = sumq2_p = 0;
+ sumqx_m = sumq2_m = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq3nl(values, al);
+ float q = values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq3nl(values, -al);
+ q = values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ if (sumq2_p > 0 && sumqx_p*sumqx_p > best*sumq2_p) {
+ d = sumqx_p/sumq2_p; best = d * sumqx_p; is_shifted = false;
+ }
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d * sumqx_m; is_shifted = false;
+ }
+ id = (itry + shifted_values[0])/max;
+ sumqx_p = sumq2_p = 0;
+ sumqx_m = sumq2_m = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq3nl(shifted_values, al);
+ float q = shifted_values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq3nl(shifted_values, -al);
+ q = shifted_values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ if (sumq2_p > 0 && sumqx_p*sumqx_p > best*sumq2_p) {
+ d = sumqx_p/sumq2_p; best = d * sumqx_p; is_shifted = true;
+ }
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d * sumqx_m; is_shifted = true;
+ }
+ }
+ if (is_shifted) y[ibl].extra |= (1 << (8 + ib));
+ scales[ib] = d;
+ float ascale = std::abs(d);
+ if (ascale > amax_scale) {
+ amax_scale = ascale; max_scale = d;
+ }
+ }
+ }
+ float d = -max_scale/16;
+ *dptr = GGML_FP32_TO_FP16(d);
+ if (!d) return;
+    float id = 1/d; // d == 0 was handled above
+ float sumqx = 0, sumq2 = 0;
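+    // Second pass: round each block scale to 5 bits (top bit stored in extra),
+    // pack the 3-bit weight indices into qs/qh, and refit the row scale d
+    // against the actual quantized values.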
+ for (int ibl = 0; ibl < n_per_row/super_block_size; ++ibl) {
+ const float * xbl = x + ibl*super_block_size;
+ float sigma2 = 0;
+ for (int j = 0; j < super_block_size; ++j) sigma2 += xbl[j]*xbl[j];
+ sigma2 *= 2.f/super_block_size;
+ auto scales = all_scales + (super_block_size/block_size)*ibl;
+ for (int ib = 0; ib < super_block_size/block_size; ++ib) {
+ const int8_t * block_values = (y[ibl].extra >> (8 + ib)) & 0x01 ? shifted_values : values;
+ int l = nearest_int(id*scales[ib]);
+ l = std::max(-16, std::min(15, l));
+ uint8_t ul = l + 16;
+ y[ibl].scales[ib%4] |= (ul & 0xf) << 4*(ib/4);
+ y[ibl].extra |= (ul >> 4) << ib;
+ float dl = d * l;
+ float idl = dl ? 1/dl : 0.f;
+ const float * xb = xbl + ib*block_size;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*super_block_size + ib*block_size;
+ for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
+ }
+ auto qs = y[ibl].qs + (ib/4)*block_size;
+ auto qh = y[ibl].qh + (ib/8)*block_size;
+ for (int j = 0; j < block_size; ++j) {
+ uint8_t i = best_index_iq3nl(block_values, idl*xb[j]);
+ qs[j] |= ((i & 3) << 2*(ib%4));
+ qh[j] |= ((i >> 2) << (ib%8));
+ float w = weight[j];
+ float q = block_values[i]*l;
+ sumqx += w*q*xb[j];
+ sumq2 += w*q*q;
+ }
+ }
+ }
+ if (sumq2 > 0) *dptr = GGML_FP32_TO_FP16(sumqx/sumq2);
+}
+}
+
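Each candidate in the search above is scored with the same weighted least-squares step; condensed into one helper it reads (a sketch reusing best_index_iq3nl from this file; iq3_ks_fit_scale is illustrative, not part of the patch):

    static float iq3_ks_fit_scale(const float * xb, const float * w, int n,
                                  float id, const int8_t * values, float * score) {
        float sumqx = 0, sumq2 = 0;
        for (int j = 0; j < n; ++j) {
            float q = values[best_index_iq3nl(values, id*xb[j])]; // nearest grid point
            sumqx += w[j]*q*xb[j];
            sumq2 += w[j]*q*q;
        }
        float d = sumq2 > 0 ? sumqx/sumq2 : 0.f; // argmin_d sum_j w_j*(x_j - d*q_j)^2
        *score = d*sumqx; // explained weighted energy; larger is better
        return d;
    }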
+void quantize_row_iq3_ks_ref(const float * x, block_iq3_ks * y, int64_t k) {
+ quantize_iq3_ks(x, (void *)y, 1, k, nullptr);
+}
+
+void quantize_row_iq3_ks(const float * x, void * y, int64_t k) {
+ quantize_iq3_ks(x, (void *)y, 1, k, nullptr);
+}
+
+size_t quantize_iq3_ks(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+ constexpr int kBlockSize = 32;
+ GGML_ASSERT(n_per_row%QK_K == 0);
+ auto row_size = ggml_row_size(GGML_TYPE_IQ3_KS, n_per_row);
+ char * qrow = (char *)dst;
+ float weight[kBlockSize];
+ std::vector<float> all_scales(n_per_row/kBlockSize);
+ for (int64_t row = 0; row < nrows; ++row) {
+ quantize_row_iq3_ks_impl(QK_K, kBlockSize, n_per_row, src, qrow, all_scales.data(), weight, iq3nl_values, imatrix, 5);
+ src += n_per_row;
+ qrow += row_size;
+ }
+ return nrows * row_size;
+}
+
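Row layout: one fp16 scale followed by n_per_row/256 super-blocks. Given the fields used above (a 16-bit extra word, 4 scale bytes, 64 bytes of 2-bit quants, 32 bytes of high bits), a super-block occupies 102 bytes, i.e. 102*8/256 = 3.1875 bits per weight plus the per-row scale, matching the "3.1875 bpw" string added to llama.cpp below.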
+void dequantize_row_iq3_ks(const block_iq3_ks * x, float * y, int64_t k) {
+ constexpr int kBlockSize = 32;
+ static_assert(QK_K/kBlockSize == 8);
+ GGML_ASSERT(k%QK_K == 0);
+ const ggml_half * dptr = (const ggml_half *)x;
+ float d = GGML_FP16_TO_FP32(*dptr);
+ x = (const block_iq3_ks *)(dptr + 1);
+ float dl[8];
+ int nblock = k/QK_K;
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+ for (int j = 0; j < 4; ++j) {
+ int ls1 = (x[ibl].scales[j] & 0xf) | (((x[ibl].extra >> (j+0)) & 1) << 4);
+ int ls2 = (x[ibl].scales[j] >> 4) | (((x[ibl].extra >> (j+4)) & 1) << 4);
+ dl[j+0] = d*(ls1 - 16);
+ dl[j+4] = d*(ls2 - 16);
+ }
+ auto qs = x[ibl].qs;
+ auto qh = x[ibl].qh;
+ for (int i128 = 0; i128 < QK_K/128; ++i128) {
+ for (int ib = 0; ib < 4; ++ib) {
+ const int8_t * values = iq3nl_values + ((x[ibl].extra >> (8 + (4*i128+ib)) & 1) << 3);
+ for (int j = 0; j < kBlockSize; ++j) {
+ y[j] = dl[4*i128 + ib] * values[((qs[j] >> 2*ib) & 3) | (((qh[j] >> (4*i128+ib)) & 1) << 2)];
+ }
+ y += kBlockSize;
+ }
+ qs += kBlockSize;
+ }
+ }
+}
+
+void vec_dot_iq3_ks_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ3_KS, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+ return;
+ }
+#endif
+ GGML_ASSERT(n%QK_K == 0);
+ GGML_ASSERT(nrc == 1);
+ GGML_UNUSED(bs);
+ GGML_UNUSED(bx);
+ GGML_UNUSED(by);
+ GGML_ABORT("Not implemented");
+}
+
+//
// ============================================== iq4_K
//
void dequantize_row_iq4_k(const block_iq4_k * x, float * y, int64_t k) {
diff --git a/ggml/src/iqk/iqk_quantize.h b/ggml/src/iqk/iqk_quantize.h
index 70918a65..3fc02a5e 100644
--- a/ggml/src/iqk/iqk_quantize.h
+++ b/ggml/src/iqk/iqk_quantize.h
@@ -31,6 +31,12 @@ size_t quantize_iq3_k(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst,
void dequantize_row_iq3_k(const block_iq3_k * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_iq3_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void quantize_row_iq3_ks_ref(const float * GGML_RESTRICT x, block_iq3_ks * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq3_ks(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+size_t quantize_iq3_ks(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+void dequantize_row_iq3_ks(const block_iq3_ks * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+void vec_dot_iq3_ks_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
void quantize_row_iq4_k_ref(const float * GGML_RESTRICT x, block_iq4_k * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_k(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
size_t quantize_iq4_k(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
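A minimal round-trip sketch of the new entry points (hypothetical usage; ggml_row_size is the existing ggml helper, and n must be a multiple of QK_K = 256):

    #include <vector>

    static void iq3_ks_roundtrip(const float * src, float * dst, int64_t n) {
        std::vector<char> packed(ggml_row_size(GGML_TYPE_IQ3_KS, n));
        quantize_iq3_ks(src, packed.data(), /*nrows=*/1, n, /*imatrix=*/nullptr);
        // the packed row starts with the fp16 row scale, as the dequantizer expects
        dequantize_row_iq3_ks((const block_iq3_ks *)packed.data(), dst, n);
    }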
diff --git a/include/llama.h b/include/llama.h
index 53adeb94..6c8bff95 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -201,6 +201,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_IQ2_KT = 151, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_KT = 152, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_KT = 153, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ3_KS = 154, // except 1d tensors
//
LLAMA_FTYPE_MOSTLY_Q4_0_R8 = 202, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q8_0_R8 = 207, // except 1d tensors
diff --git a/src/llama.cpp b/src/llama.cpp
index 1ea2084d..8823ab5b 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4374,6 +4374,7 @@ struct llama_model_loader {
case GGML_TYPE_IQ5_KS: ftype = LLAMA_FTYPE_MOSTLY_IQ5_KS; break;
case GGML_TYPE_IQ2_K: ftype = LLAMA_FTYPE_MOSTLY_IQ2_K; break;
case GGML_TYPE_IQ2_K_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ2_K_R4;break;
+ case GGML_TYPE_IQ3_KS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_KS; break;
case GGML_TYPE_IQ3_K: ftype = LLAMA_FTYPE_MOSTLY_IQ3_K; break;
case GGML_TYPE_IQ3_K_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ3_K_R4;break;
case GGML_TYPE_IQ4_K: ftype = LLAMA_FTYPE_MOSTLY_IQ4_K; break;
@@ -5115,6 +5116,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
case LLAMA_FTYPE_MOSTLY_IQ5_KS: return "IQ5_KS - 5.25 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_K: return "IQ2_K - 2.375 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_K_R4: return "IQ2_K_R4 - 2.375 bpw";
+ case LLAMA_FTYPE_MOSTLY_IQ3_KS: return "IQ3_KS - 3.1875 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_K: return "IQ3_K - 3.4325 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_K_R4: return "IQ3_K_R4 - 3.4325 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_KL: return "IQ3_KL - 4 bpw";
@@ -18642,7 +18644,7 @@ static ggml_type change_type_if_necessary(ggml_type new_type, int nx, int ny) {
new_type == GGML_TYPE_IQ4_K_R4|| new_type == GGML_TYPE_Q8_K_R8 || new_type == GGML_TYPE_IQ3_K_R4||
new_type == GGML_TYPE_IQ2_K_R4|| new_type == GGML_TYPE_IQ5_K_R4|| new_type == GGML_TYPE_IQ4_KS_R4 ||
new_type == GGML_TYPE_IQ3_XXS_R4 || new_type == GGML_TYPE_IQ2_XXS_R4 || new_type == GGML_TYPE_IQ2_XS_R4 ||
- new_type == GGML_TYPE_IQ2_S_R4|| new_type == GGML_TYPE_IQ3_S_R4||
+ new_type == GGML_TYPE_IQ2_S_R4|| new_type == GGML_TYPE_IQ3_S_R4|| new_type == GGML_TYPE_IQ3_KS ||
new_type == GGML_TYPE_IQ2_KT || new_type == GGML_TYPE_IQ3_KT || new_type == GGML_TYPE_IQ4_KT ||
new_type == GGML_TYPE_IQ5_KS || new_type == GGML_TYPE_IQ5_KS_R4) {
if (nx % QK_K != 0) {
@@ -18676,6 +18678,7 @@ static ggml_type change_type_if_necessary(ggml_type new_type, int nx, int ny) {
case GGML_TYPE_Q3_K_R4:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
+ case GGML_TYPE_IQ3_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ3_K_R4:
case GGML_TYPE_IQ4_KSS:
@@ -18810,7 +18813,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ||
ftype == LLAMA_FTYPE_MOSTLY_IQ1_M || ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ2_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 ||
+ ftype == LLAMA_FTYPE_MOSTLY_IQ2_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_KS ||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 ||
ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M_R4 ||
ftype == LLAMA_FTYPE_MOSTLY_IQ1_S_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M_R4 ||
@@ -19003,6 +19006,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_K && qs.model.hparams.n_gqa() >= 2) {
new_type = GGML_TYPE_IQ4_K;
}
+ else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_KS && qs.model.hparams.n_gqa() >= 2) {
+ new_type = GGML_TYPE_IQ4_KS;
+ }
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 && qs.model.hparams.n_gqa() >= 2) {
new_type = GGML_TYPE_IQ4_K_R4;
}
@@ -19059,6 +19065,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
else if (new_type == GGML_TYPE_Q2_K_R4 || new_type == GGML_TYPE_IQ3_XXS_R4) new_type = GGML_TYPE_IQ3_K_R4;
else if (new_type == GGML_TYPE_Q3_K || new_type == GGML_TYPE_IQ3_S) new_type = GGML_TYPE_Q4_K;
else if (new_type == GGML_TYPE_IQ3_K) new_type = GGML_TYPE_IQ4_K;
+ else if (new_type == GGML_TYPE_IQ3_KS) new_type = GGML_TYPE_IQ4_KS;
else if (new_type == GGML_TYPE_IQ3_S_R4) new_type = GGML_TYPE_Q4_K_R4;
else if (new_type == GGML_TYPE_Q3_K_R4) new_type = GGML_TYPE_Q4_K_R4;
else if (new_type == GGML_TYPE_Q4_K || new_type == GGML_TYPE_IQ4_XS) new_type = GGML_TYPE_Q5_K;
@@ -19185,7 +19192,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS || ftype == LLAMA_FTYPE_MOSTLY_IQ5_KS_R4 ||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_KL ||
ftype == LLAMA_FTYPE_MOSTLY_Q4_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ4_NL_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS_R8 ||
- ftype == LLAMA_FTYPE_MOSTLY_Q3_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_KT ||
+ ftype == LLAMA_FTYPE_MOSTLY_Q3_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_KT || ftype == LLAMA_FTYPE_MOSTLY_IQ3_KS ||
ftype == LLAMA_FTYPE_MOSTLY_Q2_K_R4|| ftype == LLAMA_FTYPE_MOSTLY_IQ4_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K_R4 ||
ftype == LLAMA_FTYPE_MOSTLY_IQ2_K_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS_R4 || ftype == LLAMA_FTYPE_MOSTLY_IQ3_S_R4) {
new_type = GGML_TYPE_Q5_K; // should the IQ_K quants be applied here as the new type for the IQ_K ftypes ?
@@ -19427,6 +19434,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_IQ5_KS: default_type = GGML_TYPE_IQ5_KS; break;
case LLAMA_FTYPE_MOSTLY_IQ2_K: default_type = GGML_TYPE_IQ2_K; break;
case LLAMA_FTYPE_MOSTLY_IQ2_K_R4:default_type = GGML_TYPE_IQ2_K_R4;break;
+ case LLAMA_FTYPE_MOSTLY_IQ3_KS: default_type = GGML_TYPE_IQ3_KS; break;
case LLAMA_FTYPE_MOSTLY_IQ3_K: default_type = GGML_TYPE_IQ3_K; break;
case LLAMA_FTYPE_MOSTLY_IQ3_K_R4:default_type = GGML_TYPE_IQ3_K_R4;break;
case LLAMA_FTYPE_MOSTLY_IQ3_KL: default_type = GGML_TYPE_IQ3_K; break;