author    Kawrakow <iwankawrakow@gmail.com>    2025-05-15 16:02:39 +0300
committer GitHub <noreply@github.com>          2025-05-15 16:02:39 +0300
commit    3d92d7f802b332927669f01bfa51ebbb56e868ba (patch)
tree      c3913f67e36492c723cc47fe512078ee0dd19d59
parent    3f8c865b920df844ba0cb4ba53c1ccce8874b045 (diff)
Adding IQ5_KS - 5.25 bpw quants (#422)
* iq5_ks: basics
* iq5_ks: quantize
* iq5_ks: CUDA dequantize works
* iq5_ks: dot product works on CUDA
* iq5_ks: MMQ works
* iq5_ks: Zen4
* iq5_ks: AVX2
  But it is not quite right, just like iq4_k, iq5_k, iq6_k, iq4_ks. All these need fixing on AVX2.
* iq5_ks: NEON
* iq5_ks: Metal dequantize
* iq5_ks: Metal dot product

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
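For reference, the 5.25 bpw figure follows directly from the block layout added to ggml-common.h below: a block_iq5_ks super-block covers QK_K = 256 weights and stores QK_K/32 = 8 scale bytes, QK_K/2 = 128 bytes of packed low 4-bit quants, and QK_K/8 = 32 bytes of high bits, i.e. 8 + 128 + 32 = 168 bytes, and 168 * 8 / 256 = 5.25 bits per weight. The single per-row float scale (row_meta_size = 4 in ggml.c) adds only 32 bits per row on top of that.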
-rw-r--r--  examples/quantize/quantize.cpp   |   1
-rw-r--r--  ggml/include/ggml.h              |   2
-rw-r--r--  ggml/src/ggml-common.h           |   7
-rw-r--r--  ggml/src/ggml-cuda.cu            |   1
-rw-r--r--  ggml/src/ggml-cuda/common.cuh    |   7
-rw-r--r--  ggml/src/ggml-cuda/convert.cu    |  54
-rw-r--r--  ggml/src/ggml-cuda/iqk_mmvq.cu   |  41
-rw-r--r--  ggml/src/ggml-cuda/iqk_mmvq.cuh  |   5
-rw-r--r--  ggml/src/ggml-cuda/mmq.cu        |   4
-rw-r--r--  ggml/src/ggml-cuda/mmq.cuh       |  73
-rw-r--r--  ggml/src/ggml-cuda/mmvq.cu       |   3
-rw-r--r--  ggml/src/ggml-metal.m            |  38
-rw-r--r--  ggml/src/ggml-metal.metal        | 164
-rw-r--r--  ggml/src/ggml-quants.c           |   1
-rw-r--r--  ggml/src/ggml.c                  |  22
-rw-r--r--  ggml/src/iqk/iqk_mul_mat.cpp     | 172
-rw-r--r--  ggml/src/iqk/iqk_quantize.cpp    | 244
-rw-r--r--  ggml/src/iqk/iqk_quantize.h      |   6
-rw-r--r--  include/llama.h                  |   3
-rw-r--r--  src/llama.cpp                    |   6
20 files changed, 848 insertions, 6 deletions
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 60cf260c..1b388a73 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -68,6 +68,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "IQ4_KS", LLAMA_FTYPE_MOSTLY_IQ4_KS, " 4.25 bpw non-linear quantization", },
{ "IQ4_KS_R4",LLAMA_FTYPE_MOSTLY_IQ4_KS_R4,"IQ4_KS repacked", },
{ "IQ4_KSS", LLAMA_FTYPE_MOSTLY_IQ4_KSS, " 4.0 bpw non-linear quantization", },
+ { "IQ5_KS", LLAMA_FTYPE_MOSTLY_IQ5_KS, " 5.25 bpw non-linear quantization", },
{ "IQ2_K", LLAMA_FTYPE_MOSTLY_IQ2_K, " 2.375 bpw non-linear quantization",},
{ "IQ2_K_R4", LLAMA_FTYPE_MOSTLY_IQ2_K_R4, "IQ2_K repacked",},
{ "IQ2_KS", LLAMA_FTYPE_MOSTLY_IQ2_KS, " 2.1875 bpw non-linear quantization",},
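With this entry in place, the new type is selected from the quantize example like any other ftype; a typical invocation looks like the following (binary name and file paths are illustrative and depend on the build):

    ./bin/llama-quantize model-f16.gguf model-iq5_ks.gguf IQ5_KS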
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index beeb3c09..b6f461ed 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -425,6 +425,7 @@ extern "C" {
GGML_TYPE_Q8_KR8 = 149,
GGML_TYPE_Q8_K128 = 150,
GGML_TYPE_Q8_KV = 151,
+ GGML_TYPE_IQ5_KS = 152,
GGML_TYPE_Q4_0_R8 = 202,
GGML_TYPE_Q5_0_R4 = 206,
@@ -512,6 +513,7 @@ extern "C" {
GGML_FTYPE_MOSTLY_IQ2_KS = 138, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_KSS = 139, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_KV = 140, // except 1d tensors
+ GGML_FTYPE_MOSTLY_IQ5_KS = 141, // except 1d tensors
//
GGML_FTYPE_MOSTLY_Q4_0_R8 = 202, // except 1d tensors
GGML_FTYPE_MOSTLY_Q8_0_R8 = 207, // except 1d tensors
diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index 5a6417fc..1c2d1b17 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -687,6 +687,13 @@ typedef struct {
} block_iq6_k;
static_assert(sizeof(block_iq6_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/2 + QK_K/4 + QK_K/16, "wrong iq6_k block size/padding");
+typedef struct {
+ uint8_t scales[QK_K/32];
+ uint8_t qs[QK_K/2];
+ uint8_t qh[QK_K/8];
+} block_iq5_ks;
+static_assert(sizeof(block_iq5_ks) == QK_K/32 + QK_K/2 + QK_K/8, "wrong iq5_ks block size/padding");
+
#endif // GGML_COMMON_DECL
#endif // GGML_COMMON_DECL
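To make the packing concrete, here is a minimal scalar decoder sketch for one such super-block, written to mirror the CUDA dequantize kernel added further down; dequant_iq5_ks_block is a hypothetical helper, d is the per-row float scale stored ahead of the blocks, and values stands for the 64-entry iq5nl lookup table used by the kernels (taken as int8_t here):

// Scalar reference decoder for one iq5_ks super-block (QK_K = 256 weights).
// Assumes the block_iq5_ks layout declared above and a 64-entry value table
// (two 32-entry halves, the second half selected by the low bit of the scale byte).
static void dequant_iq5_ks_block(float d, const block_iq5_ks * x, const int8_t * values, float * y) {
    for (int ib64 = 0; ib64 < 4; ++ib64) {                    // four groups of 64 weights
        const uint8_t sc1 = x->scales[2*ib64+0];
        const uint8_t sc2 = x->scales[2*ib64+1];
        const float dl1 = d * (float)((sc1 & 254) - 127);      // 7-bit block scale, offset by 127
        const float dl2 = d * (float)((sc2 & 254) - 127);
        const int8_t * v1 = values + ((sc1 & 1) << 5);         // low bit picks the value half
        const int8_t * v2 = values + ((sc2 & 1) << 5);
        const uint8_t * qs = x->qs + 32*ib64;                  // low 4 bits of the quants
        for (int j = 0; j < 32; ++j) {
            const uint8_t h = x->qh[j] >> (2*ib64);            // 5th bits: 2 per qh byte per group
            y[64*ib64 + j +  0] = dl1 * v1[(qs[j] & 0xf) | ((h & 1) << 4)];
            y[64*ib64 + j + 32] = dl2 * v2[(qs[j] >>  4) | ((h & 2) << 3)];
        }
    }
}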
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index 8a201352..cf17cbc1 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -3451,6 +3451,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index 0a7f7f83..a04a1929 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -600,6 +600,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ5_K> {
};
template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ5_KS> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR5_XS;
+ static constexpr int qi = QI5_XS;
+};
+
+template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ6_K> {
static constexpr int qk = QK_K;
static constexpr int qr = QR6_XS;
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 8383f2d3..5afe8c74 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -696,6 +696,46 @@ static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_
}
}
+
+template<typename dst_t>
+static __global__ void dequantize_block_iq5_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+ int64_t ii = blockIdx.x;
+ int64_t row = (QK_K * ii) / n_per_row;
+ const char * cx = (const char *)vx + row * row_size;
+ float d = *(const float *)cx;
+ const block_iq5_ks * x = (const block_iq5_ks *)(cx + sizeof(float));
+ const int64_t i = ii - (row*n_per_row)/QK_K;
+
+ const int tid = threadIdx.x;
+ int ib64 = tid/8; // 0...3
+ int il = tid%8; // 0...7
+ dst_t * y = yy + ii*QK_K + 64*ib64 + 2*il;
+ const float dl1 = d * ((int)(x[i].scales[2*ib64+0] & 254) - 127);
+ const float dl2 = d * ((int)(x[i].scales[2*ib64+1] & 254) - 127);
+ const uint8_t * qs = x[i].qs + 32*ib64 + 2*il;
+ const uint8_t * qh = x[i].qh + 2*il;
+ auto values1 = iq5nl_values + ((x[i].scales[2*ib64+0] & 1) << 5);
+ auto values2 = iq5nl_values + ((x[i].scales[2*ib64+1] & 1) << 5);
+ if constexpr (std::is_same_v<dst_t, nv_bfloat16>) {
+ for (int j = 0; j < 2; ++j) {
+ const uint8_t h1 = qh[j] >> 2*(ib64%4), h2 = qh[j+16] >> 2*(ib64%4);
+ y[j+ 0] = __float2bfloat16(dl1 * values1[(qs[j+ 0] & 0xf) | ((h1 & 1) << 4)]);
+ y[j+16] = __float2bfloat16(dl1 * values1[(qs[j+16] & 0xf) | ((h2 & 1) << 4)]);
+ y[j+32] = __float2bfloat16(dl2 * values2[(qs[j+ 0] >> 4) | ((h1 & 2) << 3)]);
+ y[j+48] = __float2bfloat16(dl2 * values2[(qs[j+16] >> 4) | ((h2 & 2) << 3)]);
+ }
+ } else {
+ for (int j = 0; j < 2; ++j) {
+ const uint8_t h1 = qh[j] >> 2*(ib64%4), h2 = qh[j+16] >> 2*(ib64%4);
+ y[j+ 0] = dl1 * values1[(qs[j+ 0] & 0xf) | ((h1 & 1) << 4)];
+ y[j+16] = dl1 * values1[(qs[j+16] & 0xf) | ((h2 & 1) << 4)];
+ y[j+32] = dl2 * values2[(qs[j+ 0] >> 4) | ((h1 & 2) << 3)];
+ y[j+48] = dl2 * values2[(qs[j+16] >> 4) | ((h2 & 2) << 3)];
+ }
+ }
+}
+
template<typename dst_t>
static __global__ void dequantize_block_iq6_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
@@ -1009,6 +1049,14 @@ static void dequantize_row_iq4_ks_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq5_ks_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+ const int64_t k = nrows * n_per_row;
+ const int64_t row_size = ggml_row_size(GGML_TYPE_IQ5_KS, n_per_row);
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq5_ks<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
+template<typename dst_t>
static void dequantize_row_iq4_kss_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
const int64_t row_size = ggml_row_size(GGML_TYPE_IQ4_KSS, n_per_row);
@@ -1140,6 +1188,8 @@ to_bf16_cuda_t ggml_get_to_bf16_cuda(ggml_type type) {
return dequantize_row_iq4_kss_cuda<nv_bfloat16>;
case GGML_TYPE_IQ4_KS:
return dequantize_row_iq4_ks_cuda<nv_bfloat16>;
+ case GGML_TYPE_IQ5_KS:
+ return dequantize_row_iq5_ks_cuda<nv_bfloat16>;
case GGML_TYPE_IQ4_K:
return dequantize_row_iq4_k_cuda<nv_bfloat16>;
case GGML_TYPE_IQ5_K:
@@ -1202,6 +1252,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
return dequantize_row_iq4_ks_cuda;
case GGML_TYPE_IQ4_KSS:
return dequantize_row_iq4_kss_cuda;
+ case GGML_TYPE_IQ5_KS:
+ return dequantize_row_iq5_ks_cuda;
case GGML_TYPE_IQ2_KS:
return dequantize_row_iq2_ks_cuda;
case GGML_TYPE_IQ2_K:
@@ -1273,6 +1325,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
return dequantize_row_iq4_ks_cuda;
case GGML_TYPE_IQ4_KSS:
return dequantize_row_iq4_kss_cuda;
+ case GGML_TYPE_IQ5_KS:
+ return dequantize_row_iq5_ks_cuda;
case GGML_TYPE_IQ2_KS:
return dequantize_row_iq2_ks_cuda;
case GGML_TYPE_IQ2_K:
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu
index 576c387d..6a2db725 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cu
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cu
@@ -328,6 +328,39 @@ __device__ __forceinline__ float vec_dot_iq5_k_q8_1(
return d5 * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2);
}
+__device__ __forceinline__ float vec_dot_iq5_ks_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+
+ float scale = *(const float *)vbq;
+ const block_iq5_ks * bq5 = (const block_iq5_ks *)((const char *)vbq + sizeof(float)) + kbx;
+ const uint8_t * all_values = (const uint8_t *)iq5nl_values;
+
+ int i4 = iqs/4; // 0...7. The two blocks of 16 handled here are 4*(i4/2) + (i4%2) and 4*(i4/2) + (i4%2) + 2
+
+ const int32_t * q8_1 = (const int *)bq8_1[2*(i4/2)+0].qs + 4*(i4%2);
+ const int32_t * q8_2 = (const int *)bq8_1[2*(i4/2)+1].qs + 4*(i4%2);
+ const uint32_t * q4 = (const uint32_t *)bq5->qs + 8*(i4/2) + 4*(i4%2);
+ const uint32_t * qh = (const uint32_t *)bq5->qh + 4*(i4%2);
+ const uint8_t * values1 = all_values + ((bq5->scales[2*(i4/2)+0] & 1) << 5);
+ const uint8_t * values2 = all_values + ((bq5->scales[2*(i4/2)+1] & 1) << 5);
+ uint32_t aux32[2];
+ const uint8_t * a8 = (const uint8_t *)aux32;
+ int v1, v2;
+ int sumi1 = 0, sumi2 = 0;
+ for (int j = 0; j < 4; ++j) {
+ uint32_t h = qh[j] >> 2*(i4/2);
+ aux32[0] = ((q4[j] >> 0) & 0x0f0f0f0f) | ((h << 4) & 0x10101010);
+ aux32[1] = ((q4[j] >> 4) & 0x0f0f0f0f) | ((h << 3) & 0x10101010);
+ v1 = int_from_table(a8+0, values1);
+ v2 = int_from_table(a8+4, values2);
+ sumi1 = ggml_cuda_dp4a(v1, q8_1[j], sumi1);
+ sumi2 = ggml_cuda_dp4a(v2, q8_2[j], sumi2);
+ }
+ const int ls1 = (bq5->scales[2*(i4/2)+0] & 254) - 127;
+ const int ls2 = (bq5->scales[2*(i4/2)+1] & 254) - 127;
+ return scale * (__low2float(bq8_1[2*(i4/2)+0].ds) * sumi1 * ls1 + __low2float(bq8_1[2*(i4/2)+1].ds) * sumi2 * ls2);
+}
+
#define VDR_IQ6_K_Q8_1_MMVQ 4
#define VDR_IQ6_K_Q8_1_MMQ 4
@@ -799,6 +832,14 @@ void mul_mat_vec_iq5_k_q8_1_cuda(
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ5_K, VDR_IQ5_K_Q8_1_MMVQ, vec_dot_iq5_k_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
}
+void mul_mat_vec_iq5_ks_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, int64_t ids_nb0, cudaStream_t stream) {
+
+ iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ5_KS, VDR_IQ5_K_Q8_1_MMVQ, vec_dot_iq5_ks_q8_1>(vx, vy, dst, ids_data, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+}
+
void mul_mat_vec_iq6_k_q8_1_cuda(
const void * vx, const void * vy, float * dst, const char * ids_data,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh
index 1f55ddb9..b81d2114 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cuh
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh
@@ -26,6 +26,11 @@ void mul_mat_vec_iq5_k_q8_1_cuda(
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+void mul_mat_vec_iq5_ks_q8_1_cuda(
+ const void * vx, const void * vy, float * dst, const char * ids_data,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
+ const int ne2, const uint64_t nb02, const uint64_t nb12, const uint64_t nb2, const int64_t ids_nb0, cudaStream_t stream);
+
void mul_mat_vec_iq6_k_q8_1_cuda(
const void * vx, const void * vy, float * dst, const char * ids_data,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst,
diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu
index 7bee10cb..2f7a9bfd 100644
--- a/ggml/src/ggml-cuda/mmq.cu
+++ b/ggml/src/ggml-cuda/mmq.cu
@@ -94,6 +94,9 @@ void ggml_cuda_op_mul_mat_q(
case GGML_TYPE_IQ4_KS:
mul_mat_q_case<GGML_TYPE_IQ4_KS>(ctx, args, stream);
break;
+ case GGML_TYPE_IQ5_KS:
+ mul_mat_q_case<GGML_TYPE_IQ5_KS>(ctx, args, stream);
+ break;
case GGML_TYPE_IQ2_KS:
mul_mat_q_case<GGML_TYPE_IQ2_KS>(ctx, args, stream);
break;
@@ -150,6 +153,7 @@ bool ggml_cuda_should_use_mmq(enum ggml_type type, int cc, int64_t ne11) {
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_KS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ3_K:
diff --git a/ggml/src/ggml-cuda/mmq.cuh b/ggml/src/ggml-cuda/mmq.cuh
index 1da9a67a..72fa9f13 100644
--- a/ggml/src/ggml-cuda/mmq.cuh
+++ b/ggml/src/ggml-cuda/mmq.cuh
@@ -88,6 +88,7 @@ static mmq_q8_1_ds_layout mmq_get_q8_1_ds_layout(const ggml_type type_x) {
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ6_K:
return MMQ_Q8_1_DS_LAYOUT_D4;
default:
@@ -187,6 +188,7 @@ static constexpr __host__ __device__ tile_x_sizes mmq_get_dp4a_tile_x_sizes(ggml
case GGML_TYPE_IQ4_XS : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ4_NL : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ4_KS : return MMQ_DP4A_TXS_Q8_0;
+ case GGML_TYPE_IQ5_KS : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ2_KS : return MMQ_DP4A_TXS_Q8_0;
case GGML_TYPE_IQ2_K : return MMQ_DP4A_TXS_Q8_0_16;
case GGML_TYPE_IQ3_K : return MMQ_DP4A_TXS_Q8_0_16;
@@ -231,6 +233,7 @@ static constexpr __host__ __device__ int mmq_get_mma_tile_x_k(ggml_type type) {
case GGML_TYPE_IQ4_XS : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ4_NL : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ4_KS : return MMQ_MMA_TILE_X_K_Q8_0;
+ case GGML_TYPE_IQ5_KS : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ2_KS : return MMQ_MMA_TILE_X_K_Q8_0;
case GGML_TYPE_IQ2_K : return MMQ_MMA_TILE_X_K_Q3_K;
case GGML_TYPE_IQ3_K : return MMQ_MMA_TILE_X_K_Q3_K;
@@ -2794,6 +2797,67 @@ template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinlin
}
}
+template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq5_ks(
+ const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
+
+#ifdef INT8_MMA_AVAILABLE
+ int * x_qs = (int *) x_tile;
+ float * x_df = (float *) (x_qs + WARP_SIZE*2);
+#else
+ constexpr tile_x_sizes txs = mmq_get_dp4a_tile_x_sizes(GGML_TYPE_IQ5_KS, mmq_y);
+ int * x_qs = (int *) x_tile;
+ float * x_df = (float *) (x_qs + txs.qs);
+#endif // INT8_MMA_AVAILABLE
+
+ constexpr int qstep = 8;
+ const int kqsx = threadIdx.x % qstep;
+
+ auto values = iq5nl_values;
+
+ uint32_t aux32[2];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * WARP_SIZE/qstep) {
+ int i = i0 + threadIdx.y*(WARP_SIZE/qstep) + threadIdx.x/qstep;
+
+ if (need_check) {
+ i = min(i, i_max);
+ }
+
+ const float * dptr = (const float *)(x + i*stride);
+ const float d = dptr[0];
+ const block_iq5_ks * bxi = (const block_iq5_ks *)(dptr + 1) + kbx0;
+
+ int qh = get_int_b4(bxi->qh, kqsx);
+
+ #pragma unroll
+ for (int l = 0; l < qstep/2; ++l) {
+
+ const int ql = get_int_b4(bxi->qs, kqsx + qstep*l);
+ aux32[0] = ((ql >> 0) & 0x0f0f0f0f) | ((qh & 0x01010101) << 4) | ((bxi->scales[2*l+0] & 1) * 0x20202020);
+ aux32[1] = ((ql >> 4) & 0x0f0f0f0f) | ((qh & 0x02020202) << 3) | ((bxi->scales[2*l+1] & 1) * 0x20202020);
+ qh >>= 2;
+
+ const char4 val0 = make_char4(values[aux8[0]], values[aux8[1]], values[aux8[2]], values[aux8[3]]);
+ const char4 val1 = make_char4(values[aux8[4]], values[aux8[5]], values[aux8[6]], values[aux8[7]]);
+
+#ifdef INT8_MMA_AVAILABLE
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx + 16*l + 0] = *(const int *)&val0;
+ x_qs[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx + 16*l + 8] = *(const int *)&val1;
+#else
+ x_qs[i*(2*WARP_SIZE + 1) + kqsx + 16*l + 0] = *(const int *)&val0;
+ x_qs[i*(2*WARP_SIZE + 1) + kqsx + 16*l + 8] = *(const int *)&val1;
+#endif // INT8_MMA_AVAILABLE
+ }
+
+#ifdef INT8_MMA_AVAILABLE
+ x_df[i*MMQ_MMA_TILE_X_K_Q8_0 + kqsx] = d * ((bxi->scales[kqsx] & 254) - 127);
+#else
+ x_df[i*(2*WARP_SIZE*2/QI8_0) + i/(QI8_0/4) + kqsx] = d * ((bxi->scales[kqsx] & 254) - 127);
+#endif // INT8_MMA_AVAILABLE
+ }
+}
+
template <int mmq_y, int nwarps, bool need_check> static __device__ __forceinline__ void load_tiles_iq6_k(
const char * __restrict__ x, int * __restrict__ x_tile, const int & kbx0, const int & i_max, const int & stride) {
@@ -3139,6 +3203,14 @@ struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ4_KS> {
static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
};
+template <int mmq_x, int mmq_y, int nwarps, bool need_check>
+struct mmq_type_traits<mmq_x, mmq_y, nwarps, need_check, GGML_TYPE_IQ5_KS> {
+ static constexpr int vdr = VDR_IQ4_XS_Q8_1_MMQ;
+ static constexpr load_tiles_mmq_t load_tiles = load_tiles_iq5_ks<mmq_y, nwarps, need_check>;
+ static constexpr vec_dot_mmq_t vec_dot_mma = vec_dot_q8_0_q8_1_mma<mmq_x, mmq_y, nwarps, MMQ_Q8_1_DS_LAYOUT_D4>;
+ static constexpr vec_dot_mmq_t vec_dot_dp4a = vec_dot_q8_0_q8_1_dp4a<mmq_x, mmq_y, nwarps>;
+};
+
template <ggml_type type, int mmq_x, int nwarps, bool need_check, bool fixup>
static __device__ void mul_mat_q_process_tile(
const char * __restrict__ x, const char * __restrict__ yc, float * __restrict__ dst, float * __restrict__ tmp_fixup,
@@ -3581,6 +3653,7 @@ extern DECL_MMQ_CASE(GGML_TYPE_IQ2_K);
extern DECL_MMQ_CASE(GGML_TYPE_IQ3_K);
extern DECL_MMQ_CASE(GGML_TYPE_IQ4_K);
extern DECL_MMQ_CASE(GGML_TYPE_IQ5_K);
+extern DECL_MMQ_CASE(GGML_TYPE_IQ5_KS);
extern DECL_MMQ_CASE(GGML_TYPE_IQ6_K);
// -------------------------------------------------------------------------------------------------------------------------
diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu
index c6b6ef72..14fe2547 100644
--- a/ggml/src/ggml-cuda/mmvq.cu
+++ b/ggml/src/ggml-cuda/mmvq.cu
@@ -530,6 +530,9 @@ static void ggml_cuda_op_mul_mat_vec_q_impl(ggml_backend_cuda_context & ctx, ggm
case GGML_TYPE_IQ5_K:
mul_mat_vec_iq5_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
+ case GGML_TYPE_IQ5_KS:
+ mul_mat_vec_iq5_ks_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
+ break;
case GGML_TYPE_IQ6_K:
mul_mat_vec_iq6_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ids_data, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, ne2, nb02, nb12, nb2, ids_nb0, stream);
break;
diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
index 501fe5a2..13d7b97b 100644
--- a/ggml/src/ggml-metal.m
+++ b/ggml/src/ggml-metal.m
@@ -107,6 +107,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_NL,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS,
+ GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_KS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KSS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_K,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_KS,
@@ -150,6 +151,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KSS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MV_IQ5_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_K_F32,
@@ -186,6 +188,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KSS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ5_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_K_F32,
@@ -219,6 +222,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F32,
@@ -252,6 +256,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F16,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F16,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F16,
@@ -285,6 +290,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KSS_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_K_F32,
@@ -734,6 +740,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS, get_rows_iq4_ks, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KSS, get_rows_iq4_kss, true);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_KS, get_rows_iq5_ks, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_K, get_rows_iq2_k, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_KS, get_rows_iq2_ks, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_K, get_rows_iq3_k, true);
@@ -776,6 +783,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KS_F32, mul_mv_iq4_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KSS_F32, mul_mv_iq4_kss_f32, ctx->support_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ5_KS_F32, mul_mv_iq5_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_K_F32, mul_mv_iq2_k_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_KS_F32, mul_mv_iq2_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_K_F32, mul_mv_iq3_k_f32, ctx->support_simdgroup_reduction);
@@ -812,6 +820,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KS_F32, mul_mv_id_iq4_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KSS_F32, mul_mv_id_iq4_kss_f32, ctx->support_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ5_KS_F32, mul_mv_id_iq5_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_K_F32, mul_mv_id_iq2_k_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_KS_F32, mul_mv_id_iq2_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_K_F32, mul_mv_id_iq3_k_f32, ctx->support_simdgroup_reduction);
@@ -845,6 +854,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32, mul_mm_iq4_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F32, mul_mm_iq4_kss_f32, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F32, mul_mm_iq5_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F32, mul_mm_iq2_k_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F32, mul_mm_iq2_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F32, mul_mm_iq3_k_f32, ctx->support_simdgroup_mm);
@@ -878,6 +888,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F16, mul_mm_iq4_xs_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F16, mul_mm_iq4_ks_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F16, mul_mm_iq4_kss_f16, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F16, mul_mm_iq5_ks_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F16, mul_mm_iq2_k_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F16, mul_mm_iq2_ks_f16, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F16, mul_mm_iq3_k_f16, ctx->support_simdgroup_mm);
@@ -911,6 +922,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32, mul_mm_id_iq4_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KSS_F32, mul_mm_id_iq4_kss_f32, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_KS_F32, mul_mm_id_iq5_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_K_F32, mul_mm_id_iq2_k_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_KS_F32, mul_mm_id_iq2_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_K_F32, mul_mm_id_iq3_k_f32, ctx->support_simdgroup_mm);
@@ -2123,6 +2135,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F32].pipeline; break;
+ case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F32 ].pipeline; break;
case GGML_TYPE_IQ2_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F32 ].pipeline; break;
@@ -2161,6 +2174,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F16 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F16 ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KSS_F16].pipeline; break;
+ case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_KS_F16 ].pipeline; break;
case GGML_TYPE_IQ2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F16 ].pipeline; break;
case GGML_TYPE_IQ2_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F16 ].pipeline; break;
case GGML_TYPE_IQ3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F16 ].pipeline; break;
@@ -2384,6 +2398,12 @@ static void ggml_metal_encode_node(
nth1 = 16;
pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KSS_F32].pipeline;
} break;
+ case GGML_TYPE_IQ5_KS:
+ {
+ nth0 = 4;
+ nth1 = 16;
+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ5_KS_F32].pipeline;
+ } break;
case GGML_TYPE_IQ2_K:
{
nth0 = 4;
@@ -2471,8 +2491,9 @@ static void ggml_metal_encode_node(
}
else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS || src0t == GGML_TYPE_IQ4_K ||
src0t == GGML_TYPE_IQ5_K || src0t == GGML_TYPE_IQ6_K || src0t == GGML_TYPE_IQ4_KS||
- src0t == GGML_TYPE_IQ4_KSS) {
- const int mem_size = src0t == GGML_TYPE_IQ6_K ? 128*sizeof(float) : GGML_TYPE_IQ5_K ? 64*sizeof(float) : 32*sizeof(float);
+ src0t == GGML_TYPE_IQ4_KSS || src0t == GGML_TYPE_IQ5_KS) {
+ const int mem_size = src0t == GGML_TYPE_IQ6_K ? 128*sizeof(float)
+ : src0t == GGML_TYPE_IQ5_K || src0t == GGML_TYPE_IQ5_KS ? 64*sizeof(float) : 32*sizeof(float);
[encoder setThreadgroupMemoryLength:mem_size atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
@@ -2568,6 +2589,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KSS_F32].pipeline; break;
+ case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_K_F32 ].pipeline; break;
case GGML_TYPE_IQ2_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_K_F32 ].pipeline; break;
@@ -2775,6 +2797,12 @@ static void ggml_metal_encode_node(
nth1 = 16;
pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KSS_F32].pipeline;
} break;
+ case GGML_TYPE_IQ5_KS:
+ {
+ nth0 = 4;
+ nth1 = 16;
+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ5_KS_F32].pipeline;
+ } break;
case GGML_TYPE_IQ2_K:
{
nth0 = 4;
@@ -2873,8 +2901,9 @@ static void ggml_metal_encode_node(
}
else if (src0t == GGML_TYPE_IQ4_NL || src0t == GGML_TYPE_IQ4_XS || src0t == GGML_TYPE_IQ4_K ||
src0t == GGML_TYPE_IQ5_K || src0t == GGML_TYPE_IQ6_K || src0t == GGML_TYPE_IQ4_KS||
- src0t == GGML_TYPE_IQ4_KSS) {
- const int mem_size = src0t == GGML_TYPE_IQ6_K ? 128*sizeof(float) : GGML_TYPE_IQ5_K ? 64*sizeof(float) : 32*sizeof(float);
+ src0t == GGML_TYPE_IQ4_KSS || src0t == GGML_TYPE_IQ5_KS) {
+ const int mem_size = src0t == GGML_TYPE_IQ6_K ? 128*sizeof(float)
+ : src0t == GGML_TYPE_IQ5_K || src0t == GGML_TYPE_IQ5_KS ? 64*sizeof(float) : 32*sizeof(float);
[encoder setThreadgroupMemoryLength:mem_size atIndex:0];
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 3)/4, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
@@ -2926,6 +2955,7 @@ static void ggml_metal_encode_node(
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS ].pipeline; break;
case GGML_TYPE_IQ4_KSS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KSS].pipeline; break;
+ case GGML_TYPE_IQ5_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_KS ].pipeline; break;
case GGML_TYPE_IQ2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_K ].pipeline; break;
case GGML_TYPE_IQ2_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_KS ].pipeline; break;
case GGML_TYPE_IQ3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_K ].pipeline; break;
diff --git a/ggml/src/ggml-metal.metal b/ggml/src/ggml-metal.metal
index d3a2858c..b792844d 100644
--- a/ggml/src/ggml-metal.metal
+++ b/ggml/src/ggml-metal.metal
@@ -6276,6 +6276,117 @@ void kernel_mul_mv_iq4_ks_f32_impl(
}
}
+// TODO
+void kernel_mul_mv_iq5_ks_f32_impl(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ int64_t ne00,
+ int64_t ne01,
+ int64_t ne02,
+ int64_t ne10,
+ int64_t ne12,
+ int64_t ne0,
+ int64_t ne1,
+ uint r2,
+ uint r3,
+ threadgroup int8_t * shared_values_i8,
+ uint3 tgpig,
+ uint tiisg,
+ uint sgitg) {
+
+ threadgroup float * shared_values = (threadgroup float *)shared_values_i8;
+ const int nb = ne00/QK_K;
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+ const int first_row = (r0 * 2 + sgitg) * 2;
+
+ const uint i12 = im%ne12;
+ const uint i13 = im/ne12;
+
+ const uint row_size = 4 + nb*sizeof(block_iq5_ks);
+ const uint offset0 = (i12/r2)*ne01 + (i13/r3)*(ne01*ne02);
+ device const char * cx = (device const char *)src0 + (first_row + offset0)*row_size;
+ device const float * y = (device const float *)src1 + r1*ne10 + im*ne00*ne1;
+
+ const int ix = tiisg/16; // 0 or 1
+ const int it = tiisg%16; // 0...15
+ const int ib64 = it/4; // 0...3
+ const int il64 = it%4; // 0...3
+
+ shared_values[2*tiisg+0] = kvalues_iq5k_f[2*tiisg+0];
+ shared_values[2*tiisg+1] = kvalues_iq5k_f[2*tiisg+1];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ float4 yl[4];
+ float2 sumf = 0.f;
+ float d[2];
+
+ device const float * yb = y + ix * QK_K + ib64 * 64 + il64 * 8;
+
+ uint32_t aux32[2];
+ thread const uint8_t * q8 = (thread const uint8_t *)aux32;
+
+ float4 qf1, qf2;
+
+ device const float * dptr = (device const float *)cx;
+ d[0] = *dptr;
+ device const block_iq5_ks * x = (device const block_iq5_ks *)(dptr + 1) + ix;
+ dptr += row_size/4;
+ d[1] = *dptr;
+
+ for (int ibl = ix; ibl < nb; ibl += 2) {
+
+ device const float4 * y4 = (device const float4 *)yb;
+ yl[0] = y4[0]; yl[1] = y4[8]; yl[2] = y4[1]; yl[3] = y4[9];
+
+ device const uint8_t * scales = x->scales;
+
+ for (int row = 0; row < 2; ++row) {
+
+ threadgroup const float * values1 = shared_values + ((scales[2*ib64+0] & 1) << 5);
+ threadgroup const float * values2 = shared_values + ((scales[2*ib64+1] & 1) << 5);
+ const float ls1 = ((scales[2*ib64+0] & 254) - 127);
+ const float ls2 = ((scales[2*ib64+1] & 254) - 127);
+
+ device const uint32_t * q4 = (device const uint32_t *)scales + QK_K/128 + 8*ib64 + 2*il64;
+ device const uint32_t * qh = (device const uint32_t *)scales + QK_K/128 + QK_K/8 + 2*il64;
+
+ float4 acc1 = {0.f}, acc2 = {0.f};
+
+ uint32_t h = qh[0] >> 2*ib64;
+ aux32[0] = ((q4[0] >> 0) & 0x0f0f0f0f) | ((h << 4) & 0x10101010);
+ aux32[1] = ((q4[0] >> 4) & 0x0f0f0f0f) | ((h << 3) & 0x10101010);
+ qf1 = {values1[q8[0]], values1[q8[1]], values1[q8[2]], values1[q8[3]]};
+ qf2 = {values2[q8[4]], values2[q8[5]], values2[q8[6]], values2[q8[7]]};
+ acc1 += yl[0] * qf1;
+ acc2 += yl[1] * qf2;
+
+ h = qh[1] >> 2*ib64;
+ aux32[0] = ((q4[1] >> 0) & 0x0f0f0f0f) | ((h << 4) & 0x10101010);
+ aux32[1] = ((q4[1] >> 4) & 0x0f0f0f0f) | ((h << 3) & 0x10101010);
+ qf1 = {values1[q8[0]], values1[q8[1]], values1[q8[2]], values1[q8[3]]};
+ qf2 = {values2[q8[4]], values2[q8[5]], values2[q8[6]], values2[q8[7]]};
+ acc1 += yl[2] * qf1;
+ acc2 += yl[3] * qf2;
+
+ sumf[row] += ls1 * (acc1[0] + acc1[1] + acc1[2] + acc1[3]) + ls2 * (acc2[0] + acc2[1] + acc2[2] + acc2[3]);
+
+ scales += row_size;
+
+ }
+
+ yb += 2 * QK_K;
+ x += 2;
+ }
+
+ sumf = simd_sum(sumf);
+ if (tiisg < 2) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + tiisg] = sumf[tiisg] * d[tiisg];
+ }
+}
+
void kernel_mul_mv_iq4_kss_f32_impl(
device const void * src0,
device const float * src1,
@@ -7315,6 +7426,35 @@ kernel void kernel_mul_mv_iq4_ks_f32(
kernel_mul_mv_iq4_ks_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg);
}
+[[host_name("kernel_mul_mv_iq5_ks_f32")]]
+kernel void kernel_mul_mv_iq5_ks_f32(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01,
+ constant int64_t & ne02,
+ constant uint64_t & nb00,
+ constant uint64_t & nb01,
+ constant uint64_t & nb02,
+ constant int64_t & ne10,
+ constant int64_t & ne11,
+ constant int64_t & ne12,
+ constant uint64_t & nb10,
+ constant uint64_t & nb11,
+ constant uint64_t & nb12,
+ constant int64_t & ne0,
+ constant int64_t & ne1,
+ constant uint & r2,
+ constant uint & r3,
+ threadgroup int8_t * shared_values [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq5_ks_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg);
+}
+
[[host_name("kernel_mul_mv_iq4_kss_f32")]]
kernel void kernel_mul_mv_iq4_kss_f32(
device const void * src0,
@@ -7931,6 +8071,25 @@ void dequantize_iq4_ks(device const block_iq4_ks * xb, short il, thread type4x4
}
template <typename type4x4>
+void dequantize_iq5_ks(device const block_iq5_ks * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const int ib32 = il/2;
+ device const uint32_t * q4 = (device const uint32_t *)xb->qs + 8*(ib32/2) + 4*(il%2);
+ device const uint32_t * qh = (device const uint32_t *)xb->qh + 4*(il%2);
+ const float ls = (xb->scales[ib32] & 254) - 127;
+ constant float * values = kvalues_iq5k_f + ((xb->scales[ib32] & 1) << 5);
+ uint32_t aux32;
+ thread const uint8_t * q8 = (thread const uint8_t *)&aux32;
+ for (int i = 0; i < 4; ++i) {
+ aux32 = ((q4[i] >> 4*(ib32%2)) & 0x0f0f0f0f) | (((qh[i] >> ib32) & 0x01010101) << 4);
+ reg[i][0] = ls * values[q8[0]];
+ reg[i][1] = ls * values[q8[1]];
+ reg[i][2] = ls * values[q8[2]];
+ reg[i][3] = ls * values[q8[3]];
+ }
+}
+
+template <typename type4x4>
void dequantize_iq4_kss(device const block_iq4_kss * xb, short il, thread type4x4 & reg) {
// il is 0...15 for QK_K = 256 => index of block of 32 is il/2
const int ib32 = il/2;
@@ -8687,6 +8846,7 @@ template [[host_name("kernel_get_rows_iq6_k")]] kernel get_rows_q_t kernel_get
template [[host_name("kernel_get_rows_iq1_bn")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRSBN<float4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>>;
template [[host_name("kernel_get_rows_iq2_bn")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRSBN<float4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>>;
template [[host_name("kernel_get_rows_iq4_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>>;
+template [[host_name("kernel_get_rows_iq5_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>>;
template [[host_name("kernel_get_rows_iq4_kss")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>>;
template [[host_name("kernel_get_rows_iq2_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq2_ks, half, 16, dequantize_iq2_ks>>;
@@ -8730,6 +8890,7 @@ template [[host_name("kernel_mul_mm_iq6_k_f32")]] kernel mat_mm_t kernel_mul_m
template [[host_name("kernel_mul_mm_iq1_bn_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>, float>;
template [[host_name("kernel_mul_mm_iq2_bn_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>, float>;
template [[host_name("kernel_mul_mm_iq4_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>, float>;
+template [[host_name("kernel_mul_mm_iq5_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>, float>;
template [[host_name("kernel_mul_mm_iq4_kss_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>, float>;
template [[host_name("kernel_mul_mm_iq2_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq2_ks, half, 16, dequantize_iq2_ks>, float>;
@@ -8764,6 +8925,7 @@ template [[host_name("kernel_mul_mm_iq6_k_f16")]] kernel mat_mm_t kernel_mul_m
template [[host_name("kernel_mul_mm_iq1_bn_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>, half>;
template [[host_name("kernel_mul_mm_iq2_bn_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRSBN<half4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>, half>;
template [[host_name("kernel_mul_mm_iq4_ks_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>, half>;
+template [[host_name("kernel_mul_mm_iq5_ks_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>, half>;
template [[host_name("kernel_mul_mm_iq4_kss_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>, half>;
template [[host_name("kernel_mul_mm_iq2_ks_f16")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq2_ks, half, 16, dequantize_iq2_ks>, half>;
@@ -8805,6 +8967,7 @@ template [[host_name("kernel_mul_mm_id_iq6_k_f32")]] kernel mat_mm_id_t kernel
template [[host_name("kernel_mul_mm_id_iq1_bn_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRSBN<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn, true>>;
template [[host_name("kernel_mul_mm_id_iq2_bn_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRSBN<half4x4, block_iq2_bn, float, 4, dequantize_iq2_bn>>;
template [[host_name("kernel_mul_mm_id_iq4_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>>;
+template [[host_name("kernel_mul_mm_id_iq5_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq5_ks, float, 16, dequantize_iq5_ks>>;
template [[host_name("kernel_mul_mm_id_iq4_kss_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq4_kss,float, 16, dequantize_iq4_kss>>;
template [[host_name("kernel_mul_mm_id_iq2_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq2_ks, half, 16, dequantize_iq2_ks>>;
@@ -9021,6 +9184,7 @@ template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t
template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_ks_f32_impl>>;
+template [[host_name("kernel_mul_mv_id_iq5_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq5_ks_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_kss_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_kss_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq2_k_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_k_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq2_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_ks_f32_impl>>;
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index cc1c8fc6..8ebb0d32 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -15427,6 +15427,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
case GGML_TYPE_IQ6_K: break;
case GGML_TYPE_IQ4_KS: break;
case GGML_TYPE_IQ4_KSS: break;
+ case GGML_TYPE_IQ5_KS: break;
case GGML_TYPE_IQ4_NL_R4: break;
case GGML_TYPE_IQ4_XS_R8: break;
case GGML_TYPE_IQ2_XXS_R4: break;
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 94defa47..bc103ab7 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1356,6 +1356,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 4,
},
+ [GGML_TYPE_IQ5_KS] = {
+ .type_name = "iq5_ks",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq5_ks),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq5_ks,
+ .from_float = quantize_row_iq5_ks,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq5_ks_ref,
+ .vec_dot = vec_dot_iq5_ks_q8_k,
+ .vec_dot_type = GGML_TYPE_Q8_K,
+ .nrows = 1,
+ .row_meta_size = 4,
+ },
[GGML_TYPE_Q8_K] = {
.type_name = "q8_K",
.blck_size = QK_K,
@@ -4466,6 +4479,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ4_KS: wtype = GGML_TYPE_IQ4_KS; break;
case GGML_FTYPE_MOSTLY_IQ4_KS_R4: wtype = GGML_TYPE_IQ4_KS_R4;break;
case GGML_FTYPE_MOSTLY_IQ4_KSS: wtype = GGML_TYPE_IQ4_KSS; break;
+ case GGML_FTYPE_MOSTLY_IQ5_KS: wtype = GGML_TYPE_IQ5_KS; break;
case GGML_FTYPE_MOSTLY_IQ2_K: wtype = GGML_TYPE_IQ2_K; break;
case GGML_FTYPE_MOSTLY_IQ2_K_R4: wtype = GGML_TYPE_IQ2_K_R4; break;
case GGML_FTYPE_MOSTLY_IQ2_KS: wtype = GGML_TYPE_IQ2_KS; break;
@@ -11229,6 +11243,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
@@ -11701,6 +11716,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
@@ -11870,6 +11886,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
@@ -15366,6 +15383,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
@@ -15775,6 +15793,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
@@ -16090,6 +16109,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
@@ -16722,6 +16742,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ4_KS_R4:
case GGML_TYPE_IQ4_KSS:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ2_K_R4:
case GGML_TYPE_IQ2_KS:
@@ -23790,6 +23811,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ4_KS: result = quantize_iq4_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_KS_R4:result = quantize_iq4_ks_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_KSS: result = quantize_iq4_kss(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ5_KS: result = quantize_iq5_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_K: result = quantize_iq2_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_K_R4:result = quantize_iq2_k_r4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_KS: result = quantize_iq2_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index 6c3a3575..8c649de4 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -2383,6 +2383,79 @@ struct DequantizerIQ4KS final : public BaseDequantizer<block_iq4_ks, true> {
};
};
+struct DequantizerIQ5KS final : public BaseDequantizer<block_iq5_ks, true> {
+ DequantizerIQ5KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx) { load_values(values); }
+ template <typename Q8>
+ inline void new_block(int i, const Q8& q8, __m256 * accm, __m512i * scales) {
+ auto scales128 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)x[i].scales));
+ auto shifts = _mm_and_si128(_mm_cmpeq_epi16(_mm_and_si128(scales128, m1), m1), m2);
+ scales128 = _mm_add_epi16(_mm_and_si128(scales128, mask), m127);
+ auto scales_s = _mm_mullo_epi16(scales128, _mm_add_epi16(m128, shifts));
+ s8k.accum_mins(scales_s, q8, i, d, accm);
+ auto scales256 = MM256_SET_M128I(scales128, scales128);
+ auto all_scales = _mm512_inserti32x8(_mm512_castsi256_si512(scales256), scales256, 1);
+ scales[0] = _mm512_shuffle_epi8(all_scales, shuffles[0]);
+ scales[1] = _mm512_shuffle_epi8(all_scales, shuffles[1]);
+ scales[2] = _mm512_shuffle_epi8(all_scales, shuffles[2]);
+ scales[3] = _mm512_shuffle_epi8(all_scales, shuffles[3]);
+ prepare(x[i].qs, x[i].qh);
+ }
+ inline void prepare(const uint8_t * q4, const uint8_t * qh) {
+ bits.prepare64(q4);
+ auto h256 = _mm256_loadu_si256((const __m256i *)qh);
+ auto hbits = _mm512_inserti32x8(_mm512_castsi256_si512(h256), _mm256_srli_epi16(h256, 2), 1);
+ auto m1 = _mm512_cmpeq_epi8_mask(_mm512_and_si512(hbits, hmask1), hmask1);
+ auto m2 = _mm512_cmpeq_epi8_mask(_mm512_and_si512(hbits, hmask2), hmask2);
+ bits.values[0] = _mm512_mask_shuffle_epi8(_mm512_maskz_shuffle_epi8(_knot_mask64(m1), values[0], bits.values[0]), m1, values[1], bits.values[0]);
+ bits.values[1] = _mm512_mask_shuffle_epi8(_mm512_maskz_shuffle_epi8(_knot_mask64(m2), values[0], bits.values[1]), m2, values[1], bits.values[1]);
+ hbits = _mm512_srli_epi16(hbits, 4);
+ m1 = _mm512_cmpeq_epi8_mask(_mm512_and_si512(hbits, hmask1), hmask1);
+ m2 = _mm512_cmpeq_epi8_mask(_mm512_and_si512(hbits, hmask2), hmask2);
+ bits.values[2] = _mm512_mask_shuffle_epi8(_mm512_maskz_shuffle_epi8(_knot_mask64(m1), values[0], bits.values[2]), m1, values[1], bits.values[2]);
+ bits.values[3] = _mm512_mask_shuffle_epi8(_mm512_maskz_shuffle_epi8(_knot_mask64(m2), values[0], bits.values[3]), m2, values[1], bits.values[3]);
+ // We now have in bits.values[0]: 0...31, 64...95
+ //             bits.values[1]: 32..63, 96..127
+ // etc.
+ auto tmp = _mm512_permutex2var_epi64(bits.values[0], permute1, bits.values[1]);
+ bits.values[1] = _mm512_permutex2var_epi64(bits.values[0], permute2, bits.values[1]);
+ bits.values[0] = tmp;
+ tmp = _mm512_permutex2var_epi64(bits.values[2], permute1, bits.values[3]);
+ bits.values[3] = _mm512_permutex2var_epi64(bits.values[2], permute2, bits.values[3]);
+ bits.values[2] = tmp;
+ }
+ static void load_values(__m512i * values) {
+ static const uint8_t kvalues_iq5nl[32] = {
+ 2, 14, 25, 36, 45, 54, 63, 71, 78, 85, 92, 98, 104, 110, 116, 122, 127,
+ 133, 139, 145, 151, 157, 164, 171, 179, 187, 196, 205, 215, 225, 237, 249,
+ };
+ auto values128_1 = _mm_loadu_si128((const __m128i *)kvalues_iq5nl + 0);
+ auto values128_2 = _mm_loadu_si128((const __m128i *)kvalues_iq5nl + 1);
+ auto values256_1 = MM256_SET_M128I(values128_1, values128_1);
+ auto values256_2 = MM256_SET_M128I(values128_2, values128_2);
+ values[0] = _mm512_inserti32x8(_mm512_castsi256_si512(values256_1), values256_1, 1);
+ values[1] = _mm512_inserti32x8(_mm512_castsi256_si512(values256_2), values256_2, 1);
+ }
+
+ Q4Bits bits;
+ Scales8KBase s8k;
+ __m512i values[2];
+ const __m512i hmask1 = _mm512_set1_epi8(1);
+ const __m512i hmask2 = _mm512_set1_epi8(2);
+ const __m512i permute1 = _mm512_set_epi64(11, 10, 9, 8, 3, 2, 1, 0);
+ const __m512i permute2 = _mm512_set_epi64(15, 14, 13, 12, 7, 6, 5, 4);
+ const __m128i m127 = _mm_set1_epi16(-127);
+ const __m128i m128 = _mm_set1_epi16(-128);
+ const __m128i mask = _mm_set1_epi16(254);
+ const __m128i m1 = _mm_set1_epi16(1);
+ const __m128i m2 = _mm_set1_epi16(2);
+ const __m512i shuffles[4] = {
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0100), _mm256_set1_epi16(0x0302), 1),
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0504), _mm256_set1_epi16(0x0706), 1),
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0908), _mm256_set1_epi16(0x0b0a), 1),
+ _mm512_inserti32x8(_mm512_set1_epi16(0x0d0c), _mm256_set1_epi16(0x0f0e), 1),
+ };
+};
+
struct DequantizerIQ4KSS final : public BaseDequantizer<block_iq4_kss, true> {
DequantizerIQ4KSS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_512()) {}
template <typename Q8>
@@ -2977,6 +3050,53 @@ struct DequantizerIQ4KS final : public BaseDequantizer<block_iq4_ks, true> {
const __m128i m4 = _mm_set1_epi16(4);
};
+struct DequantizerIQ5KS final : public BaseDequantizer<block_iq5_ks, true> {
+ DequantizerIQ5KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx) { load_values(values); }
+ template <typename Q8>
+ inline __m256i new_block(int i, const Q8& q8, __m256 * accd) {
+ hbits = _mm256_loadu_si256((const __m256i *)x[i].qh);
+ auto scales128 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)x[i].scales));
+ auto shifts = _mm_and_si128(_mm_cmpeq_epi16(_mm_and_si128(scales128, m1), m1), m2);
+ scales128 = _mm_add_epi16(_mm_and_si128(scales128, mask), m127);
+ auto scales_s = _mm_mullo_epi16(scales128, _mm_add_epi16(m128, shifts));
+ s8k.accum_mins(scales_s, q8, i, d, accd);
+ return MM256_SET_M128I(scales128, scales128);
+ }
+ inline void prepare(int i, int j) {
+ bits.prepare(x[i].qs, j);
+ auto h = j == 0 ? hbits : _mm256_srli_epi16(hbits, 4);
+ for (int k = 0; k < 4; ++k) {
+ auto qh = _mm256_and_si256(_mm256_slli_epi16(h, 7-k), mh);
+ auto q5vl = _mm256_or_si256(bits.values[k], qh);
+ auto q5vh = _mm256_or_si256(bits.values[k], _mm256_xor_si256(qh, mh));
+ bits.values[k] = _mm256_or_si256(_mm256_shuffle_epi8(values[0], q5vl), _mm256_shuffle_epi8(values[1], q5vh));
+ }
+ }
+ static void load_values(__m256i * values) {
+ static const uint8_t kvalues_iq5nl[32] = {
+ 2, 14, 25, 36, 45, 54, 63, 71, 78, 85, 92, 98, 104, 110, 116, 122, 127,
+ 133, 139, 145, 151, 157, 164, 171, 179, 187, 196, 205, 215, 225, 237, 249,
+ };
+ auto values128_1 = _mm_loadu_si128((const __m128i *)kvalues_iq5nl + 0);
+ auto values128_2 = _mm_loadu_si128((const __m128i *)kvalues_iq5nl + 1);
+ values[0] = MM256_SET_M128I(values128_1, values128_1);
+ values[1] = MM256_SET_M128I(values128_2, values128_2);
+ }
+
+ Q4Bits bits;
+ Scales8KBase s8k;
+ __m256i hbits;
+ __m256i values[2];
+ const __m128i maskl = _mm_set1_epi8(0xf);
+ const __m128i maskh = _mm_set1_epi8(0x30);
+ const __m256i mh = _mm256_set1_epi8(-128); // to avoid stupid warning about 0x80 overflowing
+ const __m128i mask = _mm_set1_epi16(254);
+ const __m128i m127 = _mm_set1_epi16(-127);
+ const __m128i m128 = _mm_set1_epi16(-128);
+ const __m128i m1 = _mm_set1_epi16(1);
+ const __m128i m2 = _mm_set1_epi16(2);
+};
+
struct DequantizerIQ4KSS final : public BaseDequantizer<block_iq4_kss, true> {
DequantizerIQ4KSS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_256()) {}
template <typename Q8>
@@ -9455,6 +9575,7 @@ template <typename Dequantizer> void MulMat::set_functions(MulMat& m) {
std::is_same_v<Dequantizer, DequantizerIQ3K> ||
std::is_same_v<Dequantizer, DequantizerIQ4XS>||
std::is_same_v<Dequantizer, DequantizerIQ4KS>||
+ std::is_same_v<Dequantizer, DequantizerIQ5KS>||
std::is_same_v<Dequantizer, DequantizerIQ4KSS>) {
m.funcs[0] = mul_mat_iqX_k_q8_K_AVX512<Dequantizer, 1>;
m.funcs[1] = mul_mat_iqX_k_q8_K_AVX512<Dequantizer, 2>;
@@ -9620,6 +9741,10 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
assert (ne00 % QK_K == 0);
MulMat::set_functions<DequantizerIQ4KS>(mm);
break;
+ case GGML_TYPE_IQ5_KS:
+ assert (ne00 % QK_K == 0);
+ MulMat::set_functions<DequantizerIQ5KS>(mm);
+ break;
case GGML_TYPE_IQ4_KSS:
assert (ne00 % QK_K == 0);
MulMat::set_functions<DequantizerIQ4KSS>(mm);
@@ -10926,6 +11051,50 @@ struct DequantizerIQ4KS final : public BaseDequantizer<block_iq4_ks, true> {
const int16x8_t m127 = vdupq_n_s16(-127);
};
+struct DequantizerIQ5KS final : public BaseDequantizer<block_iq5_ks, true> {
+ DequantizerIQ5KS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc), values(vld1q_s8_x2(iq5nl_values)) {}
+
+ constexpr static int num_blocks() { return 8; }
+ constexpr static bool should_scale_quants() { return false; }
+
+ template <typename Q8>
+ inline int32x4x2_t new_block(int i, const Q8& q8, float32x4_t * acc) {
+ (void)q8;
+ (void)acc;
+        hbits = vld1q_u8_x2(x[i].qh);
+        auto scales16 = vaddq_s16(vreinterpretq_s16_u16(vandq_u16(vmovl_u8(vld1_u8(x[i].scales)), mask)), m127);
+ int32x4x2_t scales = {vmovl_s16(vget_low_s16(scales16)), vmovl_s16(vget_high_s16(scales16))};
+ return scales;
+ }
+
+ inline void prepare(int i, int j) {
+ bits.prepare(x[i].qs+64*j);
+ if (j == 1) {
+ for (int k = 0; k < 2; ++k) hbits.val[k] = vshrq_n_u8(hbits.val[k], 4);
+ }
+ bits.b1.val[0] = vorrq_u8(bits.b1.val[0], vandq_u8(vshlq_n_u8(hbits.val[0], 4), hm));
+ bits.b1.val[1] = vorrq_u8(bits.b1.val[1], vandq_u8(vshlq_n_u8(hbits.val[1], 4), hm));
+ bits.b1.val[2] = vorrq_u8(bits.b1.val[2], vandq_u8(vshlq_n_u8(hbits.val[0], 3), hm));
+ bits.b1.val[3] = vorrq_u8(bits.b1.val[3], vandq_u8(vshlq_n_u8(hbits.val[1], 3), hm));
+ bits.b2.val[0] = vorrq_u8(bits.b2.val[0], vandq_u8(vshlq_n_u8(hbits.val[0], 2), hm));
+ bits.b2.val[1] = vorrq_u8(bits.b2.val[1], vandq_u8(vshlq_n_u8(hbits.val[1], 2), hm));
+ bits.b2.val[2] = vorrq_u8(bits.b2.val[2], vandq_u8(vshlq_n_u8(hbits.val[0], 1), hm));
+ bits.b2.val[3] = vorrq_u8(bits.b2.val[3], vandq_u8(vshlq_n_u8(hbits.val[1], 1), hm));
+ for (int k = 0; k < 4; ++k) {
+ bits.b1.val[k] = vqtbl2q_s8(values, bits.b1.val[k]);
+ bits.b2.val[k] = vqtbl2q_s8(values, bits.b2.val[k]);
+ }
+ }
+
+ Q4bits bits;
+ const int8x16x2_t values;
+ const uint8x16_t hshuff = vreinterpretq_u8_u32(uint32x4_t{0x09010800, 0x0b030a02, 0x0d050c04, 0x0f070e06});
+ const uint8x16_t hm = vdupq_n_u8(0x10);
+ const uint16x8_t mask = vdupq_n_u16(254);
+ const int16x8_t m127 = vdupq_n_s16(-127);
+ uint8x16x2_t hbits;
+
+};
+
struct DequantizerIQ4KSS final : public BaseDequantizer<block_iq4_kss, true> {
DequantizerIQ4KSS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc), values(vld1q_s8_x2(iq4k_values)) {}
@@ -14894,6 +15063,9 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
case GGML_TYPE_IQ5_K:
MulMat::set_functions<DequantizerIQ5K>(m);
break;
+ case GGML_TYPE_IQ5_KS:
+ MulMat::set_functions<DequantizerIQ5KS>(m);
+ break;
case GGML_TYPE_IQ6_K:
MulMat::set_functions<DequantizerIQ6K>(m);
break;
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index ca5e008a..78b25525 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -3419,6 +3419,250 @@ void vec_dot_iq4_ks_q8_k(int n, float * s, size_t bs, const void * vx, size_t b
}
namespace {
+static void quantize_row_iq5_ks_impl(const int super_block_size, const int block_size,
+ int n_per_row, const float * x, char * cy,
+ float * all_scales, float * weight,
+ const int8_t * values,
+ const float * quant_weights,
+ const int ntry) {
+
+ float * dptr = (float *)cy;
+ dptr[0] = 0;
+ block_iq5_ks * y = (block_iq5_ks *)(dptr + 1);
+
+ const int8_t * shifted_values = values + 32;
+
+ float amax_scale = 0;
+
+ for (int ibl = 0; ibl < n_per_row/super_block_size; ++ibl) {
+ memset(&y[ibl], 0, sizeof(block_iq5_ks));
+ const float * xbl = x + ibl*super_block_size;
+ auto scales = all_scales + ibl*(super_block_size/block_size);
+ float sigma2 = 0;
+ for (int j = 0; j < super_block_size; ++j) sigma2 += xbl[j]*xbl[j];
+ sigma2 *= 2.f/super_block_size;
+ for (int ib = 0; ib < super_block_size/block_size; ++ib) {
+ const float * xb = xbl + ib*block_size;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*super_block_size + ib*block_size;
+ for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
+ }
+ float amax = 0, max = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float ax = fabsf(xb[j]);
+ if (ax > amax) {
+ amax = ax; max = xb[j];
+ }
+ }
+ if (amax < 1e-15f) {
+ scales[ib] = 0;
+ continue;
+ }
+ float d = ntry > 0 ? -max/values[0] : max/values[0];
+ float id = 1/d;
+ float sumqx_p = 0, sumq2_p = 0;
+ float sumqx_m = 0, sumq2_m = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq5nl(values, al);
+ float q = values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq5nl(values, -al);
+ q = values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ d = sumqx_p/sumq2_p;
+ bool is_shifted = false;
+ float best = d*sumqx_p;
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d*sumqx_m;
+ }
+ for (int itry = -ntry; itry <= ntry; ++itry) {
+ id = (itry + values[0])/max;
+ sumqx_p = sumq2_p = 0;
+ sumqx_m = sumq2_m = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq5nl(values, al);
+ float q = values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq5nl(values, -al);
+ q = values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ if (sumq2_p > 0 && sumqx_p*sumqx_p > best*sumq2_p) {
+ d = sumqx_p/sumq2_p; best = d * sumqx_p; is_shifted = false;
+ }
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d * sumqx_m; is_shifted = false;
+ }
+ id = (itry + shifted_values[0])/max;
+ sumqx_p = sumq2_p = 0;
+ sumqx_m = sumq2_m = 0;
+ for (int j = 0; j < block_size; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq5nl(shifted_values, al);
+ float q = shifted_values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq5nl(shifted_values, -al);
+ q = shifted_values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ if (sumq2_p > 0 && sumqx_p*sumqx_p > best*sumq2_p) {
+ d = sumqx_p/sumq2_p; best = d * sumqx_p; is_shifted = true;
+ }
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d * sumqx_m; is_shifted = true;
+ }
+ }
+ if (is_shifted) y[ibl].scales[ib] = 0x01;
+ scales[ib] = d;
+ amax_scale = std::max(amax_scale, std::abs(d));
+ }
+ }
+ float d = amax_scale/127;
+ *dptr = d;
+ if (!d) return;
+ float id = d ? 1/d : 0.f;
+ float sumqx = 0, sumq2 = 0;
+ for (int ibl = 0; ibl < n_per_row/super_block_size; ++ibl) {
+ const float * xbl = x + ibl*super_block_size;
+ float sigma2 = 0;
+ for (int j = 0; j < super_block_size; ++j) sigma2 += xbl[j]*xbl[j];
+ sigma2 *= 2.f/super_block_size;
+ auto scales = all_scales + (super_block_size/block_size)*ibl;
+ for (int ib = 0; ib < super_block_size/block_size; ++ib) {
+ const int8_t * block_values = y[ibl].scales[ib] & 0x01 ? shifted_values : values;
+ int l = nearest_int(0.5f*(id*scales[ib]+127.f));
+ l = std::max(0, std::min(127, l)) << 1;
+ y[ibl].scales[ib] |= l;
+ l -= 127;
+ float dl = d * l;
+ float idl = dl ? 1/dl : 0.f;
+ const float * xb = xbl + ib*block_size;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*super_block_size + ib*block_size;
+ for (int j = 0; j < block_size; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < block_size; ++j) weight[j] = xb[j]*xb[j];
+ }
+ for (int j = 0; j < block_size; ++j) {
+ uint8_t idx = best_index_iq5nl(block_values, idl*xb[j]);
+ y[ibl].qs[block_size*(ib/2) + j] |= ((idx & 0xf) << 4*(ib%2));
+ y[ibl].qh[j] |= ((idx >> 4) << ib);
+ float w = weight[j];
+ float q = block_values[idx]*l;
+ sumqx += w*q*xb[j];
+ sumq2 += w*q*q;
+ }
+ }
+ }
+ if (sumq2 > 0) *dptr = sumqx/sumq2;
+}
+}
+
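The block-scale packing used by this routine is easiest to see as a scalar round trip. The helpers below are a sketch only; nearest_int() is the rounding helper already used in this file, and the bias and masks mirror the code above and dequantize_row_iq5_ks below.

    #include <algorithm>
    #include <cstdint>

    // Sketch: a scale byte holds the shifted-codebook flag in bit 0 and a 7-bit
    // scale code in bits 1..7; the decoded block scale is d * ((byte & 254) - 127).
    static uint8_t pack_iq5_ks_scale(float scale, float d, bool is_shifted) {
        int l = std::max(0, std::min(127, nearest_int(0.5f*(scale/d + 127.f))));
        return uint8_t(l << 1) | uint8_t(is_shifted);
    }
    static float unpack_iq5_ks_scale(uint8_t packed, float d) {
        return d * ((packed & 254) - 127);
    }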
+void quantize_row_iq5_ks_ref(const float * x, block_iq5_ks * y, int64_t k) {
+ quantize_iq5_ks(x, (void *)y, 1, k, nullptr);
+}
+
+void quantize_row_iq5_ks(const float * x, void * y, int64_t k) {
+ quantize_iq5_ks(x, (void *)y, 1, k, nullptr);
+}
+
+size_t quantize_iq5_ks(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+ constexpr int kBlockSize = 32;
+ GGML_ASSERT(n_per_row%QK_K == 0);
+ auto row_size = ggml_row_size(GGML_TYPE_IQ5_KS, n_per_row);
+ char * qrow = (char *)dst;
+ float weight[kBlockSize];
+ std::vector<float> all_scales(n_per_row/kBlockSize);
+ for (int64_t row = 0; row < nrows; ++row) {
+ quantize_row_iq5_ks_impl(QK_K, kBlockSize, n_per_row, src, qrow, all_scales.data(), weight, iq5nl_values, imatrix, 5);
+ src += n_per_row;
+ qrow += row_size;
+ }
+ return nrows * row_size;
+}
+
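The 5.25 bpw figure in the commit title follows directly from the block layout; a compile-time check makes the accounting explicit (field sizes as in block_iq5_ks, QK_K = 256):

    // scales: 256/32 = 8 bytes, qs: 256/2 = 128 bytes, qh: 256/8 = 32 bytes -> 168 bytes
    constexpr int kIQ5KSBlockBytes = 256/32 + 256/2 + 256/8;
    static_assert(8.0*kIQ5KSBlockBytes/256 == 5.25, "5.25 bits per weight per super-block");

The fp32 row scale written in front of the blocks adds another 32/n_per_row bits (about 0.008 bpw for a 4096-wide row), which ggml_row_size accounts for.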
+void dequantize_row_iq5_ks(const block_iq5_ks * x, float * y, int64_t k) {
+ constexpr int kBlockSize = 32;
+ GGML_ASSERT(k%QK_K == 0);
+ const float * dptr = (const float *)x;
+ float d = *dptr;
+ x = (const block_iq5_ks *)(dptr + 1);
+ int nblock = k/QK_K;
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+ auto qs = x[ibl].qs;
+ auto qh = x[ibl].qh;
+ for (int ib64 = 0; ib64 < QK_K/(2*kBlockSize); ++ib64) {
+ float dl1 = d * ((int)(x[ibl].scales[2*ib64+0] & 254) - 127);
+ float dl2 = d * ((int)(x[ibl].scales[2*ib64+1] & 254) - 127);
+ const int8_t * values1 = iq5nl_values + ((x[ibl].scales[2*ib64+0] & 1) << 5);
+ const int8_t * values2 = iq5nl_values + ((x[ibl].scales[2*ib64+1] & 1) << 5);
+ for (int j = 0; j < kBlockSize; ++j) {
+ y[j ] = dl1 * values1[(qs[j] & 0xf) | (((qh[j] >> (2*ib64+0)) & 1) << 4)];
+ y[j+kBlockSize] = dl2 * values2[(qs[j] >> 4) | (((qh[j] >> (2*ib64+1)) & 1) << 4)];
+ }
+ y += 2*kBlockSize;
+ qs += kBlockSize;
+ }
+ }
+}
+
+void vec_dot_iq5_ks_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+ constexpr int kBlockSize = 32;
+#if GGML_USE_IQK_MULMAT
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ5_KS, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+ return;
+ }
+#endif
+ GGML_ASSERT(n%QK_K == 0);
+ GGML_ASSERT(nrc == 1);
+ GGML_UNUSED(bs);
+ GGML_UNUSED(bx);
+ GGML_UNUSED(by);
+ const float * dptr = (const float *)vx;
+ const float d = *dptr;
+ const block_iq5_ks * x = (const block_iq5_ks *)(dptr + 1);
+ const block_q8_K * y = (const block_q8_K *)vy;
+ int nblock = n/QK_K;
+ float sumf = 0;
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+ auto qy = y[ibl].qs;
+ auto qs = x[ibl].qs;
+ auto qh = x[ibl].qh;
+ float db = d * y[ibl].d;
+ for (int ib64 = 0; ib64 < QK_K/(2*kBlockSize); ++ib64) {
+ float dl1 = db * ((int)(x[ibl].scales[2*ib64+0] & 254) - 127);
+ float dl2 = db * ((int)(x[ibl].scales[2*ib64+1] & 254) - 127);
+ const int8_t * values1 = iq5nl_values + ((x[ibl].scales[2*ib64+0] & 1) << 5);
+ const int8_t * values2 = iq5nl_values + ((x[ibl].scales[2*ib64+1] & 1) << 5);
+ int suml1 = 0;
+ int suml2 = 0;
+ for (int j = 0; j < kBlockSize; ++j) {
+ suml1 += qy[j ] * values1[(qs[j] & 0xf) | (((qh[j] >> (2*ib64+0)) & 1) << 4)];
+ suml2 += qy[j+kBlockSize] * values2[(qs[j] >> 4) | (((qh[j] >> (2*ib64+1)) & 1) << 4)];
+ }
+ sumf += dl1*suml1 + dl2*suml2;
+            qy += 2*kBlockSize;
+ qs += kBlockSize;
+ }
+ }
+ *s = sumf;
+}
+
+namespace {
const uint16_t * scramble_table() {
static std::mutex mutex;
static std::vector<uint16_t> table;
diff --git a/ggml/src/iqk/iqk_quantize.h b/ggml/src/iqk/iqk_quantize.h
index 24db374b..0533d1f7 100644
--- a/ggml/src/iqk/iqk_quantize.h
+++ b/ggml/src/iqk/iqk_quantize.h
@@ -67,6 +67,12 @@ size_t quantize_iq2_ks(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst
void dequantize_row_iq2_ks(const block_iq2_ks * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_iq2_ks_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void quantize_row_iq5_ks_ref(const float * GGML_RESTRICT x, block_iq5_ks * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq5_ks(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+size_t quantize_iq5_ks(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+void dequantize_row_iq5_ks(const block_iq5_ks * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+void vec_dot_iq5_ks_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
void quantize_row_iq4_nl_r4_ref(const float * GGML_RESTRICT x, block_iq4_nl_r4 * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_nl_r4(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
size_t quantize_iq4_nl_r4(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
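A quick usage sketch for the new entry points declared above (a toy round trip; the include paths, test data, and error metric are illustrative and depend on your build setup):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <vector>
    #include "ggml.h"
    #include "iqk/iqk_quantize.h"

    int main() {
        const int64_t n = 4096;                          // must be a multiple of QK_K
        std::vector<float> x(n), y(n);
        for (int64_t i = 0; i < n; ++i) x[i] = (float)std::sin(0.01*i);   // toy data

        std::vector<char> q(ggml_row_size(GGML_TYPE_IQ5_KS, n));
        quantize_iq5_ks(x.data(), q.data(), /*nrows=*/1, /*n_per_row=*/n, /*imatrix=*/nullptr);
        dequantize_row_iq5_ks((const block_iq5_ks *)q.data(), y.data(), n);

        double err = 0;
        for (int64_t i = 0; i < n; ++i) err += (x[i] - y[i])*(x[i] - y[i]);
        std::printf("round-trip rms error: %g\n", std::sqrt(err/n));
        return 0;
    }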
diff --git a/include/llama.h b/include/llama.h
index 0f3ae862..98b08bbd 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -193,6 +193,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_IQ2_KS = 147, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_KSS = 148, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q8_KV = 149, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ5_KS = 150, // except 1d tensors
//
LLAMA_FTYPE_MOSTLY_Q4_0_R8 = 202, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q8_0_R8 = 207, // except 1d tensors
@@ -231,7 +232,7 @@ extern "C" {
LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
LLAMA_ROPE_SCALING_TYPE_YARN = 2,
LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3,
- LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE,
+ LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE,
};
enum llama_pooling_type {
diff --git a/src/llama.cpp b/src/llama.cpp
index 9369d10e..838451f6 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -4373,6 +4373,7 @@ struct llama_model_loader {
case GGML_TYPE_IQ4_KS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_KS; break;
case GGML_TYPE_IQ4_KS_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ4_KS_R4; break;
case GGML_TYPE_IQ4_KSS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_KSS; break;
+ case GGML_TYPE_IQ5_KS: ftype = LLAMA_FTYPE_MOSTLY_IQ5_KS; break;
case GGML_TYPE_IQ2_K: ftype = LLAMA_FTYPE_MOSTLY_IQ2_K; break;
case GGML_TYPE_IQ2_K_R4:ftype = LLAMA_FTYPE_MOSTLY_IQ2_K_R4;break;
case GGML_TYPE_IQ3_K: ftype = LLAMA_FTYPE_MOSTLY_IQ3_K; break;
@@ -5109,6 +5110,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
case LLAMA_FTYPE_MOSTLY_IQ4_KS: return "IQ4_KS - 4.25 bpw";
case LLAMA_FTYPE_MOSTLY_IQ4_KS_R4:return "IQ4_KS_R4 - 4.25 bpw";
case LLAMA_FTYPE_MOSTLY_IQ4_KSS: return "IQ4_KSS - 4.0 bpw";
+ case LLAMA_FTYPE_MOSTLY_IQ5_KS: return "IQ5_KS - 5.25 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_K: return "IQ2_K - 2.375 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_K_R4: return "IQ2_K_R4 - 2.375 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_K: return "IQ3_K - 3.4325 bpw";
@@ -18619,7 +18621,7 @@ static ggml_type change_type_if_necessary(ggml_type new_type, int nx, int ny) {
new_type == GGML_TYPE_IQ4_K_R4|| new_type == GGML_TYPE_Q8_K_R8 || new_type == GGML_TYPE_IQ3_K_R4||
new_type == GGML_TYPE_IQ2_K_R4|| new_type == GGML_TYPE_IQ5_K_R4|| new_type == GGML_TYPE_IQ4_KS_R4 ||
new_type == GGML_TYPE_IQ3_XXS_R4 || new_type == GGML_TYPE_IQ2_XXS_R4 || new_type == GGML_TYPE_IQ2_XS_R4 ||
- new_type == GGML_TYPE_IQ2_S_R4|| new_type == GGML_TYPE_IQ3_S_R4) {
+ new_type == GGML_TYPE_IQ2_S_R4|| new_type == GGML_TYPE_IQ3_S_R4|| new_type == GGML_TYPE_IQ5_KS) {
if (nx % QK_K != 0) {
LLAMA_LOG_WARN("\n\n%s : tensor cols %d x %d are not divisible by %d, required for %s", __func__, nx, ny, QK_K, ggml_type_name(new_type));
convert_incompatible_tensor = true;
@@ -18661,6 +18663,7 @@ static ggml_type change_type_if_necessary(ggml_type new_type, int nx, int ny) {
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ4_K_R4:
case GGML_TYPE_Q4_K_R4:
+ case GGML_TYPE_IQ5_KS:
case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ5_K_R4:
@@ -19321,6 +19324,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_IQ4_KS: default_type = GGML_TYPE_IQ4_KS; break;
case LLAMA_FTYPE_MOSTLY_IQ4_KS_R4:default_type = GGML_TYPE_IQ4_KS_R4;break;
case LLAMA_FTYPE_MOSTLY_IQ4_KSS: default_type = GGML_TYPE_IQ4_KSS; break;
+ case LLAMA_FTYPE_MOSTLY_IQ5_KS: default_type = GGML_TYPE_IQ5_KS; break;
case LLAMA_FTYPE_MOSTLY_IQ2_K: default_type = GGML_TYPE_IQ2_K; break;
case LLAMA_FTYPE_MOSTLY_IQ2_K_R4:default_type = GGML_TYPE_IQ2_K_R4;break;
case LLAMA_FTYPE_MOSTLY_IQ3_K: default_type = GGML_TYPE_IQ3_K; break;