author    Iwan Kawrakow <iwan.kawrakow@gmail.com>                 2024-07-29 12:38:46 +0300
committer Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-08-01 09:38:06 +0200
commit    5d341757bc73efee0deba07a17679a965037753b
tree      33db7a06b6b3ae1067a1d3db36192926705c678f
parent    06e255ac9da49cabde466b9ef8b3c09c0f8d8dd1
iq5_k: Basics
Quantize/dequantize, CUDA dequantize
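
For reference, the layout this commit adds (see ggml-common.h below): each block_iq5_k covers QK_K = 256 weights with an fp16 super-block scale d, a 16-bit extra mask holding one table-selection bit per 16-weight sub-block, 6-bit signed sub-block scales split across scales_l (4 low bits) and scales_h (2 high bits), and 5-bit non-linear quants split across qs (4 low bits) and qh (1 high bit). That is 2 + 2 + 8 + 4 + 128 + 32 = 176 bytes per 256 weights, i.e. 176*8/256 = 5.5 bpw, as advertised in the quantize example.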
 examples/quantize/quantize.cpp |   1
 ggml/include/ggml.h            |  10
 ggml/src/ggml-common.h         |  28
 ggml/src/ggml-cuda.cu          |   1
 ggml/src/ggml-cuda/common.cuh  |   7
 ggml/src/ggml-cuda/convert.cu  |  37
 ggml/src/ggml-cuda/mmvq.cu     |  12
 ggml/src/ggml-cuda/vecdotq.cuh |  32
 ggml/src/ggml-quants.c         |   1
 ggml/src/ggml.c                |  53
 ggml/src/iqk/iqk_quantize.cpp  | 604
 ggml/src/iqk/iqk_quantize.h    |  16
 include/llama.h                |   5
 src/llama.cpp                  |  13
 14 files changed, 646 insertions(+), 174 deletions(-)
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 5f599c65..17e87e53 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -42,6 +42,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "IQ4_XS", LLAMA_FTYPE_MOSTLY_IQ4_XS, " 4.25 bpw non-linear quantization", },
{ "IQ2_K", LLAMA_FTYPE_MOSTLY_IQ2_K, " 2.375 bpw non-linear quantization",},
{ "IQ4_K", LLAMA_FTYPE_MOSTLY_IQ4_K, " 4.5 bpw non-linear quantization", },
+ { "IQ5_K", LLAMA_FTYPE_MOSTLY_IQ5_K, " 5.5 bpw non-linear quantization", },
{ "Q4_K", LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
{ "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.59G, +0.0992 ppl @ LLaMA-v1-7B", },
{ "Q4_K_M", LLAMA_FTYPE_MOSTLY_Q4_K_M, " 3.80G, +0.0532 ppl @ LLaMA-v1-7B", },
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 2cb4af32..b7585ad6 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -389,8 +389,9 @@ extern "C" {
GGML_TYPE_IQ1_BN = 34,
GGML_TYPE_IQ2_BN = 35,
GGML_TYPE_Q8_K64 = 36,
- GGML_TYPE_IQ4_K = 37,
- GGML_TYPE_IQ2_K = 38,
+ GGML_TYPE_IQ2_K = 37,
+ GGML_TYPE_IQ4_K = 38,
+ GGML_TYPE_IQ5_K = 39,
GGML_TYPE_COUNT,
};
@@ -437,8 +438,9 @@ extern "C" {
GGML_FTYPE_MOSTLY_Q4_0_8_8 = 27, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ1_BN = 28, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ2_BN = 29, // except 1d tensors
- GGML_FTYPE_MOSTLY_IQ4_K = 30, // except 1d tensors
- GGML_FTYPE_MOSTLY_IQ2_K = 31, // except 1d tensors
+ GGML_FTYPE_MOSTLY_IQ2_K = 30, // except 1d tensors
+ GGML_FTYPE_MOSTLY_IQ4_K = 31, // except 1d tensors
+ GGML_FTYPE_MOSTLY_IQ5_K = 32, // except 1d tensors
};
// available tensor operations:
diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index 9466dfcf..64268696 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -448,6 +448,14 @@ static_assert(sizeof(block_iq4_xs) == sizeof(ggml_half) + sizeof(uint16_t) + QK_
typedef struct {
ggml_half d;
uint16_t extra;
+ uint8_t scales[QK_K/32];
+ uint8_t qs[QK_K/4];
+} block_iq2_k;
+static_assert(sizeof(block_iq2_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/32 + QK_K/4, "wrong iq2_k block size/padding");
+
+typedef struct {
+ ggml_half d;
+ uint16_t extra;
uint8_t scales_h[QK_K/64];
uint8_t scales_l[QK_K/32];
uint8_t qs[QK_K/2];
@@ -457,10 +465,13 @@ static_assert(sizeof(block_iq4_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K
typedef struct {
ggml_half d;
uint16_t extra;
- uint8_t scales[QK_K/32];
- uint8_t qs[QK_K/4];
-} block_iq2_k;
-static_assert(sizeof(block_iq2_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/32 + QK_K/4, "wrong iq2_k block size/padding");
+ uint8_t scales_h[QK_K/64];
+ uint8_t scales_l[QK_K/32];
+ uint8_t qs[QK_K/2];
+ uint8_t qh[QK_K/8];
+} block_iq5_k;
+static_assert(sizeof(block_iq5_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/2 + QK_K/8 + 3*QK_K/64, "wrong iq5_k block size/padding");
+
#endif // GGML_COMMON_DECL
#endif // GGML_COMMON_DECL
@@ -1893,13 +1904,18 @@ GGML_TABLE_BEGIN(uint32_t, iq1s_grid_gpu, NGRID_IQ1S)
GGML_TABLE_END()
#endif
+GGML_TABLE_BEGIN(int8_t, iq2nl_values, 8)
+ -31, -13, 1, 17, -26, -8, 6, 22
+GGML_TABLE_END()
+
GGML_TABLE_BEGIN(int8_t, iq4k_values, 32)
-127, -104, -83, -65, -49, -35, -22, -10, 1, 13, 25, 38, 53, 69, 89, 113,
-123, -100, -79, -61, -45, -31, -18, -6, 5, 17, 29, 42, 57, 73, 93, 117
GGML_TABLE_END()
-GGML_TABLE_BEGIN(int8_t, iq2nl_values, 8)
- -31, -13, 1, 17, -26, -8, 6, 22
+GGML_TABLE_BEGIN(int8_t, iq5nl_values, 64)
+ -126, -114, -103, -92, -83, -74, -65, -57, -50, -43, -36, -30, -24, -18, -12, -6, -1, 5, 11, 17, 23, 29, 36, 43, 51, 59, 68, 77, 87, 97, 109, 121,
+ -124, -112, -101, -90, -81, -72, -63, -55, -48, -41, -34, -28, -22, -16, -10, -4, 1, 7, 13, 19, 25, 31, 38, 45, 53, 61, 70, 79, 89, 99, 111, 123,
GGML_TABLE_END()
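
The 64-entry iq5nl_values table is really two 32-value tables: the second half is the first shifted up by 2, and a per-sub-block bit of extra picks between them. A minimal scalar sketch (not part of the patch; it assumes the block_iq5_k definition and tables above are in scope, and the helper name iq5_k_weight is illustrative) of how one weight is reconstructed from the packed fields:

    // Reconstruct weight i (0..255) of one super-block, following the packing
    // used by quantize_row_iq5_k_impl / dequantize_row_iq5_k in this commit.
    static inline float iq5_k_weight(const block_iq5_k * b, int i) {
        const int ib = i/16, j = i%16;    // 16-weight sub-block and position in it
        const int ib64 = ib/4, r = ib%4;  // 64-weight group and row inside it
        // 6-bit signed sub-block scale: 4 low bits in scales_l, 2 high bits in scales_h
        const int low4  = (b->scales_l[ib/2] >> 4*(ib%2)) & 0xf;
        const int high2 = (b->scales_h[ib/4] >> 2*(ib%4)) & 3;
        const float dl  = GGML_FP16_TO_FP32(b->d) * ((low4 | (high2 << 4)) - 32);
        // 5-bit quant index: 4 low bits in qs (two rows share a byte), 1 high bit in qh
        const uint8_t q = b->qs[32*ib64 + 16*(r & 1) + j];
        const uint8_t h = b->qh[16*(r & 1) + j];
        const int idx   = ((q >> 4*(r >> 1)) & 0xf) | (((h >> (2*ib64 + (r >> 1))) & 1) << 4);
        // bit ib of extra selects the shifted half of the 64-entry table
        return dl * iq5nl_values[idx + (((b->extra >> ib) & 1) << 5)];
    }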
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index a4c93ad6..ba9d89aa 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -2754,6 +2754,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ2_K:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index 12eebb00..ff37dd56 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -684,6 +684,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ4_K> {
};
template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ5_K> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR4_XS;
+ static constexpr int qi = QI4_XS;
+};
+
+template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ3_S> {
static constexpr int qk = QK_K;
static constexpr int qr = QR3_S;
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 6dd0fc50..f388e9f3 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -544,6 +544,33 @@ static __global__ void dequantize_block_iq4_k(const void * __restrict__ vx, dst_
}
template<typename dst_t>
+static __global__ void dequantize_block_iq5_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
+
+ const int i = blockIdx.x;
+ const block_iq5_k * x = (const block_iq5_k *) vx;
+
+ const int tid = threadIdx.x;
+ int ib64 = tid/8; // 0...3
+ int il = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 64*ib64 + 2*il;
+ const float d = (float)x[i].d;
+ const float dl1 = d * (((x[i].scales_l[2*ib64+0] & 0xf) | ((x[i].scales_h[ib64] << 4) & 0x30)) - 32);
+ const float dl2 = d * (((x[i].scales_l[2*ib64+0] >> 4) | ((x[i].scales_h[ib64] << 2) & 0x30)) - 32);
+ const float dl3 = d * (((x[i].scales_l[2*ib64+1] & 0xf) | ((x[i].scales_h[ib64] >> 0) & 0x30)) - 32);
+ const float dl4 = d * (((x[i].scales_l[2*ib64+1] >> 4) | ((x[i].scales_h[ib64] >> 2) & 0x30)) - 32);
+ const uint8_t * qs = x[i].qs + 32*ib64 + 2*il;
+ const uint8_t * qh = x[i].qh + 2*il;
+ const uint8_t extra = x[i].extra >> 4*(ib64%4);
+ for (int j = 0; j < 2; ++j) {
+ const uint8_t h1 = qh[j] >> 2*(ib64%4), h2 = qh[j+16] >> 2*(ib64%4);
+ y[j+ 0] = dl1 * iq5nl_values[(qs[j+ 0] & 0xf) | ((h1 & 1) << 4) | ((extra << 5) & 0x20)];
+ y[j+16] = dl2 * iq5nl_values[(qs[j+16] & 0xf) | ((h2 & 1) << 4) | ((extra << 4) & 0x20)];
+ y[j+32] = dl3 * iq5nl_values[(qs[j+ 0] >> 4) | ((h1 & 2) << 3) | ((extra << 3) & 0x20)];
+ y[j+48] = dl4 * iq5nl_values[(qs[j+16] >> 4) | ((h2 & 2) << 3) | ((extra << 2) & 0x20)];
+ }
+}
+
+template<typename dst_t>
static __global__ void dequantize_block_iq2_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
@@ -705,6 +732,12 @@ static void dequantize_row_iq4_k_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq5_k_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) {
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq5_k<<<nb, 32, 0, stream>>>(vx, y);
+}
+
+template<typename dst_t>
static void dequantize_row_iq2_k_cuda(const void * vx, dst_t * y, const int64_t k, cudaStream_t stream) {
const int nb = (k + QK_K - 1) / QK_K;
dequantize_block_iq2_k<<<nb, 32, 0, stream>>>(vx, y);
@@ -776,6 +809,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_K:
return dequantize_row_iq4_k_cuda;
+ case GGML_TYPE_IQ5_K:
+ return dequantize_row_iq5_k_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_S:
@@ -831,6 +866,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_K:
return dequantize_row_iq4_k_cuda;
+ case GGML_TYPE_IQ5_K:
+ return dequantize_row_iq5_k_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_S:
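
A note on the new kernel's thread mapping: dequantize_block_iq5_k runs one 32-thread block per super-block. Each thread selects one of the four 64-weight groups (ib64 = tid/8, so 0...3) and a 2-wide column within it (il = tid%8), and the j-loop writes two values into each of the group's four 16-weight rows: 32 threads * 4 rows * 2 values = 256 = QK_K outputs per block.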
diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu
index b99dc245..776ca80f 100644
--- a/ggml/src/ggml-cuda/mmvq.cu
+++ b/ggml/src/ggml-cuda/mmvq.cu
@@ -25,6 +25,7 @@ static constexpr __device__ vec_dot_q_cuda_t get_vec_dot_q_cuda(ggml_type type)
type == GGML_TYPE_IQ4_NL ? vec_dot_iq4_nl_q8_1 :
type == GGML_TYPE_IQ4_XS ? vec_dot_iq4_xs_q8_1 :
type == GGML_TYPE_IQ4_K ? vec_dot_iq4_k_q8_1 :
+ type == GGML_TYPE_IQ5_K ? vec_dot_iq5_k_q8_1 :
type == GGML_TYPE_IQ2_K ? vec_dot_iq2_k_q8_1 :
type == GGML_TYPE_IQ3_S ? vec_dot_iq3_s_q8_1 :
nullptr;
@@ -49,6 +50,7 @@ static constexpr __device__ int get_vdr_mmvq(ggml_type type) {
type == GGML_TYPE_IQ4_NL ? VDR_IQ4_NL_Q8_1_MMVQ :
type == GGML_TYPE_IQ4_XS ? VDR_IQ4_XS_Q8_1_MMVQ :
type == GGML_TYPE_IQ4_K ? VDR_IQ4_K_Q8_1_MMVQ :
+ type == GGML_TYPE_IQ5_K ? VDR_IQ5_K_Q8_1_MMVQ :
type == GGML_TYPE_IQ2_K ? VDR_IQ2_K_Q8_1_MMVQ :
1;
}
@@ -354,6 +356,13 @@ static void mul_mat_vec_iq4_k_q8_1_cuda(
mul_mat_vec_q_cuda<GGML_TYPE_IQ4_K>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
}
+static void mul_mat_vec_iq5_k_q8_1_cuda(
+ const void * vx, const void * vy, float * dst,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
+
+ mul_mat_vec_q_cuda<GGML_TYPE_IQ5_K>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
+}
+
static void mul_mat_vec_iq2_k_q8_1_cuda(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
@@ -452,6 +461,9 @@ void ggml_cuda_op_mul_mat_vec_q(
case GGML_TYPE_IQ4_K:
mul_mat_vec_iq4_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
+ case GGML_TYPE_IQ5_K:
+ mul_mat_vec_iq5_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
+ break;
case GGML_TYPE_IQ2_K:
mul_mat_vec_iq2_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
diff --git a/ggml/src/ggml-cuda/vecdotq.cuh b/ggml/src/ggml-cuda/vecdotq.cuh
index 97a5619f..414f580b 100644
--- a/ggml/src/ggml-cuda/vecdotq.cuh
+++ b/ggml/src/ggml-cuda/vecdotq.cuh
@@ -1274,6 +1274,38 @@ static __device__ __forceinline__ float vec_dot_iq4_k_q8_1(
return d * (sumi1 * ls1 + sumi2 * ls2);
}
+#define VDR_IQ5_K_Q8_1_MMVQ 4
+#define VDR_IQ5_K_Q8_1_MMQ 4
+
+// TODO
+static __device__ __forceinline__ float vec_dot_iq5_k_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+ return 0;
+
+// const block_iq5_k * bq4 = (const block_iq5_k *) vbq + kbx;
+// const uint8_t * all_values = (const uint8_t *)iq4k_values;
+//
+// // iqs is 0...28
+// const int ib32 = iqs/4;
+// // Why iqs/4 ?
+// const int32_t * q8 = (const int *)bq8_1[ib32].qs;
+// const uint16_t * q4 = (const uint16_t *)bq4->qs + 8*ib32;
+// const uint16_t extra = bq4->extra >> 2*ib32;
+// int v1, v2;
+// int sumi1 = 0, sumi2 = 0;
+// for (int j = 0; j < 4; ++j) {
+// const uint32_t aux32 = q4[2*j+0] | (q4[2*j+1] << 16);
+// get_int_from_table_16_shift(aux32, extra, all_values, v1, v2);
+// sumi1 = ggml_cuda_dp4a(v1, q8[j+0], sumi1);
+// sumi2 = ggml_cuda_dp4a(v2, q8[j+4], sumi2);
+// }
+// const float d = __half2float(bq4->d) * __low2float(bq8_1[ib32].ds);
+// const uint8_t sh = bq4->scales_h[ib32/2] >> 4*(ib32%2);
+// const int ls1 = ((bq4->scales_l[ib32] & 0xf) | ((sh << 4) & 0x30)) - 32;
+// const int ls2 = ((bq4->scales_l[ib32] >> 4) | ((sh << 2) & 0x30)) - 32;
+// return d * (sumi1 * ls1 + sumi2 * ls2);
+}
+
#define VDR_IQ2_K_Q8_1_MMVQ 4
#define VDR_IQ2_K_Q8_1_MMQ 4
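
Note that vec_dot_iq5_k_q8_1 is only a stub at this point: it returns 0, and the commented-out body is the IQ4_K kernel kept as a template (hence the leftover "Why iqs/4 ?" question). Since mmvq.cu already dispatches to it, the CUDA quantized mat-vec path for IQ5_K would produce zero output until the TODO is filled in, consistent with the "Basics" scope of this commit.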
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index a5dbff12..4b3bf361 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -14949,6 +14949,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
} break;
case GGML_TYPE_IQ2_K: break;
case GGML_TYPE_IQ4_K: break;
+ case GGML_TYPE_IQ5_K: break;
case GGML_TYPE_Q4_0_4_4:
case GGML_TYPE_Q4_0_4_8:
{
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 0881756d..f873e49a 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -980,6 +980,18 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.gemv = ggml_gemv_q4_0_8x8_q8_0,
.gemm = ggml_gemm_q4_0_8x8_q8_0,
},
+ [GGML_TYPE_IQ2_K] = {
+ .type_name = "iq2_k",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq2_k),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq2_k,
+ .from_float = quantize_row_iq2_k,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq2_k_ref,
+ .vec_dot = vec_dot_iq2_k_q8_k,
+ .vec_dot_type = GGML_TYPE_Q8_K,
+ .nrows = 1,
+ },
[GGML_TYPE_IQ4_K] = {
.type_name = "iq4_k",
.blck_size = QK_K,
@@ -992,15 +1004,15 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.vec_dot_type = GGML_TYPE_Q8_K,
.nrows = 1,
},
- [GGML_TYPE_IQ2_K] = {
- .type_name = "iq2_k",
+ [GGML_TYPE_IQ5_K] = {
+ .type_name = "iq5_k",
.blck_size = QK_K,
- .type_size = sizeof(block_iq2_k),
+ .type_size = sizeof(block_iq5_k),
.is_quantized = true,
- .to_float = (ggml_to_float_t) dequantize_row_iq2_k,
- .from_float = quantize_row_iq2_k,
- .from_float_ref = (ggml_from_float_t)quantize_row_iq2_k_ref,
- .vec_dot = vec_dot_iq2_k_q8_k,
+ .to_float = (ggml_to_float_t) dequantize_row_iq5_k,
+ .from_float = quantize_row_iq5_k,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq5_k_ref,
+ .vec_dot = vec_dot_iq5_k_q8_k,
.vec_dot_type = GGML_TYPE_Q8_K,
.nrows = 1,
},
@@ -3353,8 +3365,9 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ2_BN: wtype = GGML_TYPE_IQ2_BN; break;
case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
case GGML_FTYPE_MOSTLY_IQ4_XS: wtype = GGML_TYPE_IQ4_XS; break;
- case GGML_FTYPE_MOSTLY_IQ4_K: wtype = GGML_TYPE_IQ4_K; break;
case GGML_FTYPE_MOSTLY_IQ2_K: wtype = GGML_TYPE_IQ2_K; break;
+ case GGML_FTYPE_MOSTLY_IQ4_K: wtype = GGML_TYPE_IQ4_K; break;
+ case GGML_FTYPE_MOSTLY_IQ5_K: wtype = GGML_TYPE_IQ5_K; break;
case GGML_FTYPE_MOSTLY_IQ3_S: wtype = GGML_TYPE_IQ3_S; break;
case GGML_FTYPE_MOSTLY_IQ2_S: wtype = GGML_TYPE_IQ2_S; break;
case GGML_FTYPE_MOSTLY_Q4_0_4_4: wtype = GGML_TYPE_Q4_0_4_4; break;
@@ -9604,8 +9617,9 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ2_BN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
- case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q4_0_4_4:
@@ -9986,8 +10000,9 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ2_BN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
- case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q4_0_4_4:
@@ -10118,8 +10133,9 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ2_BN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
- case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q4_0_4_4:
@@ -13039,8 +13055,9 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ2_BN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
- case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q4_0_4_4:
@@ -13231,8 +13248,9 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ2_BN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
- case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q4_0_4_4:
@@ -13497,8 +13515,9 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ2_BN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
- case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q4_0_4_4:
@@ -14090,8 +14109,9 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ2_BN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
- case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ4_K:
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_IQ3_S:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_Q8_K:
@@ -20827,8 +20847,9 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ2_BN: result = quantize_iq2_bn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
- case GGML_TYPE_IQ4_K: result = quantize_iq4_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_K: result = quantize_iq2_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ4_K: result = quantize_iq4_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ5_K: result = quantize_iq5_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q4_0_4_4: result = quantize_q4_0_4x4(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q4_0_4_8: result = quantize_q4_0_4x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_Q4_0_8_8: result = quantize_q4_0_8x8(src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
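
With the type_traits entry in place, the generic ggml machinery derives all size arithmetic for IQ5_K from blck_size and type_size. A quick sanity check (the 4096-wide row is a hypothetical example):

    // 4096 weights / 256 per super-block = 16 blocks * 176 bytes = 2816 bytes, i.e. 5.5 bpw
    size_t row_bytes = ggml_row_size(GGML_TYPE_IQ5_K, 4096);  // == 2816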
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 7722d630..9c502f07 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -414,6 +414,221 @@ void quantize_row_q8_K64(const float * x, void * y, int64_t k) {
}
//
+// ============================================== iq2_K
+//
+
+namespace {
+
+inline int best_index_iq2nl(const int8_t * values, float x) {
+ int idx = x < values[1] ? 0 : x > values[2] ? 2 : 1;
+ return x - values[idx] < values[idx+1] - x ? idx : idx + 1;
+}
+
+void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const float * quant_weights) {
+
+ constexpr int kBlockSize = 16;
+
+ block_iq2_k * y = (block_iq2_k *)vy;
+
+ float scales[QK_K/kBlockSize];
+ float weight[kBlockSize];
+ float sumx[kBlockSize+1], sumw[kBlockSize+1];
+
+ std::array<std::pair<float,int>, kBlockSize> pairs;
+
+ const int8_t * shifted_values = iq2nl_values + 4;
+
+ for (int ibl = 0; ibl < n_per_row/QK_K; ++ibl) {
+
+ memset(&y[ibl], 0, sizeof(block_iq2_k));
+ y[ibl].d = GGML_FP32_TO_FP16(0.f);
+
+ const float * xbl = x + ibl*QK_K;
+ float sumx2 = 0;
+ for (int j = 0; j < QK_K; ++j) sumx2 += xbl[j]*xbl[j];
+ const float sigma2 = 1.5f*sumx2/QK_K;
+
+ uint16_t extra = 0;
+
+ float max_abs_scale = 0;
+
+ for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
+ const float * xb = xbl + kBlockSize*ib;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*QK_K + ib*kBlockSize;
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
+ }
+ for (int j = 0; j < kBlockSize; ++j) pairs[j] = {xb[j], j};
+ std::sort(pairs.begin(), pairs.end());
+ sumx[0] = sumw[0] = 0;
+ for (int j = 0; j < kBlockSize; ++j) {
+ int jj = pairs[j].second;
+ sumw[j+1] = sumw[j] + weight[jj];
+ sumx[j+1] = sumx[j] + weight[jj]*xb[jj];
+ }
+ float best = 0, d = 0;
+ bool is_shifted = false;
+ float sumqx, sumq2;
+ for (int i1 = 0; i1 < kBlockSize; ++i1) {
+ for (int i2 = i1; i2 < kBlockSize; ++i2) {
+ for (int i3 = i2; i3 < kBlockSize; ++i3) {
+ sumqx = (sumx[i1] - sumx[ 0])*iq2nl_values[0] + (sumx[i2] - sumx[i1])*iq2nl_values[1]
+ + (sumx[i3] - sumx[i2])*iq2nl_values[2] + (sumx[kBlockSize] - sumx[i3])*iq2nl_values[3];
+ sumq2 = (sumw[i1] - sumw[ 0])*iq2nl_values[0]*iq2nl_values[0] + (sumw[i2] - sumw[i1])*iq2nl_values[1]*iq2nl_values[1]
+ + (sumw[i3] - sumw[i2])*iq2nl_values[2]*iq2nl_values[2] + (sumw[kBlockSize] - sumw[i3])*iq2nl_values[3]*iq2nl_values[3];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = false;
+ }
+ sumqx = (sumx[i1] - sumx[ 0])*shifted_values[0] + (sumx[i2] - sumx[i1])*shifted_values[1]
+ + (sumx[i3] - sumx[i2])*shifted_values[2] + (sumx[kBlockSize] - sumx[i3])*shifted_values[3];
+ sumq2 = (sumw[i1] - sumw[ 0])*shifted_values[0]*shifted_values[0] + (sumw[i2] - sumw[i1])*shifted_values[1]*shifted_values[1]
+ + (sumw[i3] - sumw[i2])*shifted_values[2]*shifted_values[2] + (sumw[kBlockSize] - sumw[i3])*shifted_values[3]*shifted_values[3];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = true;
+ }
+ sumqx = (sumx[i1] - sumx[ 0])*iq2nl_values[3] + (sumx[i2] - sumx[i1])*iq2nl_values[2]
+ + (sumx[i3] - sumx[i2])*iq2nl_values[1] + (sumx[kBlockSize] - sumx[i3])*iq2nl_values[0];
+ sumq2 = (sumw[i1] - sumw[ 0])*iq2nl_values[3]*iq2nl_values[3] + (sumw[i2] - sumw[i1])*iq2nl_values[2]*iq2nl_values[2]
+ + (sumw[i3] - sumw[i2])*iq2nl_values[1]*iq2nl_values[1] + (sumw[kBlockSize] - sumw[i3])*iq2nl_values[0]*iq2nl_values[0];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = false;
+ }
+ sumqx = (sumx[i1] - sumx[ 0])*shifted_values[3] + (sumx[i2] - sumx[i1])*shifted_values[2]
+ + (sumx[i3] - sumx[i2])*shifted_values[1] + (sumx[kBlockSize] - sumx[i3])*shifted_values[0];
+ sumq2 = (sumw[i1] - sumw[ 0])*shifted_values[3]*shifted_values[3] + (sumw[i2] - sumw[i1])*shifted_values[2]*shifted_values[2]
+ + (sumw[i3] - sumw[i2])*shifted_values[1]*shifted_values[1] + (sumw[kBlockSize] - sumw[i3])*shifted_values[0]*shifted_values[0];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = true;
+ }
+ }
+ }
+ }
+ scales[ib] = d;
+ if (is_shifted) extra |= (1 << ib);
+
+ float abs_scale = fabsf(scales[ib]);
+ max_abs_scale = MAX(max_abs_scale, abs_scale);
+ }
+
+ if (!max_abs_scale) continue;
+
+ float d = max_abs_scale/15;
+ y[ibl].d = GGML_FP32_TO_FP16(d);
+ y[ibl].extra = extra;
+ float id = 1/d;
+
+ float sumqx = 0, sumq2 = 0;
+ for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
+ int ls = nearest_int(0.5f*(id*scales[ib]+15));
+ ls = MAX(0, MIN(15, ls));
+ y[ibl].scales[ib/2] |= (ls << 4*(ib%2));
+ ls = 2*ls - 15;
+ float dl = d * ls;
+ if (dl) {
+ const int8_t * block_values = y[ibl].extra & (1 << ib) ? shifted_values : iq2nl_values;
+ const float * xb = xbl + kBlockSize*ib;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*QK_K + ib*kBlockSize;
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
+ }
+ float idl = 1/dl;
+ int ib32 = ib/2;
+ int offset = 16*(ib%2);
+ uint8_t * qs = y[ibl].qs + 32*(ib32/4) + offset;
+ for (int j = 0; j < 16; ++j) {
+ const float al = idl*xb[j];
+ int ibest = best_index_iq2nl(block_values, al);
+ qs[j] |= (ibest << 2*(ib32%4));
+ float w = weight[j];
+ float q = block_values[ibest]*ls;
+ sumqx += w*q*xb[j];
+ sumq2 += w*q*q;
+ }
+ }
+ }
+ if (sumq2 > 0) y[ibl].d = GGML_FP32_TO_FP16(sumqx/sumq2);
+
+ }
+}
+}
+
+void quantize_row_iq2_k_ref(const float * GGML_RESTRICT x, block_iq2_k * GGML_RESTRICT y, int64_t k) {
+ assert(k % QK_K == 0);
+ quantize_iq2_k(x, (void *)y, 1, k, nullptr);
+}
+
+void quantize_row_iq2_k(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+ assert(k % QK_K == 0);
+ block_iq2_k * y = (block_iq2_k *)vy;
+ quantize_row_iq2_k_ref(x, y, k);
+}
+
+size_t quantize_iq2_k(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+ GGML_ASSERT(n_per_row%QK_K == 0);
+ int nblock = n_per_row/QK_K;
+ char * qrow = (char *)dst;
+ for (int64_t row = 0; row < nrows; ++row) {
+ quantize_row_iq2_k_impl(src, (void *)qrow, n_per_row, imatrix);
+ src += n_per_row;
+ qrow += nblock*sizeof(block_iq2_k);
+ }
+ return nrows * nblock * sizeof(block_iq2_k);
+}
+
+void dequantize_row_iq2_k(const block_iq2_k * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ for (int i = 0; i < nb; i++) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const uint8_t * qs = x[i].qs;
+
+ uint16_t extra = x[i].extra;
+
+ int shift = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
+ float dl1 = d * (2*(x[i].scales[ib32] & 0xf) - 15);
+ float dl2 = d * (2*(x[i].scales[ib32] >> 4) - 15);
+ const int8_t * values1 = extra & 1 ? iq2nl_values + 4 : iq2nl_values;
+ const int8_t * values2 = extra & 2 ? iq2nl_values + 4 : iq2nl_values;
+ extra >>= 2;
+ for (int j = 0; j < 16; ++j) {
+ y[j+ 0] = dl1 * values1[(qs[j+ 0] >> shift) & 3];
+ y[j+16] = dl2 * values2[(qs[j+16] >> shift) & 3];
+ }
+ y += 32;
+ shift += 2;
+ if (shift == 8) { qs += 32; shift = 0; }
+ }
+
+ }
+
+}
+
+void vec_dot_iq2_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ GGML_UNUSED(nrc);
+ GGML_UNUSED(bx);
+ GGML_UNUSED(by);
+ GGML_UNUSED(bs);
+
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ2_K, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+ return;
+ }
+
+ const int nb = n / QK_K;
+
+ const block_iq2_k * x = (const block_iq2_k *)vx;
+ const block_q8_K * y = (const block_q8_K *)vy;
+}
+
+//
// ============================================== iq4_K
//
void dequantize_row_iq4_k(const block_iq4_k * x, float * y, int64_t k) {
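
Note that the iq2_K code added by this hunk is not new: the identical functions are removed further down, so this is a move that puts the iq2_K section ahead of iq4_K, matching the reordered enum values in ggml.h. The algorithm itself deserves a word: with only 4 quant levels, the optimal assignment of sorted block values to levels is monotone, so the search enumerates all split points (i1, i2, i3) and scores each candidate with the weighted least-squares optimum d = sumqx/sumq2 (which maximizes sumqx^2/sumq2); the prefix sums sumx/sumw make every candidate an O(1) evaluation, and both level tables (base and shifted) as well as both sign orders are tried.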
@@ -700,135 +915,297 @@ size_t quantize_iq4_k(const float * src, void * dst, int64_t nrows, int64_t n_pe
}
//
-// ============================================== iq2_K
+// ============================================== iq5_K
//
+void dequantize_row_iq5_k(const block_iq5_k * x, float * y, int64_t k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
-namespace {
+ for (int i = 0; i < nb; i++) {
-inline int best_index_iq2nl(const int8_t * values, float x) {
- int idx = x < values[1] ? 0 : x > values[2] ? 2 : 1;
- return x - values[idx] < values[idx+1] - x ? idx : idx + 1;
+ const float d = GGML_FP16_TO_FP32(x[i].d);
+ const uint8_t * qs = x[i].qs;
+ const uint8_t * qh = x[i].qh;
+ const uint8_t * sl = x[i].scales_l;
+ const uint8_t * sh = x[i].scales_h;
+
+ uint16_t extra = x[i].extra;
+
+ int shift = 0;
+ for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
+
+ float dl1 = d * (((sl[2*ib64+0] & 0xf) | ((sh[ib64] << 4) & 0x30)) - 32);
+ float dl2 = d * (((sl[2*ib64+0] >> 4) | ((sh[ib64] << 2) & 0x30)) - 32);
+ float dl3 = d * (((sl[2*ib64+1] & 0xf) | ((sh[ib64] >> 0) & 0x30)) - 32);
+ float dl4 = d * (((sl[2*ib64+1] >> 4) | ((sh[ib64] >> 2) & 0x30)) - 32);
+ const int8_t * values1 = iq5nl_values + ((extra & 1) << 5);
+ const int8_t * values2 = iq5nl_values + ((extra & 2) << 4);
+ const int8_t * values3 = iq5nl_values + ((extra & 4) << 3);
+ const int8_t * values4 = iq5nl_values + ((extra & 8) << 2);
+ for (int j = 0; j < 16; ++j) {
+ y[j+ 0] = dl1 * values1[(qs[j+ 0] & 0xf) | (((qh[j+ 0] >> shift) & 1) << 4)];
+ y[j+16] = dl2 * values2[(qs[j+16] & 0xf) | (((qh[j+16] >> shift) & 1) << 4)];
+ y[j+32] = dl3 * values3[(qs[j+ 0] >> 4) | (((qh[j+ 0] >> shift) & 2) << 3)];
+ y[j+48] = dl4 * values4[(qs[j+16] >> 4) | (((qh[j+16] >> shift) & 2) << 3)];
+ }
+ y += 64;
+ qs += 32;
+ extra >>= 4;
+ shift += 2;
+ if (shift == 8) { qh += 32; shift = 0; }
+ }
+
+ }
}
-void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const float * quant_weights) {
+void vec_dot_iq5_k_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ GGML_UNUSED(nrc);
+ GGML_UNUSED(bx);
+ GGML_UNUSED(by);
+ GGML_UNUSED(bs);
- constexpr int kBlockSize = 16;
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ5_K, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+ return;
+ }
- block_iq2_k * y = (block_iq2_k *)vy;
+ const int nb = n / QK_K;
- float scales[QK_K/kBlockSize];
- float weight[kBlockSize];
- float sumx[kBlockSize+1], sumw[kBlockSize+1];
+ const block_iq5_k * x = (const block_iq5_k *)vx;
+ const block_q8_K * y = (const block_q8_K *)vy;
- std::array<std::pair<float,int>, kBlockSize> pairs;
+ float sumf = 0;
- const int8_t * shifted_values = iq2nl_values + 4;
+ for (int i = 0; i < nb; i++) {
+
+ const float d = GGML_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint8_t * qs = x[i].qs;
+ const uint8_t * qh = x[i].qh;
+ const uint8_t * sl = x[i].scales_l;
+ const uint8_t * sh = x[i].scales_h;
+ const int8_t * q8 = y[i].qs;
+
+ uint16_t extra = x[i].extra;
+
+ int shift = 0;
+ int sumb = 0;
+ for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
+
+ int dl1 = (((sl[2*ib64+0] & 0xf) | ((sh[ib64] << 4) & 0x30)) - 32);
+ int dl2 = (((sl[2*ib64+0] >> 4) | ((sh[ib64] << 2) & 0x30)) - 32);
+ int dl3 = (((sl[2*ib64+1] & 0xf) | ((sh[ib64] >> 0) & 0x30)) - 32);
+ int dl4 = (((sl[2*ib64+1] >> 4) | ((sh[ib64] >> 2) & 0x30)) - 32);
+ const int8_t * values1 = iq5nl_values + ((extra & 1) << 5);
+ const int8_t * values2 = iq5nl_values + ((extra & 2) << 4);
+ const int8_t * values3 = iq5nl_values + ((extra & 4) << 3);
+ const int8_t * values4 = iq5nl_values + ((extra & 8) << 2);
+ int sumi1 = 0, sumi2 = 0, sumi3 = 0, sumi4 = 0;
+ for (int j = 0; j < 16; ++j) {
+ sumi1 += q8[j+ 0] * values1[(qs[j+ 0] & 0xf) | (((qh[j+ 0] >> shift) & 1) << 4)];
+ sumi2 += q8[j+16] * values2[(qs[j+16] & 0xf) | (((qh[j+16] >> shift) & 1) << 4)];
+ sumi3 += q8[j+32] * values3[(qs[j+ 0] >> 4) | (((qh[j+ 0] >> shift) & 2) << 3)];
+ sumi4 += q8[j+48] * values4[(qs[j+16] >> 4) | (((qh[j+16] >> shift) & 2) << 3)];
+ }
+ sumb += dl1 * sumi1 + dl2 * sumi2 + dl3 * sumi3 + dl4 * sumi4;
+ q8 += 64;
+ qs += 32;
+ extra >>= 4;
+ shift += 2;
+ }
+ sumf += d * sumb;
+
+ }
+
+ *s = sumf;
+
+}
+
+namespace {
+static int8_t iq5nl_index[248] = {
+ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6,
+ 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 10, 10, 10, 10, 10, 10,
+ 11, 11, 11, 11, 11, 11, 12, 12, 12, 12, 12, 12, 13, 13, 13, 13, 13, 13, 14, 14, 14, 14, 14, 14, 15, 15, 15, 15, 15, 16, 16, 16,
+ 16, 16, 16, 17, 17, 17, 17, 17, 17, 18, 18, 18, 18, 18, 18, 19, 19, 19, 19, 19, 19, 20, 20, 20, 20, 20, 20, 21, 21, 21, 21, 21,
+ 21, 21, 22, 22, 22, 22, 22, 22, 22, 23, 23, 23, 23, 23, 23, 23, 23, 24, 24, 24, 24, 24, 24, 24, 24, 25, 25, 25, 25, 25, 25, 25,
+ 25, 25, 26, 26, 26, 26, 26, 26, 26, 26, 26, 27, 27, 27, 27, 27, 27, 27, 27, 27, 27, 28, 28, 28, 28, 28, 28, 28, 28, 28, 28, 29,
+ 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 29, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30, 30
+};
+static inline int best_index_iq5nl(const int8_t * values, float x) {
+ if (x <= values[ 0]) return 0;
+ if (x >= values[31]) return 31;
+ int index = iq5nl_index[(int)x - values[0]];
+ return x - values[index] < values[index+1] - x ? index : index+1;
+}
+
+void quantize_row_iq5_k_impl(const float * x, void * vy, int n_per_row, const float * quant_weights) {
+ const int ntry = 5;
+ const float step = 1.f;
+
+ block_iq5_k * y = (block_iq5_k *)vy;
+
+ float scales[QK_K/16];
+ float weight[16];
+
+ const int8_t * shifted_values = iq5nl_values + 32;
for (int ibl = 0; ibl < n_per_row/QK_K; ++ibl) {
- memset(&y[ibl], 0, sizeof(block_iq2_k));
+ memset(&y[ibl], 0, sizeof(block_iq5_k));
y[ibl].d = GGML_FP32_TO_FP16(0.f);
const float * xbl = x + ibl*QK_K;
float sumx2 = 0;
for (int j = 0; j < QK_K; ++j) sumx2 += xbl[j]*xbl[j];
- const float sigma2 = 1.5f*sumx2/QK_K;
+ const float sigma2 = 2*sumx2/QK_K;
+ float max_scale = 0, max_abs_scale = 0;
uint16_t extra = 0;
- float max_abs_scale = 0;
-
- for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
- const float * xb = xbl + kBlockSize*ib;
+ for (int ib = 0; ib < QK_K/16; ++ib) {
+ const float * xb = xbl + 16*ib;
if (quant_weights) {
- const float * qw = quant_weights + ibl*QK_K + ib*kBlockSize;
- for (int j = 0; j < kBlockSize; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ const float * qw = quant_weights + ibl*QK_K + ib*16;
+ for (int j = 0; j < 16; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
} else {
- for (int j = 0; j < kBlockSize; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
+ for (int j = 0; j < 16; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
}
- for (int j = 0; j < kBlockSize; ++j) pairs[j] = {xb[j], j};
- std::sort(pairs.begin(), pairs.end());
- sumx[0] = sumw[0] = 0;
- for (int j = 0; j < kBlockSize; ++j) {
- int jj = pairs[j].second;
- sumw[j+1] = sumw[j] + weight[jj];
- sumx[j+1] = sumx[j] + weight[jj]*xb[jj];
+ float amax = 0, max = 0;
+ for (int j = 0; j < 16; ++j) {
+ float ax = fabsf(xb[j]);
+ if (ax > amax) {
+ amax = ax; max = xb[j];
+ }
+ }
+ if (!amax) {
+ scales[ib] = 0;
+ continue;
+ }
+ float d = ntry > 0 ? -max/iq5nl_values[0] : max/iq5nl_values[0];
+ float id = 1/d;
+ float sumqx_p = 0, sumq2_p = 0;
+ float sumqx_m = 0, sumq2_m = 0;
+ for (int j = 0; j < 16; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq5nl(iq5nl_values, al);
+ float q = iq5nl_values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq5nl(iq5nl_values, -al);
+ q = iq5nl_values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ d = sumqx_p/sumq2_p;
+ float best = d*sumqx_p;
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d*sumqx_m;
}
- float best = 0, d = 0;
bool is_shifted = false;
- float sumqx, sumq2;
- for (int i1 = 0; i1 < kBlockSize; ++i1) {
- for (int i2 = i1; i2 < kBlockSize; ++i2) {
- for (int i3 = i2; i3 < kBlockSize; ++i3) {
- sumqx = (sumx[i1] - sumx[ 0])*iq2nl_values[0] + (sumx[i2] - sumx[i1])*iq2nl_values[1]
- + (sumx[i3] - sumx[i2])*iq2nl_values[2] + (sumx[kBlockSize] - sumx[i3])*iq2nl_values[3];
- sumq2 = (sumw[i1] - sumw[ 0])*iq2nl_values[0]*iq2nl_values[0] + (sumw[i2] - sumw[i1])*iq2nl_values[1]*iq2nl_values[1]
- + (sumw[i3] - sumw[i2])*iq2nl_values[2]*iq2nl_values[2] + (sumw[kBlockSize] - sumw[i3])*iq2nl_values[3]*iq2nl_values[3];
- if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
- d = sumqx/sumq2; best = d*sumqx; is_shifted = false;
- }
- sumqx = (sumx[i1] - sumx[ 0])*shifted_values[0] + (sumx[i2] - sumx[i1])*shifted_values[1]
- + (sumx[i3] - sumx[i2])*shifted_values[2] + (sumx[kBlockSize] - sumx[i3])*shifted_values[3];
- sumq2 = (sumw[i1] - sumw[ 0])*shifted_values[0]*shifted_values[0] + (sumw[i2] - sumw[i1])*shifted_values[1]*shifted_values[1]
- + (sumw[i3] - sumw[i2])*shifted_values[2]*shifted_values[2] + (sumw[kBlockSize] - sumw[i3])*shifted_values[3]*shifted_values[3];
- if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
- d = sumqx/sumq2; best = d*sumqx; is_shifted = true;
- }
- sumqx = (sumx[i1] - sumx[ 0])*iq2nl_values[3] + (sumx[i2] - sumx[i1])*iq2nl_values[2]
- + (sumx[i3] - sumx[i2])*iq2nl_values[1] + (sumx[kBlockSize] - sumx[i3])*iq2nl_values[0];
- sumq2 = (sumw[i1] - sumw[ 0])*iq2nl_values[3]*iq2nl_values[3] + (sumw[i2] - sumw[i1])*iq2nl_values[2]*iq2nl_values[2]
- + (sumw[i3] - sumw[i2])*iq2nl_values[1]*iq2nl_values[1] + (sumw[kBlockSize] - sumw[i3])*iq2nl_values[0]*iq2nl_values[0];
- if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
- d = sumqx/sumq2; best = d*sumqx; is_shifted = false;
- }
- sumqx = (sumx[i1] - sumx[ 0])*shifted_values[3] + (sumx[i2] - sumx[i1])*shifted_values[2]
- + (sumx[i3] - sumx[i2])*shifted_values[1] + (sumx[kBlockSize] - sumx[i3])*shifted_values[0];
- sumq2 = (sumw[i1] - sumw[ 0])*shifted_values[3]*shifted_values[3] + (sumw[i2] - sumw[i1])*shifted_values[2]*shifted_values[2]
- + (sumw[i3] - sumw[i2])*shifted_values[1]*shifted_values[1] + (sumw[kBlockSize] - sumw[i3])*shifted_values[0]*shifted_values[0];
- if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
- d = sumqx/sumq2; best = d*sumqx; is_shifted = true;
- }
- }
+ for (int itry = -ntry; itry <= ntry; ++itry) {
+ id = (itry*step + iq5nl_values[0])/max;
+ sumqx_p = sumq2_p = 0;
+ sumqx_m = sumq2_m = 0;
+ for (int j = 0; j < 16; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq5nl(iq5nl_values, al);
+ float q = iq5nl_values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq5nl(iq5nl_values, -al);
+ q = iq5nl_values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ if (sumq2_p > 0 && sumqx_p*sumqx_p > best*sumq2_p) {
+ d = sumqx_p/sumq2_p; best = d * sumqx_p; is_shifted = false;
+ }
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d * sumqx_m; is_shifted = false;
+ }
+ id = (itry*step + shifted_values[0])/max;
+ sumqx_p = sumq2_p = 0;
+ sumqx_m = sumq2_m = 0;
+ for (int j = 0; j < 16; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq5nl(shifted_values, al);
+ float q = shifted_values[l];
+ sumqx_p += w*q*xb[j];
+ sumq2_p += w*q*q;
+ l = best_index_iq5nl(shifted_values, -al);
+ q = shifted_values[l];
+ sumqx_m += w*q*xb[j];
+ sumq2_m += w*q*q;
+ }
+ if (sumq2_p > 0 && sumqx_p*sumqx_p > best*sumq2_p) {
+ d = sumqx_p/sumq2_p; best = d * sumqx_p; is_shifted = true;
+ }
+ if (sumq2_m > 0 && sumqx_m*sumqx_m > best*sumq2_m) {
+ d = sumqx_m/sumq2_m; best = d * sumqx_m; is_shifted = true;
}
}
+ if (d) {
+ const int8_t * block_values = is_shifted ? shifted_values : iq5nl_values;
+ float sumqx = 0, sumq2 = 0;
+ id = 1/d;
+ for (int j = 0; j < 16; ++j) {
+ float w = weight[j];
+ float al = id*xb[j];
+ int l = best_index_iq5nl(block_values, al);
+ float q = block_values[l];
+ sumqx += w*q*xb[j];
+ sumq2 += w*q*q;
+ }
+ if (sumq2 > 0) d = sumqx/sumq2;
+ }
scales[ib] = d;
if (is_shifted) extra |= (1 << ib);
float abs_scale = fabsf(scales[ib]);
- max_abs_scale = MAX(max_abs_scale, abs_scale);
+ if (abs_scale > max_abs_scale) {
+ max_abs_scale = abs_scale; max_scale = scales[ib];
+ }
+
}
if (!max_abs_scale) continue;
-
- float d = max_abs_scale/15;
+ float d = -max_scale/32;
y[ibl].d = GGML_FP32_TO_FP16(d);
y[ibl].extra = extra;
+
float id = 1/d;
float sumqx = 0, sumq2 = 0;
- for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
- int ls = nearest_int(0.5f*(id*scales[ib]+15));
- ls = MAX(0, MIN(15, ls));
- y[ibl].scales[ib/2] |= (ls << 4*(ib%2));
- ls = 2*ls - 15;
+ for (int ib = 0; ib < QK_K/16; ++ib) {
+ int ls = nearest_int(id*scales[ib]);
+ ls = MAX(-32, MIN(31, ls));
+ int uls = ls + 32;
+ y[ibl].scales_l[ib/2] |= ((uls & 0xf) << 4*(ib%2));
+ y[ibl].scales_h[ib/4] |= ((uls >> 4) << 2*(ib%4));
float dl = d * ls;
if (dl) {
- const int8_t * block_values = y[ibl].extra & (1 << ib) ? shifted_values : iq2nl_values;
- const float * xb = xbl + kBlockSize*ib;
+ const int8_t * block_values = y[ibl].extra & (1 << ib) ? shifted_values : iq5nl_values;
+ const float * xb = xbl + 16*ib;
if (quant_weights) {
- const float * qw = quant_weights + ibl*QK_K + ib*kBlockSize;
- for (int j = 0; j < kBlockSize; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ const float * qw = quant_weights + ibl*QK_K + ib*16;
+ for (int j = 0; j < 16; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
} else {
- for (int j = 0; j < kBlockSize; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
+ for (int j = 0; j < 16; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
}
float idl = 1/dl;
int ib32 = ib/2;
int offset = 16*(ib%2);
- uint8_t * qs = y[ibl].qs + 32*(ib32/4) + offset;
+ uint8_t * qs = y[ibl].qs + 32*(ib32/2) + offset;
+ uint8_t * qh = y[ibl].qh + 32*(ib32/8) + offset;
for (int j = 0; j < 16; ++j) {
const float al = idl*xb[j];
- int ibest = best_index_iq2nl(block_values, al);
- qs[j] |= (ibest << 2*(ib32%4));
+ int ibest = best_index_iq5nl(block_values, al);
+ qs[j] |= ((ibest & 0xf) << 4*(ib32%2));
+ qh[j] |= ((ibest >> 4) << (ib32%8));
float w = weight[j];
float q = block_values[ibest]*ls;
sumqx += w*q*xb[j];
@@ -839,77 +1216,30 @@ void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const fl
if (sumq2 > 0) y[ibl].d = GGML_FP32_TO_FP16(sumqx/sumq2);
}
+
}
+
}
-void quantize_row_iq2_k_ref(const float * GGML_RESTRICT x, block_iq2_k * GGML_RESTRICT y, int64_t k) {
+void quantize_row_iq5_k_ref(const float * x, block_iq5_k * y, int64_t k) {
assert(k % QK_K == 0);
- quantize_iq2_k(x, (void *)y, 1, k, nullptr);
+ quantize_iq5_k(x, (void *)y, 1, k, nullptr);
}
-void quantize_row_iq2_k(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+void quantize_row_iq5_k(const float * x, void * vy, int64_t k) {
assert(k % QK_K == 0);
- block_iq2_k * y = (block_iq2_k *)vy;
- quantize_row_iq2_k_ref(x, y, k);
+ block_iq5_k * y = (block_iq5_k *)vy;
+ quantize_row_iq5_k_ref(x, y, k);
}
-size_t quantize_iq2_k(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+size_t quantize_iq5_k(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
char * qrow = (char *)dst;
for (int64_t row = 0; row < nrows; ++row) {
- quantize_row_iq2_k_impl(src, (void *)qrow, n_per_row, imatrix);
+ quantize_row_iq5_k_impl(src, (void *)qrow, n_per_row, imatrix);
src += n_per_row;
- qrow += nblock*sizeof(block_iq2_k);
- }
- return nrows * nblock * sizeof(block_iq2_k);
-}
-
-void dequantize_row_iq2_k(const block_iq2_k * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) {
- assert(k % QK_K == 0);
- const int nb = k / QK_K;
-
- for (int i = 0; i < nb; i++) {
-
- const float d = GGML_FP16_TO_FP32(x[i].d);
- const uint8_t * qs = x[i].qs;
-
- uint16_t extra = x[i].extra;
-
- int shift = 0;
- for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- float dl1 = d * (2*(x[i].scales[ib32] & 0xf) - 15);
- float dl2 = d * (2*(x[i].scales[ib32] >> 4) - 15);
- const int8_t * values1 = extra & 1 ? iq2nl_values + 4 : iq2nl_values;
- const int8_t * values2 = extra & 2 ? iq2nl_values + 4 : iq2nl_values;
- extra >>= 2;
- for (int j = 0; j < 16; ++j) {
- y[j+ 0] = dl1 * values1[(qs[j+ 0] >> shift) & 3];
- y[j+16] = dl2 * values2[(qs[j+16] >> shift) & 3];
- }
- y += 32;
- shift += 2;
- if (shift == 8) { qs += 32; shift = 0; }
- }
-
+ qrow += nblock*sizeof(block_iq5_k);
}
-
-}
-
-void vec_dot_iq2_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
- assert(n % QK_K == 0);
- assert(nrc == 1);
- GGML_UNUSED(nrc);
- GGML_UNUSED(bx);
- GGML_UNUSED(by);
- GGML_UNUSED(bs);
-
- if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ2_K, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
- return;
- }
-
- const int nb = n / QK_K;
-
- const block_iq2_k * x = (const block_iq2_k *)vx;
- const block_q8_K * y = (const block_q8_K *)vy;
+ return nrows * nblock * sizeof(block_iq5_k);
}
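
Two details of the new iq5_K quantization are worth noting. First, best_index_iq5nl avoids a binary search over the 32 non-linear values: since values[31] - values[0] = 121 - (-126) = 247, the 248-entry iq5nl_index table maps (int)x - values[0] directly to a candidate index, and a single neighbour comparison finishes the job. For example, x = 0.3 gives table slot 126 -> index 16, and since |0.3 - values[16]| = 1.3 < |values[17] - 0.3| = 4.7 the nearest value is values[16] = -1. Second, unlike the exhaustive 2-bit search, quantize_row_iq5_k_impl refines each 16-weight block scale iteratively: it tries 2*ntry+1 = 11 candidate scales around both signs and both halves of the table (the shifted half being the base half offset by +2), keeps the best weighted least-squares fit, and finally re-optimizes the fp16 super-block scale from the accumulated sumqx/sumq2.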
diff --git a/ggml/src/iqk/iqk_quantize.h b/ggml/src/iqk/iqk_quantize.h
index f36eff38..b8b03169 100644
--- a/ggml/src/iqk/iqk_quantize.h
+++ b/ggml/src/iqk/iqk_quantize.h
@@ -13,17 +13,23 @@ extern "C" {
#define GGML_RESTRICT restrict
#endif
+void quantize_row_iq2_k_ref(const float * GGML_RESTRICT x, block_iq2_k * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq2_k(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+size_t quantize_iq2_k(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+void dequantize_row_iq2_k(const block_iq2_k * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+void vec_dot_iq2_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
void quantize_row_iq4_k_ref(const float * GGML_RESTRICT x, block_iq4_k * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_k(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
size_t quantize_iq4_k(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
void dequantize_row_iq4_k(const block_iq4_k * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_iq4_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
-void quantize_row_iq2_k_ref(const float * GGML_RESTRICT x, block_iq2_k * GGML_RESTRICT y, int64_t k);
-void quantize_row_iq2_k(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
-size_t quantize_iq2_k(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
-void dequantize_row_iq2_k(const block_iq2_k * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
-void vec_dot_iq2_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void quantize_row_iq5_k_ref(const float * GGML_RESTRICT x, block_iq5_k * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq5_k(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+size_t quantize_iq5_k(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+void dequantize_row_iq5_k(const block_iq5_k * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+void vec_dot_iq5_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
#ifdef __cplusplus
}
diff --git a/include/llama.h b/include/llama.h
index 3549d3f3..7bccd4bb 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -170,8 +170,9 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_Q4_0_8_8 = 35, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ1_BN = 36, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_BN = 37, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ4_K = 38, // except 1d tensors
- LLAMA_FTYPE_MOSTLY_IQ2_K = 39, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ2_K = 38, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ4_K = 39, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ5_K = 40, // except 1d tensors
LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
};
diff --git a/src/llama.cpp b/src/llama.cpp
index 3f9a211c..4e7e4a6c 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3761,8 +3761,9 @@ struct llama_model_loader {
case GGML_TYPE_IQ2_BN: ftype = LLAMA_FTYPE_MOSTLY_IQ2_BN; break;
case GGML_TYPE_IQ4_NL: ftype = LLAMA_FTYPE_MOSTLY_IQ4_NL; break;
case GGML_TYPE_IQ4_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ4_XS; break;
- case GGML_TYPE_IQ4_K: ftype = LLAMA_FTYPE_MOSTLY_IQ4_K; break;
case GGML_TYPE_IQ2_K: ftype = LLAMA_FTYPE_MOSTLY_IQ2_K; break;
+ case GGML_TYPE_IQ4_K: ftype = LLAMA_FTYPE_MOSTLY_IQ4_K; break;
+ case GGML_TYPE_IQ5_K: ftype = LLAMA_FTYPE_MOSTLY_IQ5_K; break;
case GGML_TYPE_IQ3_S: ftype = LLAMA_FTYPE_MOSTLY_IQ3_S; break;
case GGML_TYPE_Q4_0_4_4: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_4; break;
case GGML_TYPE_Q4_0_4_8: ftype = LLAMA_FTYPE_MOSTLY_Q4_0_4_8; break;
@@ -4458,8 +4459,9 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
case LLAMA_FTYPE_MOSTLY_IQ1_M: return "IQ1_M - 1.75 bpw";
case LLAMA_FTYPE_MOSTLY_IQ4_NL: return "IQ4_NL - 4.5 bpw";
case LLAMA_FTYPE_MOSTLY_IQ4_XS: return "IQ4_XS - 4.25 bpw";
- case LLAMA_FTYPE_MOSTLY_IQ4_K: return "IQ4_K - 4.5 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_K: return "IQ2_K - 2.375 bpw";
+ case LLAMA_FTYPE_MOSTLY_IQ4_K: return "IQ4_K - 4.5 bpw";
+ case LLAMA_FTYPE_MOSTLY_IQ5_K: return "IQ5_K - 5.5 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_S: return "IQ3_S - 3.4375 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_M: return "IQ3_S mix - 3.66 bpw";
case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: return "Q4_0_4_4";
@@ -15635,7 +15637,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
new_type == GGML_TYPE_Q5_K || new_type == GGML_TYPE_Q6_K || new_type == GGML_TYPE_IQ4_XS ||
new_type == GGML_TYPE_IQ2_XS || new_type == GGML_TYPE_IQ2_XXS || new_type == GGML_TYPE_IQ2_S ||
new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S || new_type == GGML_TYPE_IQ3_S ||
- new_type == GGML_TYPE_IQ1_M || new_type == GGML_TYPE_IQ4_K || new_type == GGML_TYPE_IQ2_K) {
+ new_type == GGML_TYPE_IQ1_M || new_type == GGML_TYPE_IQ4_K || new_type == GGML_TYPE_IQ2_K ||
+ new_type == GGML_TYPE_IQ5_K) {
int nx = tensor->ne[0];
int ny = tensor->ne[1];
if (nx % QK_K != 0) {
@@ -15666,6 +15669,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
case GGML_TYPE_IQ4_XS: new_type = GGML_TYPE_IQ4_NL; break;
case GGML_TYPE_IQ4_K:
case GGML_TYPE_Q4_K: new_type = GGML_TYPE_Q5_0; break;
+ case GGML_TYPE_IQ5_K:
case GGML_TYPE_Q5_K: new_type = GGML_TYPE_Q5_1; break;
case GGML_TYPE_Q6_K: new_type = GGML_TYPE_Q8_0; break;
default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
@@ -15768,8 +15772,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_IQ2_BN: default_type = GGML_TYPE_IQ2_BN; break;
case LLAMA_FTYPE_MOSTLY_IQ4_NL: default_type = GGML_TYPE_IQ4_NL; break;
case LLAMA_FTYPE_MOSTLY_IQ4_XS: default_type = GGML_TYPE_IQ4_XS; break;
- case LLAMA_FTYPE_MOSTLY_IQ4_K: default_type = GGML_TYPE_IQ4_K; break;
case LLAMA_FTYPE_MOSTLY_IQ2_K: default_type = GGML_TYPE_IQ2_K; break;
+ case LLAMA_FTYPE_MOSTLY_IQ4_K: default_type = GGML_TYPE_IQ4_K; break;
+ case LLAMA_FTYPE_MOSTLY_IQ5_K: default_type = GGML_TYPE_IQ5_K; break;
case LLAMA_FTYPE_MOSTLY_IQ3_S: default_type = GGML_TYPE_IQ3_S; break;
case LLAMA_FTYPE_MOSTLY_IQ3_M: default_type = GGML_TYPE_IQ3_S; break;
case LLAMA_FTYPE_MOSTLY_Q4_0_4_4: default_type = GGML_TYPE_Q4_0_4_4; break;