 examples/quantize-stats/quantize-stats.cpp | 127
 examples/quantize/quantize.cpp             |   1
 ggml/include/ggml.h                        |   2
 ggml/src/ggml-common.h                     |   7
 ggml/src/ggml-cuda.cu                      |   1
 ggml/src/ggml-cuda/common.cuh              |   7
 ggml/src/ggml-cuda/convert.cu              |  48
 ggml/src/ggml-cuda/iqk_mmvq.cu             |  69
 ggml/src/ggml-cuda/iqk_mmvq.cuh            |   4
 ggml/src/ggml-cuda/mmvq.cu                 |   3
 ggml/src/ggml-metal.m                      |  32
 ggml/src/ggml-metal.metal                  | 175
 ggml/src/ggml-quants.c                     |  50
 ggml/src/ggml-quants.h                     |   4
 ggml/src/ggml.c                            |  30
 ggml/src/iqk/iqk_mul_mat.cpp               | 190
 ggml/src/iqk/iqk_quantize.cpp              | 417
 ggml/src/iqk/iqk_quantize.h                |   6
 include/llama.h                            |   1
 src/llama.cpp                              |  13
 20 files changed, 1130 insertions(+), 57 deletions(-)
diff --git a/examples/quantize-stats/quantize-stats.cpp b/examples/quantize-stats/quantize-stats.cpp
index 88a7d2b9..34d05bf2 100644
--- a/examples/quantize-stats/quantize-stats.cpp
+++ b/examples/quantize-stats/quantize-stats.cpp
@@ -3,6 +3,10 @@
#include "ggml.h"
#include "llama.h"
+#define GGML_COMMON_DECL_C
+#define GGML_COMMON_IMPL_C
+#include "../ggml/src/ggml-common.h"
+
#include <algorithm>
#include <cassert>
#include <cinttypes>
@@ -21,6 +25,20 @@
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
+#include <intrin.h>
+#include <ammintrin.h>
+#include <nmmintrin.h>
+#include <immintrin.h>
+#include <stdlib.h>
+inline int popcount(uint8_t x) { return __popcnt(x); }
+inline int popcount(uint16_t x) { return __popcnt(x); }
+inline int popcount(uint32_t x) { return __popcnt(x); }
+inline int popcount(uint64_t x) { return _mm_popcnt_u64(x); }
+#else
+constexpr int popcount(uint8_t x) { return __builtin_popcount(x); }
+constexpr int popcount(uint16_t x) { return __builtin_popcount(x); }
+constexpr int popcount(uint32_t x) { return __builtin_popcount(x); }
+constexpr int popcount(uint64_t x) { return __builtin_popcountll(x); }
#endif
struct quantize_stats_params {
@@ -228,6 +246,97 @@ static void test_roundtrip_on_layer(
}
}
+static void analyze_iq4ks(const char * name, int nrows, int n_per_row, const float * values, float& tot_mse, float& tot_elements) {
+ int row_size = ggml_row_size(GGML_TYPE_IQ4_KS, n_per_row);
+ int nblock = n_per_row/QK_K;
+ int nthread = std::max(1, int(std::thread::hardware_concurrency()/2));
+ int chunk = (nrows + 8*nthread - 1)/(8*nthread);
+ std::mutex mutex;
+ int counter = 0;
+ float mse0 = 0, mse = 0;
+ auto compute = [&mutex, &counter, &mse0, &mse, values, row_size, nblock, nrows, n_per_row, chunk] () {
+ std::vector<char> Q(row_size);
+ float lmse0 = 0, lmse = 0;
+ while (true) {
+ std::unique_lock<std::mutex> lock(mutex);
+ int first = counter; counter += chunk;
+ if (first >= nrows) {
+ mse += lmse; mse0 += lmse0;
+ return;
+ }
+ lock.unlock();
+ int last = std::min(first + chunk, nrows);
+ for (int row = first; row < last; ++row) {
+ auto xr = values + row*n_per_row;
+ ggml_quantize_chunk(GGML_TYPE_IQ4_KS, xr, (void *)Q.data(), 0, 1, n_per_row, nullptr);
+ const float * dptr = (const float *)Q.data();
+ const float d = *dptr;
+ const block_iq4_ks * iq4 = (const block_iq4_ks *)(dptr + 1);
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+ const float * xbl = xr + ibl*QK_K;
+ auto qs = iq4[ibl].qs;
+ for (int ib = 0; ib < QK_K/32; ++ib) {
+ const float * xb = xbl + 32*ib;
+ const float dl = d * ((iq4[ibl].scales[ib] & 254) - 127);
+ const int8_t * values = iq4k_values + ((iq4[ibl].scales[ib] & 1) << 4);
+ for (int j = 0; j < 16; j += 2) {
+ uint16_t v0 = *(const uint16_t *)(qs + j);
+ int non = popcount(v0);
+ float diff1 = xb[j+ 0] - dl*values[qs[j+0] & 0xf];
+ float diff2 = xb[j+16] - dl*values[qs[j+0] >> 4];
+ float diff3 = xb[j+ 1] - dl*values[qs[j+1] & 0xf];
+ float diff4 = xb[j+17] - dl*values[qs[j+1] >> 4];
+ lmse0 += diff1*diff1 + diff2*diff2 + diff3*diff3 + diff4*diff4;
+ if (non%2 == 0) {
+ lmse += diff1*diff1 + diff2*diff2 + diff3*diff3 + diff4*diff4;
+ } else {
+ float best = std::numeric_limits<float>::max();
+ for (int k = 0; k < 16; k += 4) {
+ uint16_t v = v0 ^ (1 << k);
+ uint8_t v1 = v;
+ uint8_t v2 = v >> 8;
+ diff1 = xb[j+ 0] - dl*values[v1 & 0xf];
+ diff2 = xb[j+16] - dl*values[v1 >> 4];
+ diff3 = xb[j+ 1] - dl*values[v2 & 0xf];
+ diff4 = xb[j+17] - dl*values[v2 >> 4];
+ float score = diff1*diff1 + diff2*diff2 + diff3*diff3 + diff4*diff4;
+ if (score < best) best = score;
+ }
+ lmse += best;
+ }
+ }
+ qs += 16;
+ }
+ }
+ }
+ }
+ };
+ std::vector<std::thread> workers(nthread-1);
+ for (auto& w : workers) w = std::thread(compute);
+ compute();
+ for (auto& w : workers) w.join();
+ tot_mse += mse;
+ tot_elements += n_per_row*nrows;
+ printf("%s: %g %g %g\n", name, sqrt(mse0/(n_per_row*nrows)), sqrt(mse/(n_per_row*nrows)), sqrt(tot_mse/tot_elements));
+}
+
+static void analyze_iq4ks(const ggml_tensor * t, float& tot_mse, float& tot_elements) {
+ if (!ggml_is_contiguous(t) || (t->type != GGML_TYPE_F32 && t->type != GGML_TYPE_F16 && t->type != GGML_TYPE_BF16)) {
+ return;
+ }
+ if (t->type == GGML_TYPE_F32) {
+ analyze_iq4ks(t->name, t->ne[1], t->ne[0], (const float *)t->data, tot_mse, tot_elements);
+ } else {
+ std::vector<float> aux(t->ne[0]*t->ne[1]);
+ if (t->type == GGML_TYPE_F16) {
+ ggml_fp16_to_fp32_row((const ggml_fp16_t *)t->data, aux.data(), aux.size());
+ } else {
+ ggml_bf16_to_fp32_row((const ggml_bf16_t *)t->data, aux.data(), aux.size());
+ }
+ analyze_iq4ks(t->name, t->ne[1], t->ne[0], aux.data(), tot_mse, tot_elements);
+ }
+}
+
static void print_fp_stats(const char * msg, const uint64_t * counts) {
printf("===== %s\n", msg);
uint64_t tot = 0; for (int i = 0; i < 32; ++i) tot += counts[i];
@@ -263,6 +372,7 @@ int main(int argc, char ** argv) {
int max_thread = 0;
bool invalid_param = false;
bool analyze_fp = false;
+ bool analyze = false;
std::string arg;
for (int i = 1; i < argc; i++) {
arg = argv[i];
@@ -278,6 +388,8 @@ int main(int argc, char ** argv) {
params.per_layer_stats = true;
} else if (arg == "-afp" || arg == "--analyze-fp") {
analyze_fp = true;
+ } else if (arg == "-a" || arg == "--analyze") {
+ analyze = true;
} else if (arg == "--histogram") {
params.print_histogram = true;
} else if (arg == "-m" || arg == "--model") {
@@ -404,6 +516,21 @@ int main(int argc, char ** argv) {
std::vector<char> quantized_scratch;
std::vector<float> output_scratch;
+ if (analyze) {
+ float tot_mse = 0, tot_elements = 0;
+ for (const auto& kv_tensor : tensors) {
+ if (!layer_included(params, kv_tensor.first)) {
+ continue;
+ }
+ if (kv_tensor.second->ne[0] == 1 || kv_tensor.second->ne[1] == 1) {
+ // we never quantize those
+ continue;
+ }
+ analyze_iq4ks(kv_tensor.second, tot_mse, tot_elements);
+ }
+ return 0;
+ }
+
if (analyze_fp) {
for (const auto& kv_tensor : tensors) {
if (!layer_included(params, kv_tensor.first)) {
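The new --analyze pass above is a what-if experiment for IQ4_KS: each row is quantized as usual, then for every 16-bit group of four nibbles two RMS errors are accumulated — mse0 for the unconstrained quantization, and mse for a variant where groups with odd popcount are repaired by flipping the low bit of whichever nibble costs least. This estimates the accuracy price of reserving one implicit parity bit per four quants. A standalone sketch of the repair step (hypothetical helper, not part of the commit):

    #include <cstdint>
    #include <limits>

    // Force even popcount on a 16-bit group of four 4-bit codes by flipping
    // the low bit of one nibble; err() scores a candidate (caller-supplied).
    template <typename ErrFn>
    static uint16_t force_even_parity(uint16_t v, ErrFn err) {
        if (__builtin_popcount(v) % 2 == 0) return v;      // parity already even
        uint16_t best_v = v;
        float best_e = std::numeric_limits<float>::max();
        for (int k = 0; k < 16; k += 4) {                  // low bit of each nibble
            uint16_t cand = v ^ uint16_t(1u << k);
            float e = err(cand);
            if (e < best_e) { best_e = e; best_v = cand; }
        }
        return best_v;
    }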
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 3cc19f70..1ace5720 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -45,6 +45,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
{ "IQ4_XS", LLAMA_FTYPE_MOSTLY_IQ4_XS, " 4.25 bpw non-linear quantization", },
{ "IQ4_KS", LLAMA_FTYPE_MOSTLY_IQ4_KS, " 4.25 bpw non-linear quantization", },
{ "IQ2_K", LLAMA_FTYPE_MOSTLY_IQ2_K, " 2.375 bpw non-linear quantization",},
+ { "IQ2_KS", LLAMA_FTYPE_MOSTLY_IQ2_KS, " 2.1875 bpw non-linear quantization",},
{ "IQ3_K", LLAMA_FTYPE_MOSTLY_IQ3_K, " 3.44 bpw non-linear quantization", },
{ "IQ3_KL", LLAMA_FTYPE_MOSTLY_IQ3_KL, " 4 bpw non-linear quantization mix",},
{ "IQ4_K", LLAMA_FTYPE_MOSTLY_IQ4_K, " 4.5 bpw non-linear quantization", },
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index 3054dabd..fd7c23b9 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -404,6 +404,7 @@ extern "C" {
GGML_TYPE_IQ2_TN = 142,
GGML_TYPE_IQ1_TN = 143,
GGML_TYPE_IQ4_KS = 144,
+ GGML_TYPE_IQ2_KS = 145,
GGML_TYPE_COUNT,
};
@@ -460,6 +461,7 @@ extern "C" {
GGML_FTYPE_MOSTLY_IQ2_TN = 135, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ1_TN = 136, // except 1d tensors
GGML_FTYPE_MOSTLY_IQ4_KS = 137, // except 1d tensors
+ GGML_FTYPE_MOSTLY_IQ2_KS = 138, // except 1d tensors
};
// available tensor operations:
diff --git a/ggml/src/ggml-common.h b/ggml/src/ggml-common.h
index 7eaf7437..3a7b8989 100644
--- a/ggml/src/ggml-common.h
+++ b/ggml/src/ggml-common.h
@@ -456,6 +456,13 @@ typedef struct {
static_assert(sizeof(block_iq2_k) == sizeof(ggml_half) + sizeof(uint16_t) + QK_K/32 + QK_K/4, "wrong iq2_k block size/padding");
typedef struct {
+ uint16_t extra;
+ uint8_t scales[QK_K/64];
+ uint8_t qs[QK_K/4];
+} block_iq2_ks;
+static_assert(sizeof(block_iq2_ks) == sizeof(uint16_t) + QK_K/64 + QK_K/4, "wrong iq2_ks block size/padding");
+
+typedef struct {
ggml_half d;
uint16_t extra;
uint16_t scales_h;
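The arithmetic behind the 2.1875 bpw figure quoted for IQ2_KS in quantize.cpp: a block covers QK_K = 256 weights with 2 bytes of extra bits, QK_K/64 = 4 packed scale bytes, and QK_K/4 = 64 quant bytes, i.e. 70 bytes per block; the fp16 super-scale is stored once per row (row_meta_size = 2 in the ggml.c hunk below), so it adds only 16 bits per row. A compile-time check of the block math (a sketch, not in the commit):

    constexpr int    kQK       = 256;                    // QK_K
    constexpr int    kBlkBytes = 2 + kQK/64 + kQK/4;     // extra + scales + qs = 70
    constexpr double kBpw      = 8.0 * kBlkBytes / kQK;  // bits per weight
    static_assert(kBpw == 2.1875, "block_iq2_ks is 2.1875 bpw before the per-row fp16 scale");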
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index 0657252d..6648b7f8 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -2830,6 +2830,7 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
diff --git a/ggml/src/ggml-cuda/common.cuh b/ggml/src/ggml-cuda/common.cuh
index c00cef29..a6a9c3d3 100644
--- a/ggml/src/ggml-cuda/common.cuh
+++ b/ggml/src/ggml-cuda/common.cuh
@@ -516,6 +516,13 @@ struct ggml_cuda_type_traits<GGML_TYPE_IQ2_K> {
};
template<>
+struct ggml_cuda_type_traits<GGML_TYPE_IQ2_KS> {
+ static constexpr int qk = QK_K;
+ static constexpr int qr = QR4_XS;
+ static constexpr int qi = QI4_XS;
+};
+
+template<>
struct ggml_cuda_type_traits<GGML_TYPE_IQ3_K> {
static constexpr int qk = QK_K;
static constexpr int qr = QR4_XS;
diff --git a/ggml/src/ggml-cuda/convert.cu b/ggml/src/ggml-cuda/convert.cu
index 62dd52a2..1e4421b1 100644
--- a/ggml/src/ggml-cuda/convert.cu
+++ b/ggml/src/ggml-cuda/convert.cu
@@ -729,10 +729,10 @@ static __global__ void dequantize_block_iq2_k(const void * __restrict__ vx, dst_
int il = tid%16; // 0...15
dst_t * y = yy + i*QK_K + 128*ib128 + 2*il;
const float d = (float)x[i].d;
- const float dl1 = d * (2*((x[i].scales[4*ib128+0] >> 4*(il/8)) & 0xf) - 15);
- const float dl2 = d * (2*((x[i].scales[4*ib128+1] >> 4*(il/8)) & 0xf) - 15);
- const float dl3 = d * (2*((x[i].scales[4*ib128+2] >> 4*(il/8)) & 0xf) - 15);
- const float dl4 = d * (2*((x[i].scales[4*ib128+3] >> 4*(il/8)) & 0xf) - 15);
+ const float dl1 = d * (((x[i].scales[4*ib128+0] >> 4*(il/8)) & 0xf) - 8);
+ const float dl2 = d * (((x[i].scales[4*ib128+1] >> 4*(il/8)) & 0xf) - 8);
+ const float dl3 = d * (((x[i].scales[4*ib128+2] >> 4*(il/8)) & 0xf) - 8);
+ const float dl4 = d * (((x[i].scales[4*ib128+3] >> 4*(il/8)) & 0xf) - 8);
const uint8_t * qs = x[i].qs + 32*ib128 + 2*il;
const int16_t extra = x[i].extra >> (8*ib128 + (il/8));
for (int j = 0; j < 2; ++j) {
@@ -744,6 +744,34 @@ static __global__ void dequantize_block_iq2_k(const void * __restrict__ vx, dst_
}
template<typename dst_t>
+static __global__ void dequantize_block_iq2_ks(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t n_per_row, int64_t row_size) {
+
+ int64_t ii = blockIdx.x;
+ int64_t row = (QK_K * ii) / n_per_row;
+ const char * cx = (const char *)vx + row * row_size;
+ const float d = (float)*(const half *)cx;
+ const block_iq2_ks * x = (const block_iq2_ks *)(cx + sizeof(half));
+ const int64_t i = ii - (row*n_per_row)/QK_K;
+
+ const int tid = threadIdx.x;
+ int ib128 = tid/16; // 0 or 1
+ int il = tid%16; // 0...15
+ dst_t * y = yy + ii*QK_K + 128*ib128 + 2*il;
+ const int16_t extra = x[i].extra >> 4*ib128;
+ const float dl1 = d * (((x[i].scales[2*ib128+0] & 0xf) | ((extra >> 4) & 0x10)) - 16);
+ const float dl2 = d * (((x[i].scales[2*ib128+0] >> 4) | ((extra >> 5) & 0x10)) - 16);
+ const float dl3 = d * (((x[i].scales[2*ib128+1] & 0xf) | ((extra >> 6) & 0x10)) - 16);
+ const float dl4 = d * (((x[i].scales[2*ib128+1] >> 4) | ((extra >> 7) & 0x10)) - 16);
+ const uint8_t * qs = x[i].qs + 32*ib128 + 2*il;
+ for (int j = 0; j < 2; ++j) {
+ y[j+ 0] = dl1 * iq2nl_values[((qs[j] >> 0) & 0x03) + ((extra << 2) & 4)];
+ y[j+32] = dl2 * iq2nl_values[((qs[j] >> 2) & 0x03) + ((extra << 1) & 4)];
+ y[j+64] = dl3 * iq2nl_values[((qs[j] >> 4) & 0x03) + ((extra >> 0) & 4)];
+ y[j+96] = dl4 * iq2nl_values[((qs[j] >> 6) & 0x03) + ((extra >> 1) & 4)];
+ }
+}
+
+template<typename dst_t>
static __global__ void dequantize_block_iq3_k(const void * __restrict__ vx, dst_t * __restrict__ yy) {
const int i = blockIdx.x;
@@ -953,6 +981,14 @@ static void dequantize_row_iq4_ks_cuda(const void * vx, dst_t * y, const int64_t
}
template<typename dst_t>
+static void dequantize_row_iq2_ks_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
+ const int64_t k = nrows * n_per_row;
+ const int64_t row_size = ggml_row_size(GGML_TYPE_IQ2_KS, n_per_row);
+ const int nb = (k + QK_K - 1) / QK_K;
+ dequantize_block_iq2_ks<<<nb, 32, 0, stream>>>(vx, y, n_per_row, row_size);
+}
+
+template<typename dst_t>
static void dequantize_row_iq2_k_cuda(const void * vx, dst_t * y, const int64_t nrows, const int64_t n_per_row, cudaStream_t stream) {
const int64_t k = nrows * n_per_row;
const int nb = (k + QK_K - 1) / QK_K;
@@ -1116,6 +1152,8 @@ to_fp16_cuda_t ggml_get_to_fp16_cuda(ggml_type type) {
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_KS:
return dequantize_row_iq4_ks_cuda;
+ case GGML_TYPE_IQ2_KS:
+ return dequantize_row_iq2_ks_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_K:
@@ -1187,6 +1225,8 @@ to_fp32_cuda_t ggml_get_to_fp32_cuda(ggml_type type) {
return dequantize_row_iq4_xs_cuda;
case GGML_TYPE_IQ4_KS:
return dequantize_row_iq4_ks_cuda;
+ case GGML_TYPE_IQ2_KS:
+ return dequantize_row_iq2_ks_cuda;
case GGML_TYPE_IQ2_K:
return dequantize_row_iq2_k_cuda;
case GGML_TYPE_IQ3_K:
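For reference, the bit layout decoded by dequantize_block_iq2_ks above, written as a scalar loop over one 256-weight super-block (a sketch under the block_iq2_ks layout from ggml-common.h with QK_K = 256; not part of the commit):

    #include <cstdint>

    static const int8_t iq2nl_values[8] = {-31, -13, 1, 17, -26, -8, 6, 22};
    struct block_iq2_ks { uint16_t extra; uint8_t scales[4]; uint8_t qs[64]; };

    // d is the per-row fp16 super-scale, already converted to float.
    static void dequant_iq2_ks_ref(float d, const block_iq2_ks * x, float * y) {
        for (int half = 0; half < 2; ++half) {                    // two 128-weight halves
            const uint8_t * qs = x->qs + 32*half;
            for (int sub = 0; sub < 4; ++sub) {                   // four 32-weight sub-blocks
                int ib32 = 4*half + sub;
                int sl = (x->scales[ib32/2] >> 4*(ib32%2)) & 0xf; // low 4 scale bits
                int sh = (x->extra >> (8 + ib32)) & 1;            // 5th scale bit
                float dl = d * ((sl | (sh << 4)) - 16);           // signed 5-bit scale
                int tab = ((x->extra >> ib32) & 1) << 2;          // table-half select
                for (int j = 0; j < 32; ++j)
                    y[128*half + 32*sub + j] = dl * iq2nl_values[((qs[j] >> 2*sub) & 3) + tab];
            }
        }
    }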
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cu b/ggml/src/ggml-cuda/iqk_mmvq.cu
index a1f2d28c..9ca219e4 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cu
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cu
@@ -217,7 +217,6 @@ __device__ __forceinline__ float vec_dot_iq4_k_q8_1(
#define VDR_IQ4_KS_Q8_1_MMVQ 4
#define VDR_IQ4_KS_Q8_1_MMQ 4
-// TODO
__device__ __forceinline__ float vec_dot_iq4_ks_q8_1(
const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
@@ -425,7 +424,7 @@ __device__ __forceinline__ float vec_dot_iq2_k_q8_1(
// -> scales_l[4*(i4/4) + k] >> 4*(((i4%4)/2)%2)
const uint32_t * scales = (const uint32_t *)bq2->scales;
- uint32_t s32 = __vsub4(((scales[i4/4] >> 4*(((i4%4)/2)%2)) & 0x0f0f0f0f) << 1, 0x0f0f0f0f);
+ uint32_t s32 = __vsub4((scales[i4/4] >> 4*(((i4%4)/2)%2)) & 0x0f0f0f0f, 0x08080808);
const int8_t * s8 = (const int8_t *)&s32;
aux32[0] = ((val1 >> 0) & 0x03030303); aux32[1] = ((val2 >> 0) & 0x03030303); values = all_values + ((extra & 0x01) << 8);
@@ -455,6 +454,65 @@ __device__ __forceinline__ float vec_dot_iq2_k_q8_1(
}
+#define VDR_IQ2_KS_Q8_1_MMVQ 4
+#define VDR_IQ2_KS_Q8_1_MMQ 4
+
+__device__ __forceinline__ float vec_dot_iq2_ks_q8_1(
+ const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1, const int & kbx, const int & iqs) {
+
+ float scale = *(const half *)vbq;
+ const block_iq2_ks * bq2 = (const block_iq2_ks *)((const char *)vbq + sizeof(half)) + kbx;
+
+ int i4 = iqs/4; // 0...7. We will process q8 blocks 4*(i4/4), 4*(i4/4)+1, 4*(i4/4)+2, 4*(i4/4)+3
+ const int32_t * q8_1 = (const int *)bq8_1[4*(i4/4)+0].qs + 2*(i4%4);
+ const int32_t * q8_2 = (const int *)bq8_1[4*(i4/4)+1].qs + 2*(i4%4);
+ const int32_t * q8_3 = (const int *)bq8_1[4*(i4/4)+2].qs + 2*(i4%4);
+ const int32_t * q8_4 = (const int *)bq8_1[4*(i4/4)+3].qs + 2*(i4%4);
+
+ const uint16_t * q2 = (const uint16_t *)bq2->qs + 16*(i4/4) + 4*(i4%4);
+ const uint16_t extra = bq2->extra >> 4*(i4/4);
+
+ const int * all_values = (const int *)iq2k_table;
+ const int * values;
+
+ uint32_t val1 = q2[0] | (q2[1] << 16), val2 = q2[2] | (q2[3] << 16);
+
+ uint32_t aux32[2];
+ const uint8_t * a8 = (const uint8_t *)&aux32;
+ int v1, v2;
+
+ int8_t s8[4];
+ s8[0] = ((bq2->scales[2*(i4/4)+0] & 0xf) | ((extra >> 4) & 0x10)) - 16;
+ s8[1] = ((bq2->scales[2*(i4/4)+0] >> 4) | ((extra >> 5) & 0x10)) - 16;
+ s8[2] = ((bq2->scales[2*(i4/4)+1] & 0xf) | ((extra >> 6) & 0x10)) - 16;
+ s8[3] = ((bq2->scales[2*(i4/4)+1] >> 4) | ((extra >> 7) & 0x10)) - 16;
+
+ aux32[0] = ((val1 >> 0) & 0x03030303); aux32[1] = ((val2 >> 0) & 0x03030303); values = all_values + ((extra & 0x01) << 8);
+ v1 = int_from_table_4(a8 + 0, values);
+ v2 = int_from_table_4(a8 + 4, values);
+ int sumi1 = ggml_cuda_dp4a(v2, q8_1[1], ggml_cuda_dp4a(v1, q8_1[0], 0)) * s8[0];
+
+ aux32[0] = ((val1 >> 2) & 0x03030303); aux32[1] = ((val2 >> 2) & 0x03030303); values = all_values + ((extra & 0x02) << 7);
+ v1 = int_from_table_4(a8 + 0, values);
+ v2 = int_from_table_4(a8 + 4, values);
+ int sumi2 = ggml_cuda_dp4a(v2, q8_2[1], ggml_cuda_dp4a(v1, q8_2[0], 0)) * s8[1];
+
+ aux32[0] = ((val1 >> 4) & 0x03030303); aux32[1] = ((val2 >> 4) & 0x03030303); values = all_values + ((extra & 0x04) << 6);
+ v1 = int_from_table_4(a8 + 0, values);
+ v2 = int_from_table_4(a8 + 4, values);
+ int sumi3 = ggml_cuda_dp4a(v2, q8_3[1], ggml_cuda_dp4a(v1, q8_3[0], 0)) * s8[2];
+
+ aux32[0] = ((val1 >> 6) & 0x03030303); aux32[1] = ((val2 >> 6) & 0x03030303); values = all_values + ((extra & 0x08) << 5);
+ v1 = int_from_table_4(a8 + 0, values);
+ v2 = int_from_table_4(a8 + 4, values);
+ int sumi4 = ggml_cuda_dp4a(v2, q8_4[1], ggml_cuda_dp4a(v1, q8_4[0], 0)) * s8[3];
+
+ return scale * (__low2float(bq8_1[4*(i4/4)+0].ds) * sumi1
+ + __low2float(bq8_1[4*(i4/4)+1].ds) * sumi2
+ + __low2float(bq8_1[4*(i4/4)+2].ds) * sumi3
+ + __low2float(bq8_1[4*(i4/4)+3].ds) * sumi4);
+}
+
#define VDR_IQ3_K_Q8_1_MMVQ 4
#define VDR_IQ3_K_Q8_1_MMQ 4
@@ -645,6 +703,13 @@ void mul_mat_vec_iq4_ks_q8_1_cuda(
iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ4_KS, VDR_IQ4_KS_Q8_1_MMVQ, vec_dot_iq4_ks_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
}
+void mul_mat_vec_iq2_ks_q8_1_cuda(
+ const void * vx, const void * vy, float * dst,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
+
+ iqk_mul_mat_vec_q_cuda<GGML_TYPE_IQ2_KS, VDR_IQ2_KS_Q8_1_MMVQ, vec_dot_iq2_ks_q8_1>(vx, vy, dst, ncols_x, nrows_x, nrows_y, ncols_y, nrows_dst, stream);
+}
+
void mul_mat_vec_iq5_k_q8_1_cuda(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream) {
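Each sumi term in vec_dot_iq2_ks_q8_1 above packs four table-expanded quants into a 32-bit lane and multiply-accumulates against the q8_1 activations with ggml_cuda_dp4a, which lowers to __dp4a on NVIDIA hardware. Its scalar semantics, for reference (a sketch):

    #include <cstdint>

    // c += sum of the four pairwise signed-byte products of a and b.
    static int dp4a_ref(int a, int b, int c) {
        const int8_t * pa = (const int8_t *)&a;
        const int8_t * pb = (const int8_t *)&b;
        for (int i = 0; i < 4; ++i) c += int(pa[i]) * int(pb[i]);
        return c;
    }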
diff --git a/ggml/src/ggml-cuda/iqk_mmvq.cuh b/ggml/src/ggml-cuda/iqk_mmvq.cuh
index 8d76be1d..3a93a1b6 100644
--- a/ggml/src/ggml-cuda/iqk_mmvq.cuh
+++ b/ggml/src/ggml-cuda/iqk_mmvq.cuh
@@ -32,3 +32,7 @@ void mul_mat_vec_iq4_ks_q8_1_cuda(
const void * vx, const void * vy, float * dst,
const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
+void mul_mat_vec_iq2_ks_q8_1_cuda(
+ const void * vx, const void * vy, float * dst,
+ const int ncols_x, const int nrows_x, const int nrows_y, const int ncols_y, const int nrows_dst, cudaStream_t stream);
+
diff --git a/ggml/src/ggml-cuda/mmvq.cu b/ggml/src/ggml-cuda/mmvq.cu
index 8e3c4aa4..e312b266 100644
--- a/ggml/src/ggml-cuda/mmvq.cu
+++ b/ggml/src/ggml-cuda/mmvq.cu
@@ -462,6 +462,9 @@ void ggml_cuda_op_mul_mat_vec_q(
case GGML_TYPE_IQ4_KS:
mul_mat_vec_iq4_ks_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
+ case GGML_TYPE_IQ2_KS:
+ mul_mat_vec_iq2_ks_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
+ break;
case GGML_TYPE_IQ5_K:
mul_mat_vec_iq5_k_q8_1_cuda(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_padded_row_size, src1_ncols, nrows_dst, stream);
break;
diff --git a/ggml/src/ggml-metal.m b/ggml/src/ggml-metal.m
index a326a36f..d5e8d6ae 100644
--- a/ggml/src/ggml-metal.m
+++ b/ggml/src/ggml-metal.m
@@ -108,6 +108,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_K,
+ GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_KS,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_K,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_K,
GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_K,
@@ -150,6 +151,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_K_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_IQ5_K_F32,
@@ -186,6 +188,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_K_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ5_K_F32,
@@ -219,6 +222,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_K_F32,
@@ -252,6 +256,7 @@ enum ggml_metal_kernel_type {
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_K_F32,
+ GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_KS_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_K_F32,
GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_K_F32,
@@ -646,6 +651,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS, get_rows_iq4_xs, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS, get_rows_iq4_ks, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_K, get_rows_iq2_k, true);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_KS, get_rows_iq2_ks, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_K, get_rows_iq3_k, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_K, get_rows_iq4_k, true);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_K, get_rows_iq5_k, true);
@@ -688,6 +694,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_XS_F32, mul_mv_iq4_xs_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_KS_F32, mul_mv_iq4_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_K_F32, mul_mv_iq2_k_f32, ctx->support_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_KS_F32, mul_mv_iq2_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ3_K_F32, mul_mv_iq3_k_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ4_K_F32, mul_mv_iq4_k_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_IQ5_K_F32, mul_mv_iq5_k_f32, ctx->support_simdgroup_reduction);
@@ -724,6 +731,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_XS_F32, mul_mv_id_iq4_xs_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_KS_F32, mul_mv_id_iq4_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_K_F32, mul_mv_id_iq2_k_f32, ctx->support_simdgroup_reduction);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_KS_F32, mul_mv_id_iq2_ks_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ3_K_F32, mul_mv_id_iq3_k_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ4_K_F32, mul_mv_id_iq4_k_f32, ctx->support_simdgroup_reduction);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ5_K_F32, mul_mv_id_iq5_k_f32, ctx->support_simdgroup_reduction);
@@ -757,6 +765,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32, mul_mm_iq4_xs_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32, mul_mm_iq4_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F32, mul_mm_iq2_k_f32, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F32, mul_mm_iq2_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F32, mul_mm_iq3_k_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_K_F32, mul_mm_iq4_k_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_K_F32, mul_mm_iq5_k_f32, ctx->support_simdgroup_mm);
@@ -790,6 +799,7 @@ static struct ggml_backend_metal_context * ggml_metal_init(int n_cb) {
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32, mul_mm_id_iq4_xs_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32, mul_mm_id_iq4_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_K_F32, mul_mm_id_iq2_k_f32, ctx->support_simdgroup_mm);
+ GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_KS_F32, mul_mm_id_iq2_ks_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_K_F32, mul_mm_id_iq3_k_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_K_F32, mul_mm_id_iq4_k_f32, ctx->support_simdgroup_mm);
GGML_METAL_ADD_KERNEL(GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_K_F32, mul_mm_id_iq5_k_f32, ctx->support_simdgroup_mm);
@@ -1988,6 +1998,7 @@ static enum ggml_status ggml_metal_graph_compute(
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_XS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_K_F32 ].pipeline; break;
+ case GGML_TYPE_IQ2_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ2_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ3_K_F32 ].pipeline; break;
case GGML_TYPE_IQ4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ4_K_F32 ].pipeline; break;
case GGML_TYPE_IQ5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_IQ5_K_F32 ].pipeline; break;
@@ -2217,6 +2228,12 @@ static enum ggml_status ggml_metal_graph_compute(
nth1 = 16;
pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_K_F32].pipeline;
} break;
+ case GGML_TYPE_IQ2_KS:
+ {
+ nth0 = 4;
+ nth1 = 16;
+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_IQ2_KS_F32].pipeline;
+ } break;
case GGML_TYPE_IQ3_K:
{
nth0 = 4;
@@ -2276,6 +2293,11 @@ static enum ggml_status ggml_metal_graph_compute(
src0t == GGML_TYPE_IQ3_K || src0t == GGML_TYPE_IQ2_TN|| src0t == GGML_TYPE_IQ1_TN) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
+ else if (src0t == GGML_TYPE_IQ2_KS) {
+ const int mem_size = 64*sizeof(float);
+ [encoder setThreadgroupMemoryLength:mem_size atIndex:0];
+ [encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, ne11, ne12*ne13) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
+ }
else if (src0t == GGML_TYPE_IQ2_XXS || src0t == GGML_TYPE_IQ2_XS) {
const int mem_size = src0t == GGML_TYPE_IQ2_XXS ? 256*8+128 : 512*8+128;
[encoder setThreadgroupMemoryLength:mem_size atIndex:0];
@@ -2384,6 +2406,7 @@ static enum ggml_status ggml_metal_graph_compute(
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_XS_F32 ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_K_F32 ].pipeline; break;
+ case GGML_TYPE_IQ2_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ2_KS_F32 ].pipeline; break;
case GGML_TYPE_IQ3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ3_K_F32 ].pipeline; break;
case GGML_TYPE_IQ4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ4_K_F32 ].pipeline; break;
case GGML_TYPE_IQ5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MM_ID_IQ5_K_F32 ].pipeline; break;
@@ -2601,6 +2624,12 @@ static enum ggml_status ggml_metal_graph_compute(
nth1 = 16;
pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_K_F32].pipeline;
} break;
+ case GGML_TYPE_IQ2_KS:
+ {
+ nth0 = 4;
+ nth1 = 16;
+ pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_MUL_MV_ID_IQ2_KS_F32].pipeline;
+ } break;
case GGML_TYPE_IQ3_K:
{
nth0 = 4;
@@ -2667,7 +2696,7 @@ static enum ggml_status ggml_metal_graph_compute(
if (src0t == GGML_TYPE_Q4_0 || src0t == GGML_TYPE_Q4_1 || src0t == GGML_TYPE_Q5_0 || src0t == GGML_TYPE_Q6_0 ||
src0t == GGML_TYPE_Q5_1 || src0t == GGML_TYPE_Q8_0 || src0t == GGML_TYPE_Q2_K ||
src0t == GGML_TYPE_IQ1_S || src0t == GGML_TYPE_IQ1_M || src0t == GGML_TYPE_IQ2_S||
- src0t == GGML_TYPE_IQ1_BN|| src0t == GGML_TYPE_IQ2_BN|| src0t == GGML_TYPE_IQ2_K||
+ src0t == GGML_TYPE_IQ1_BN|| src0t == GGML_TYPE_IQ2_BN|| src0t == GGML_TYPE_IQ2_K|| src0t == GGML_TYPE_IQ2_KS ||
src0t == GGML_TYPE_IQ3_K || src0t == GGML_TYPE_IQ2_TN|| src0t == GGML_TYPE_IQ1_TN) {
[encoder dispatchThreadgroups:MTLSizeMake((ne01 + 7)/8, _ne1, tgz) threadsPerThreadgroup:MTLSizeMake(nth0, nth1, 1)];
}
@@ -2737,6 +2766,7 @@ static enum ggml_status ggml_metal_graph_compute(
case GGML_TYPE_IQ4_XS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_XS ].pipeline; break;
case GGML_TYPE_IQ4_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_KS ].pipeline; break;
case GGML_TYPE_IQ2_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_K ].pipeline; break;
+ case GGML_TYPE_IQ2_KS: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ2_KS ].pipeline; break;
case GGML_TYPE_IQ3_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ3_K ].pipeline; break;
case GGML_TYPE_IQ4_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ4_K ].pipeline; break;
case GGML_TYPE_IQ5_K: pipeline = ctx->kernels[GGML_METAL_KERNEL_TYPE_GET_ROWS_IQ5_K ].pipeline; break;
diff --git a/ggml/src/ggml-metal.metal b/ggml/src/ggml-metal.metal
index ea0cda99..5ed424d3 100644
--- a/ggml/src/ggml-metal.metal
+++ b/ggml/src/ggml-metal.metal
@@ -3685,6 +3685,7 @@ constexpr constant static float kvalues_iq6k_f[128] = {
};
constexpr constant static float kvalues_iq2k_f[8] = { -31.f, -13.f, 1.f, 17.f, -26.f, -8.f, 6.f, 22.f };
+constexpr constant static half kvalues_iq2k_h[8] = { -31.h, -13.h, 1.h, 17.h, -26.h, -8.h, 6.h, 22.h };
constexpr constant static float kvalues_iq3k_f[16] = { -63.f, -40.f, -23.f, -10.f, 1.f, 13.f, 28.f, 47.f, -59.f, -36.f, -19.f, -6.f, 5.f, 17.f, 32.f, 51.f };
constexpr constant static half kvalues_iq3k_h[16] = { -63.h, -40.h, -23.h, -10.h, 1.h, 13.h, 28.h, 47.h, -59.h, -36.h, -19.h, -6.h, 5.h, 17.h, 32.h, 51.h };
@@ -6260,6 +6261,156 @@ kernel void kernel_mul_mv_iq2_k_f32(
kernel_mul_mv_iq2_k_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, nullptr, tgpig, tiisg, sgitg);
}
+void kernel_mul_mv_iq2_ks_f32_impl(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ int64_t ne00,
+ int64_t ne01,
+ int64_t ne02,
+ int64_t ne10,
+ int64_t ne12,
+ int64_t ne0,
+ int64_t ne1,
+ uint r2,
+ uint r3,
+ threadgroup int8_t * shared_values,
+ uint3 tgpig,
+ uint tiisg,
+ uint sgitg) {
+
+ const int nb = ne00/QK_K;
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * N_SIMDGROUP + sgitg) * N_DST;
+ const uint row_size = 2 + nb*sizeof(block_iq2_ks);
+
+ const uint i12 = im%ne12;
+ const uint i13 = im/ne12;
+
+ const uint offset0 = (i12/r2)*(ne01) + (i13/r3)*(ne01*ne02);
+
+ device const char * cx = (device const char *) src0 + (first_row + offset0)*row_size;
+ device const float * y = (device const float *) src1 + r1*ne10 + im*ne00*ne1;
+
+ float yl[32];
+ float sumf[N_DST]={0.f};
+
+ const int ix = tiisg/8; // 0...3
+ const int it = tiisg%8; // 0...7
+ const int iq = it/4; // 0 or 1
+ const int ir = it%4; // 0...3
+
+ device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir;
+
+ threadgroup float * all_values = (threadgroup float *)shared_values + 32*sgitg;
+ {
+ //int row = tiisg%N_DST;
+ //device const half * dptr = (device const half *)(cx + row*row_size);
+ //const float d = *dptr;
+ //all_values[8*row + tiisg/N_DST] = d*iq2nl_values[tiisg/N_DST];
+ //threadgroup_barrier(mem_flags::mem_threadgroup);
+ int row = tiisg/8;
+ int pos = tiisg%8;
+ device const half * dptr = (device const half *)(cx + row*row_size);
+ const float d = *dptr;
+ all_values[8*row + pos] = d*kvalues_iq2k_f[pos];
+ simdgroup_barrier(mem_flags::mem_none);
+ //threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
+ cx += sizeof(half);
+
+ uint32_t q32[2];
+ uint32_t aux32[2];
+ thread const uint8_t * aux8 = (thread const uint8_t *)aux32;
+
+ for (int ib = ix; ib < nb; ib += 4) {
+
+ for (int i = 0; i < 8; ++i) {
+ yl[i+ 0] = y4[i+ 0];
+ yl[i+ 8] = y4[i+32];
+ yl[i+16] = y4[i+64];
+ yl[i+24] = y4[i+96];
+ }
+
+ device const block_iq2_ks * x = (device const block_iq2_ks *)cx + ib;
+ device const uint16_t * q16 = (device const uint16_t *)x->qs + 16*iq + 4*ir;
+ device const uint16_t * sc = (device const uint16_t *)x->scales;
+ device const uint16_t * ex = (device const uint16_t *)&x->extra;
+
+ for (int row = 0; row < N_DST; row++) {
+
+ threadgroup const float * row_values = all_values + 8*row;
+
+ uint32_t sc32 = (sc[iq] | (sc[iq] << 12)) & 0x0f0f0f0f;
+ thread const int8_t * s8 = (thread const int8_t *)&sc32;
+
+ q32[0] = q16[0] | (q16[1] << 16);
+ q32[1] = q16[2] | (q16[3] << 16);
+
+ uint8_t extra = ex[0] << 4*(1-iq);
+
+ float4 acc = {0.f};
+ for (int l = 0; l < 4; ++l) {
+ threadgroup const float * values = row_values + ((extra >> (2 + l)) & 4);
+ aux32[0] = (q32[0] >> 2*l) & 0x03030303;
+ aux32[1] = (q32[1] >> 2*l) & 0x03030303;
+ for (int j = 0; j < 8; ++j) acc[l] += yl[8*l+j] * values[aux8[j]];
+ }
+ extra = ex[0] >> (8 + 4*iq);
+ sumf[row] += acc[0] * (s8[0] - (extra & 1 ? 0 : 16)) + acc[1] * (s8[2] - (extra & 2 ? 0 : 16))
+ + acc[2] * (s8[1] - (extra & 4 ? 0 : 16)) + acc[3] * (s8[3] - (extra & 8 ? 0 : 16));
+
+ q16 += row_size/2;
+ sc += row_size/2;
+ ex += row_size/2;
+
+ }
+
+ y4 += 4 * QK_K;
+ }
+
+ for (int row = 0; row < N_DST; row += 2) {
+ float2 tmp = {sumf[row], sumf[row+1]};
+ tmp = simd_sum(tmp);
+ if (tiisg < 2) {
+ dst[r1*ne0 + im*ne0*ne1 + first_row + row + tiisg] = tmp[tiisg];
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq2_ks_f32")]]
+kernel void kernel_mul_mv_iq2_ks_f32(
+ device const void * src0,
+ device const float * src1,
+ device float * dst,
+ constant int64_t & ne00,
+ constant int64_t & ne01,
+ constant int64_t & ne02,
+ constant uint64_t & nb00,
+ constant uint64_t & nb01,
+ constant uint64_t & nb02,
+ constant int64_t & ne10,
+ constant int64_t & ne11,
+ constant int64_t & ne12,
+ constant uint64_t & nb10,
+ constant uint64_t & nb11,
+ constant uint64_t & nb12,
+ constant int64_t & ne0,
+ constant int64_t & ne1,
+ constant uint & r2,
+ constant uint & r3,
+ threadgroup int8_t * shared_values [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq2_ks_f32_impl(src0, src1, dst, ne00, ne01, ne02, ne10, ne12, ne0, ne1, r2, r3, shared_values, tgpig, tiisg, sgitg);
+}
+
void kernel_mul_mv_iq3_k_f32_impl(
device const void * src0,
device const float * src1,
@@ -7569,6 +7720,26 @@ void dequantize_iq2_k(device const block_iq2_k * xb, short il, thread type4x4 &
}
template <typename type4x4>
+void dequantize_iq2_ks(device const block_iq2_ks * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256
+ device const uint16_t * q16 = (device const uint16_t *)xb->qs + 16*(il/8) + 8*(il&1);
+ const short ib32 = il/2;
+ half d = (((xb->scales[ib32/2] >> 4*(ib32%2)) & 0xf) - ((xb->extra >> (8 + ib32)) & 1 ? 0 : 16));
+
+ constant half4 * half_values = (constant half4 *)kvalues_iq2k_h;
+ half4 values = half_values[(xb->extra >> ib32) & 1] * d;
+
+ const int shift = 2*((il%8)/2);
+ thread uint16_t aux16[2];
+ thread const uint8_t * aux8 = (thread const uint8_t *)aux16;
+ for (int i = 0; i < 4; ++i) {
+ aux16[0] = (q16[2*i+0] >> shift) & 0x0303;
+ aux16[1] = (q16[2*i+1] >> shift) & 0x0303;
+ for (int j = 0; j < 4; ++j) reg[i][j] = values[aux8[j]];
+ }
+}
+
+template <typename type4x4>
void dequantize_iq3_k(device const block_iq3_k * xb, short il, thread type4x4 & reg) {
// il is 0...15 for QK_K = 256
device const uint16_t * q16l = (device const uint16_t *)xb->qs + 16*(il/8) + 8*(il&1);
@@ -8194,6 +8365,7 @@ template [[host_name("kernel_get_rows_iq2_bn")]] kernel get_rows_q_t kernel_get
template [[host_name("kernel_get_rows_iq1_tn")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq1_bn, half, 4, dequantize_iq1_bn>>;
template [[host_name("kernel_get_rows_iq2_tn")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq2_tn, float, 16, dequantize_iq2_tn>>;
template [[host_name("kernel_get_rows_iq4_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>>;
+template [[host_name("kernel_get_rows_iq2_ks")]] kernel get_rows_q_t kernel_get_rows_q2<DequantizerRS<float4x4, block_iq2_ks, half, 16, dequantize_iq2_ks>>;
//
// matrix-matrix multiplication
@@ -8237,6 +8409,7 @@ template [[host_name("kernel_mul_mm_iq2_bn_f32")]] kernel mat_mm_t kernel_mul_m
template [[host_name("kernel_mul_mm_iq1_tn_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn>>;
template [[host_name("kernel_mul_mm_iq2_tn_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq2_tn, float, 16, dequantize_iq2_tn>>;
template [[host_name("kernel_mul_mm_iq4_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>>;
+template [[host_name("kernel_mul_mm_iq2_ks_f32")]] kernel mat_mm_t kernel_mul_mm<half, simdgroup_half8x8, DequantizerRS<half4x4, block_iq2_ks, half, 16, dequantize_iq2_ks>>;
//
// indirect matrix-matrix multiplication
@@ -8277,6 +8450,7 @@ template [[host_name("kernel_mul_mm_id_iq6_k_f32")]] kernel mat_mm_id_t kernel
template [[host_name("kernel_mul_mm_id_iq1_tn_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq1_bn, half, 4, dequantize_iq1_bn>>;
template [[host_name("kernel_mul_mm_id_iq2_tn_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq2_tn, float, 16, dequantize_iq2_tn>>;
template [[host_name("kernel_mul_mm_id_iq4_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq4_ks, float, 16, dequantize_iq4_ks>>;
+template [[host_name("kernel_mul_mm_id_iq2_ks_f32")]] kernel mat_mm_id_t kernel_mul_mm_id<DequantizerRS<half4x4, block_iq2_ks, half, 16, dequantize_iq2_ks>>;
//
// matrix-vector multiplication
@@ -8494,6 +8668,7 @@ template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t
template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_ks_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq2_k_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_k_f32_impl>>;
+template [[host_name("kernel_mul_mv_id_iq2_ks_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_ks_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq3_k_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq3_k_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq4_k_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_k_f32_impl>>;
template [[host_name("kernel_mul_mv_id_iq5_k_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq5_k_f32_impl>>;
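One subtlety in kernel_mul_mv_iq2_ks_f32_impl above: (sc[iq] | (sc[iq] << 12)) & 0x0f0f0f0f spreads the four scale nibbles n0..n3 into bytes in the order n0, n2, n1, n3, which is why acc[1] is paired with s8[2] and acc[2] with s8[1]. A quick host-side check of that spread (a sketch, not in the commit):

    #include <cassert>
    #include <cstdint>

    int main() {
        uint16_t sc = 0x4321;  // nibbles n3..n0 = 4, 3, 2, 1
        uint32_t s32 = (uint32_t(sc) | (uint32_t(sc) << 12)) & 0x0f0f0f0fu;
        assert(s32 == 0x04020301u);  // little-endian bytes: n0, n2, n1, n3
        return 0;
    }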
diff --git a/ggml/src/ggml-quants.c b/ggml/src/ggml-quants.c
index 40978ac0..a845eaf5 100644
--- a/ggml/src/ggml-quants.c
+++ b/ggml/src/ggml-quants.c
@@ -12873,7 +12873,6 @@ static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict
const int * kmap_q2xs = iq2_data[gindex].map;
const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
- GGML_ASSERT(quant_weights && "missing quantization weights");
GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
@@ -12908,8 +12907,12 @@ static void quantize_row_iq2_xxs_impl(const float * restrict x, void * restrict
for (int ib = 0; ib < QK_K/32; ++ib) {
const float * xb = xbl + 32*ib;
- const float * qw = quant_weights + QK_K*ibl + 32*ib;
- for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
+ if (quant_weights) {
+ const float * qw = quant_weights + QK_K*ibl + 32*ib;
+ for (int i = 0; i < 32; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
+ } else {
+ for (int i = 0; i < 32; ++i) weight[i] = 0.25f*sigma2 + xb[i]*xb[i];
+ }
for (int i = 0; i < 32; ++i) waux[i] = sqrtf(weight[i]);
for (int k = 0; k < 4; ++k) {
int nflip = 0;
@@ -13046,7 +13049,6 @@ static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict v
const int * kmap_q2xs = iq2_data[gindex].map;
const uint16_t * kneighbors_q2xs = iq2_data[gindex].neighbours;
- GGML_ASSERT(quant_weights && "missing quantization weights");
GGML_ASSERT(kmap_q2xs && "forgot to call ggml_quantize_init()?");
GGML_ASSERT(kgrid_q2xs && "forgot to call ggml_quantize_init()?");
GGML_ASSERT(kneighbors_q2xs && "forgot to call ggml_quantize_init()?");
@@ -13084,8 +13086,12 @@ static void quantize_row_iq2_xs_impl(const float * restrict x, void * restrict v
for (int ib = 0; ib < QK_K/16; ++ib) {
const float * xb = xbl + 16*ib;
- const float * qw = quant_weights + QK_K*ibl + 16*ib;
- for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
+ if (quant_weights) {
+ const float * qw = quant_weights + QK_K*ibl + 16*ib;
+ for (int i = 0; i < 16; ++i) weight[i] = qw[i] * sqrtf(sigma2 + xb[i]*xb[i]);
+ } else {
+ for (int i = 0; i < 16; ++i) weight[i] = 0.25f*sigma2 + xb[i]*xb[i];
+ }
for (int i = 0; i < 16; ++i) waux[i] = sqrtf(weight[i]);
for (int k = 0; k < 2; ++k) {
int nflip = 0;
@@ -13230,6 +13236,17 @@ size_t quantize_iq2_xxs(const float * restrict src, void * restrict dst, int64_t
return nrow * nblock * sizeof(block_iq2_xxs);
}
+void quantize_row_iq2_xxs(const float * restrict x, void * restrict vy, int64_t k) {
+ assert(k % QK_K == 0);
+ block_iq2_xxs * restrict y = vy;
+ quantize_row_iq2_xxs_ref(x, y, k);
+}
+
+void quantize_row_iq2_xxs_ref(const float * restrict x, block_iq2_xxs * restrict y, int64_t k) {
+ assert(k % QK_K == 0);
+ quantize_iq2_xxs(x, y, 1, k, NULL);
+}
+
size_t quantize_iq2_xs(const float * restrict src, void * restrict dst, int64_t nrow, int64_t n_per_row, const float * quant_weights) {
GGML_ASSERT(n_per_row%QK_K == 0);
int64_t nblock = n_per_row/QK_K;
@@ -13242,6 +13259,17 @@ size_t quantize_iq2_xs(const float * restrict src, void * restrict dst, int64_t
return nrow * nblock * sizeof(block_iq2_xs);
}
+void quantize_row_iq2_xs(const float * restrict x, void * restrict vy, int64_t k) {
+ assert(k % QK_K == 0);
+ block_iq2_xs * restrict y = vy;
+ quantize_row_iq2_xs_ref(x, y, k);
+}
+
+void quantize_row_iq2_xs_ref(const float * restrict x, block_iq2_xs * restrict y, int64_t k) {
+ assert(k % QK_K == 0);
+ quantize_iq2_xs(x, y, 1, k, NULL);
+}
+
//
// ============================================= 3-bit using D4 lattice
//
@@ -14947,10 +14975,11 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
return false;
}
- if (type != GGML_TYPE_IQ2_TN && type != GGML_TYPE_IQ1_TN && type != GGML_TYPE_IQ4_KS && nbytes % ggml_type_size(type) != 0) {
- fprintf(stderr, "%s: invalid size %zu for type %s (type size = %zu)\n", __func__, nbytes, ggml_type_name(type), ggml_type_size(type));
- return false;
- }
+ // Who needs this?
+ //if (type != GGML_TYPE_IQ2_TN && type != GGML_TYPE_IQ1_TN && type != GGML_TYPE_IQ4_KS && nbytes % ggml_type_size(type) != 0) {
+ // fprintf(stderr, "%s: invalid size %zu for type %s (type size = %zu)\n", __func__, nbytes, ggml_type_name(type), ggml_type_size(type));
+ // return false;
+ //}
const size_t nb = nbytes/ggml_type_size(type);
@@ -15160,6 +15189,7 @@ bool ggml_validate_row_data(enum ggml_type type, const void * data, size_t nbyte
} break;
case GGML_TYPE_Q6_0: break;
case GGML_TYPE_IQ2_K: break;
+ case GGML_TYPE_IQ2_KS: break;
case GGML_TYPE_IQ3_K: break;
case GGML_TYPE_IQ4_K: break;
case GGML_TYPE_IQ5_K: break;
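With the fallback above (weights of 0.25f*sigma2 + x*x when no importance matrix is supplied), IQ2_XXS and IQ2_XS gain from_float entry points and can be quantized directly. Typical use through the public API might look like this (a sketch; the row length is illustrative):

    #include <vector>
    #include "ggml.h"

    // Quantize one row of f32 weights to IQ2_XXS without an imatrix.
    void quantize_row_no_imatrix() {
        std::vector<float>   src(4096, 0.1f);
        std::vector<uint8_t> dst(ggml_row_size(GGML_TYPE_IQ2_XXS, src.size()));
        ggml_quantize_init(GGML_TYPE_IQ2_XXS);  // builds the iq2 grid/neighbour tables
        ggml_quantize_chunk(GGML_TYPE_IQ2_XXS, src.data(), dst.data(),
                            /*start=*/0, /*nrows=*/1, /*n_per_row=*/src.size(), /*imatrix=*/nullptr);
    }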
diff --git a/ggml/src/ggml-quants.h b/ggml/src/ggml-quants.h
index bad7e9d9..a40a6d37 100644
--- a/ggml/src/ggml-quants.h
+++ b/ggml/src/ggml-quants.h
@@ -35,6 +35,8 @@ void quantize_row_q6_K_ref(const float * GGML_RESTRICT x, block_q6_K * GGML_REST
void quantize_row_q8_K_ref(const float * GGML_RESTRICT x, block_q8_K * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_K64_ref(const float * GGML_RESTRICT x, block_q8_K64 * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq2_xxs_ref(const float * GGML_RESTRICT x, block_iq2_xxs * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq2_xs_ref (const float * GGML_RESTRICT x, block_iq2_xs * GGML_RESTRICT y, int64_t k);
void quantize_row_iq3_xxs_ref(const float * GGML_RESTRICT x, block_iq3_xxs * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_nl_ref (const float * GGML_RESTRICT x, block_iq4_nl * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_xs_ref (const float * GGML_RESTRICT x, block_iq4_xs * GGML_RESTRICT y, int64_t k);
@@ -59,6 +61,8 @@ void quantize_row_q6_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, in
void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_q8_K64(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq2_xxs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq2_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_iq3_xxs(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_nl (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
void quantize_row_iq4_xs (const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 97fa81b1..a9f795ae 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -920,8 +920,8 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.type_size = sizeof(block_iq2_xxs),
.is_quantized = true,
.to_float = (ggml_to_float_t) dequantize_row_iq2_xxs,
- .from_float = NULL,
- .from_float_ref = NULL,
+ .from_float = quantize_row_iq2_xxs,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq2_xxs_ref,
.vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
.vec_dot_type = GGML_TYPE_Q8_K,
.nrows = 1,
@@ -933,8 +933,8 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.type_size = sizeof(block_iq2_xs),
.is_quantized = true,
.to_float = (ggml_to_float_t) dequantize_row_iq2_xs,
- .from_float = NULL,
- .from_float_ref = NULL,
+ .from_float = quantize_row_iq2_xs,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq2_xs_ref,
.vec_dot = ggml_vec_dot_iq2_xs_q8_K,
.vec_dot_type = GGML_TYPE_Q8_K,
.nrows = 1,
@@ -1193,6 +1193,19 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.nrows = 1,
.row_meta_size = 0,
},
+ [GGML_TYPE_IQ2_KS] = {
+ .type_name = "iq2_ks",
+ .blck_size = QK_K,
+ .type_size = sizeof(block_iq2_ks),
+ .is_quantized = true,
+ .to_float = (ggml_to_float_t) dequantize_row_iq2_ks,
+ .from_float = quantize_row_iq2_ks,
+ .from_float_ref = (ggml_from_float_t)quantize_row_iq2_ks_ref,
+ .vec_dot = vec_dot_iq2_ks_q8_k,
+ .vec_dot_type = GGML_TYPE_Q8_K,
+ .nrows = 1,
+ .row_meta_size = 2,
+ },
[GGML_TYPE_IQ3_K] = {
.type_name = "iq3_k",
.blck_size = QK_K,
@@ -3906,6 +3919,7 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ4_XS: wtype = GGML_TYPE_IQ4_XS; break;
case GGML_FTYPE_MOSTLY_IQ4_KS: wtype = GGML_TYPE_IQ4_KS; break;
case GGML_FTYPE_MOSTLY_IQ2_K: wtype = GGML_TYPE_IQ2_K; break;
+ case GGML_FTYPE_MOSTLY_IQ2_KS: wtype = GGML_TYPE_IQ2_KS; break;
case GGML_FTYPE_MOSTLY_IQ3_K: wtype = GGML_TYPE_IQ3_K; break;
case GGML_FTYPE_MOSTLY_IQ4_K: wtype = GGML_TYPE_IQ4_K; break;
case GGML_FTYPE_MOSTLY_IQ5_K: wtype = GGML_TYPE_IQ5_K; break;
@@ -10406,6 +10420,7 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
@@ -10795,6 +10810,7 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
@@ -10934,6 +10950,7 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
@@ -14119,6 +14136,7 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
@@ -14498,6 +14516,7 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
@@ -14771,6 +14790,7 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
@@ -15371,6 +15391,7 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
case GGML_TYPE_IQ2_K:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ3_K:
case GGML_TYPE_IQ4_K:
case GGML_TYPE_IQ5_K:
@@ -22188,6 +22209,7 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_KS: result = quantize_iq4_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_K: result = quantize_iq2_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
+ case GGML_TYPE_IQ2_KS: result = quantize_iq2_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ3_K: result = quantize_iq3_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_K: result = quantize_iq4_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ5_K: result = quantize_iq5_k (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
diff --git a/ggml/src/iqk/iqk_mul_mat.cpp b/ggml/src/iqk/iqk_mul_mat.cpp
index dc457c2f..66d26a25 100644
--- a/ggml/src/iqk/iqk_mul_mat.cpp
+++ b/ggml/src/iqk/iqk_mul_mat.cpp
@@ -402,14 +402,20 @@ struct ScaleIQ4XS {
const __m128i m32 = _mm_set1_epi16(-32);
};
-template <typename Block, bool per_row_scale = false>
+template <typename Block, bool per_row_scale = false, bool is_f16 = false>
struct BaseDequantizer {
BaseDequantizer(const void * vx, size_t bx) : vx(vx), bx(bx) {}
inline void new_row(int ix) {
if constexpr (per_row_scale) {
- const float * dptr = (const float *)((const char *)vx + bx*ix);
- d = *dptr;
- x = (const Block *)(dptr + 1);
+ if constexpr (is_f16) {
+ const ggml_half * dptr = (const ggml_half *)((const char *)vx + bx*ix);
+ d = GGML_FP16_TO_FP32(*dptr);
+ x = (const Block *)(dptr + 1);
+ } else {
+ const float * dptr = (const float *)((const char *)vx + bx*ix);
+ d = *dptr;
+ x = (const Block *)(dptr + 1);
+ }
} else {
x = (const Block *)((const char *)vx + bx*ix);
}
@@ -889,13 +895,61 @@ struct DequantizerIQ2K final : public BaseDequantizer<block_iq2_k> {
inline __m128i make_scales(const uint8_t * scales_l) const {
uint64_t aux64; std::memcpy(&aux64, scales_l, 8);
auto scl = _mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), _mm_set1_epi8(0xf));
- return _mm_add_epi8(_mm_slli_epi16(scl, 1), m15);
+ return _mm_add_epi8(scl, m8);
}
Q2Bits bits;
const IQXKScales iqxk;
const __m512i values;
- const __m128i m15 = _mm_set1_epi8(-15);
+ const __m128i m8 = _mm_set1_epi8(-8);
+};
+
+struct DequantizerIQ2KS final : public BaseDequantizer<block_iq2_ks, true, true> {
+ DequantizerIQ2KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_values()) {}
+ template <typename Q8>
+ inline void new_block(int i, const Q8& q8, __m256 * accm, __m512i * scales) {
+ prepare(x[i].qs);
+ auto scales128 = make_scales(x[i].scales, x[i].extra >> 8);
+ auto shifts = _mm_and_si128(_mm_cmpeq_epi8(_mm_and_si128(_mm_set1_epi8(x[i].extra), hmask), hmask), m5);
+ auto scales_s = _mm_mullo_epi16(scales128, _mm_cvtepi8_epi16(_mm_add_epi8(m32, shifts)));
+ s8k.accum_mins(scales_s, q8, i, d, accm);
+ auto scales256 = MM256_SET_M128I(scales128, scales128);
+ auto all_scales = _mm512_inserti32x8(_mm512_castsi256_si512(scales256), scales256, 1);
+ scales[0] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[0]);
+ scales[1] = _mm512_shuffle_epi8(all_scales, s8k.shuffles512[1]);
+ }
+ inline void prepare(const uint8_t * q2) {
+ bits.prepare(q2);
+ bits.values[0] = _mm512_shuffle_epi8(values, bits.values[0]);
+ bits.values[1] = _mm512_shuffle_epi8(values, bits.values[1]);
+ bits.values[2] = _mm512_shuffle_epi8(values, bits.values[2]);
+ bits.values[3] = _mm512_shuffle_epi8(values, bits.values[3]);
+ }
+ static inline __m512i load_values() {
+ static const uint8_t kvalues_iq2nl[16] = {1, 19, 33, 49, 0, 0, 0, 0, 6, 24, 38, 54, 0, 0, 0, 0};
+ auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq2nl);
+ auto val256 = MM256_SET_M128I(val128, val128);
+ return _mm512_inserti32x8(_mm512_castsi256_si512(val256), val256, 1);
+ }
+ inline __m128i make_scales(const uint8_t * scales_l, uint8_t scales_h) const {
+ const uint16_t * scales = (const uint16_t *)scales_l;
+ uint32_t aux32 = scales[0] | (uint32_t(scales[1]) << 16);
+ auto scl = _mm_srlv_epi32(_mm_set1_epi32(aux32), shift);
+ scl = _mm_and_si128(_mm_shuffle_epi8(scl, shuffle), _mm_set1_epi8(0xf));
+ auto sch = _mm_set1_epi8(scales_h);
+ sch = _mm_and_si128(_mm_cmpeq_epi8(_mm_and_si128(sch, hmask), _mm_setzero_si128()), m16);
+ return _mm_cvtepi8_epi16(_mm_add_epi8(scl, sch));
+ }
+ Q2Bits bits;
+ Scales8K s8k;
+
+ const __m512i values;
+ const __m128i m16 = _mm_set1_epi8(-16);
+ const __m128i m5 = _mm_set1_epi8(5);
+ const __m128i m32 = _mm_set1_epi8(-32);
+ const __m128i hmask = _mm_set1_epi64x(0x8040201008040201);
+ const __m128i shuffle = _mm_set1_epi64x(0x0703060205010400);
+ const __m128i shift = _mm_set_epi32(0, 0, 4, 0);
};
struct DequantizerIQ3K final : public BaseDequantizer<block_iq3_k> {
@@ -1107,8 +1161,8 @@ struct DequantizerIQ6K final : public BaseDequantizer<block_iq6_k> {
const __m512i permute2 = _mm512_set_epi64(15, 14, 13, 12, 7, 6, 5, 4);
};
-struct DequantizerIQ4XXS final : public BaseDequantizer<block_iq4_ks, true> {
- DequantizerIQ4XXS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_512()) {}
+struct DequantizerIQ4KS final : public BaseDequantizer<block_iq4_ks, true> {
+ DequantizerIQ4KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_512()) {}
template <typename Q8>
inline void new_block(int i, const Q8& q8, __m256 * accm, __m512i * scales) {
auto scales128 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)x[i].scales));
@@ -1555,13 +1609,13 @@ struct DequantizerIQ2K final : public BaseDequantizer<block_iq2_k> {
inline __m128i make_scales(const uint8_t * scales_l) const {
uint64_t aux64; std::memcpy(&aux64, scales_l, 8);
auto scl = _mm_and_si128(_mm_set_epi64x(aux64 >> 4, aux64), maskl);
- return _mm_add_epi8(_mm_slli_epi16(scl, 1), m15);
+ return _mm_add_epi8(scl, m8);
}
Q2Bits bits;
const IQXKScales iqxk;
const __m256i values;
- const __m128i m15 = _mm_set1_epi8(-15);
+ const __m128i m8 = _mm_set1_epi8(-8);
const __m128i maskl = _mm_set1_epi8(0xf);
};
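
The m15 -> m8 change above re-encodes the 4-bit IQ2_K block scales. A scalar view of the decode, before and after (s is the stored 4-bit value):

// old: dl = d * (2*s - 15)   -> odd values in [-15, 15]
// new: dl = d * (s - 8)      -> all values in [-8, 7]

The new form matches what make_qx_quants() produces on the quantization side (see the iqk_quantize.cpp hunks below).
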
@@ -1740,8 +1794,8 @@ struct DequantizerIQ6K final : public BaseDequantizer<block_iq6_k> {
const __m256i mh = _mm256_set1_epi8(-128); // to avoid stupid warning about 0x80 overflowing
};
-struct DequantizerIQ4XXS final : public BaseDequantizer<block_iq4_ks, true> {
- DequantizerIQ4XXS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_256()) {}
+struct DequantizerIQ4KS final : public BaseDequantizer<block_iq4_ks, true> {
+ DequantizerIQ4KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_iq4nl_values_256()) {}
template <typename Q8>
inline __m256i new_block(int i, const Q8& q8, __m256 * accd) {
auto scales128 = _mm_cvtepu8_epi16(_mm_loadl_epi64((const __m128i *)x[i].scales));
@@ -1771,6 +1825,49 @@ struct DequantizerIQ4XXS final : public BaseDequantizer<block_iq4_ks, true> {
const __m256i shuff2 = _mm256_set_epi64x(0x0f0e0f0e0d0c0d0c, 0x0b0a0b0a09080908, 0x0f0e0f0e0d0c0d0c, 0x0b0a0b0a09080908);
};
+struct DequantizerIQ2KS final : public BaseDequantizer<block_iq2_ks, true, true> {
+ DequantizerIQ2KS(const void * vx, size_t bx) : BaseDequantizer(vx, bx), values(load_values()) {}
+ template <typename Q8>
+ inline __m256i new_block(int i, const Q8& q8, __m256 * accm) {
+ auto scales128 = make_scales(x[i].scales, x[i].extra >> 8);
+ auto shifts = _mm_and_si128(_mm_cmpeq_epi8(_mm_and_si128(_mm_set1_epi8(x[i].extra), hmask), hmask), m5);
+ auto scales_s = _mm_mullo_epi16(scales128, _mm_cvtepi8_epi16(_mm_add_epi8(m32, shifts)));
+ s8k.accum_mins(scales_s, q8, i, d, accm);
+ return MM256_SET_M128I(scales128, scales128);
+ }
+ inline void prepare(int i, int j) {
+ bits.prepare(x[i].qs, j);
+ bits.values[0] = _mm256_shuffle_epi8(values, bits.values[0]);
+ bits.values[1] = _mm256_shuffle_epi8(values, bits.values[1]);
+ bits.values[2] = _mm256_shuffle_epi8(values, bits.values[2]);
+ bits.values[3] = _mm256_shuffle_epi8(values, bits.values[3]);
+ }
+ static inline __m256i load_values() {
+ static const uint8_t kvalues_iq2nl[16] = {1, 19, 33, 49, 0, 0, 0, 0, 6, 24, 38, 54, 0, 0, 0, 0};
+ auto val128 = _mm_loadu_si128((const __m128i *)kvalues_iq2nl);
+ return MM256_SET_M128I(val128, val128);
+ }
+ inline __m128i make_scales(const uint8_t * scales_l, uint8_t scales_h) const {
+ const uint16_t * scales = (const uint16_t *)scales_l;
+ uint32_t aux32 = scales[0] | (uint32_t(scales[1]) << 16);
+ auto scl = _mm_srlv_epi32(_mm_set1_epi32(aux32), shift);
+ scl = _mm_and_si128(_mm_shuffle_epi8(scl, shuffle), _mm_set1_epi8(0xf));
+ auto sch = _mm_set1_epi8(scales_h);
+ sch = _mm_and_si128(_mm_cmpeq_epi8(_mm_and_si128(sch, hmask), _mm_setzero_si128()), m16);
+ return _mm_cvtepi8_epi16(_mm_add_epi8(scl, sch));
+ }
+ Q2Bits bits;
+ Scales8KBase s8k;
+
+ const __m256i values;
+ const __m128i m16 = _mm_set1_epi8(-16);
+ const __m128i m5 = _mm_set1_epi8(5);
+ const __m128i m32 = _mm_set1_epi8(-32);
+ const __m128i hmask = _mm_set1_epi64x(0x8040201008040201);
+ const __m128i shuffle = _mm_set1_epi64x(0x0703060205010400);
+ const __m128i shift = _mm_set_epi32(0, 0, 4, 0);
+};
+
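A note on the lookup table: kvalues_iq2nl stores iq2nl_values + 32 so that every entry is non-negative and safe to use with byte shuffles. Assuming accum_mins() folds scales_s times the q8 block sums into the accumulator, as for the other K-quants, the -32 offset (plus 5 for shifted blocks, via m32 and m5) cancels exactly. In scalar form (hypothetical helper):

// lut[] holds iq2nl_values + 32: {-31,-13,1,17} -> {1,19,33,49}.
// The shifted table is the base table + 5, so "-32 + 5" restores it.
static inline float iq2_ks_weight(float d, int ls, int q, bool shifted) {
    static const int8_t lut[4] = {1, 19, 33, 49};
    return d * ls * (lut[q] - 32 + (shifted ? 5 : 0));
}
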
struct DequantizerQ5K final : public BaseDequantizer<block_q5_K> {
DequantizerQ5K(const void * vx, size_t bx) : BaseDequantizer(vx, bx) {}
template <typename Q8>
@@ -3751,7 +3848,7 @@ template <typename Dequantizer> void MulMat::set_functions(MulMat& m) {
std::is_same_v<Dequantizer, DequantizerIQ4K> ||
std::is_same_v<Dequantizer, DequantizerIQ3K> ||
std::is_same_v<Dequantizer, DequantizerIQ4XS>||
- std::is_same_v<Dequantizer, DequantizerIQ4XXS>) {
+ std::is_same_v<Dequantizer, DequantizerIQ4KS>) {
m.funcs[0] = mul_mat_iqX_k_q8_K_AVX512<Dequantizer, 1>;
m.funcs[1] = mul_mat_iqX_k_q8_K_AVX512<Dequantizer, 2>;
m.funcs[2] = mul_mat_iqX_k_q8_K_AVX512<Dequantizer, 3>;
@@ -3913,12 +4010,16 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& mm, int Ny) {
break;
case GGML_TYPE_IQ4_KS:
assert (ne00 % QK_K == 0);
- MulMat::set_functions<DequantizerIQ4XXS>(mm);
+ MulMat::set_functions<DequantizerIQ4KS>(mm);
break;
case GGML_TYPE_IQ2_K:
assert (ne00 % QK_K == 0);
MulMat::set_functions<DequantizerIQ2K>(mm);
break;
+ case GGML_TYPE_IQ2_KS:
+ assert (ne00 % QK_K == 0);
+ MulMat::set_functions<DequantizerIQ2KS>(mm);
+ break;
case GGML_TYPE_IQ3_K:
assert (ne00 % QK_K == 0);
MulMat::set_functions<DequantizerIQ3K>(mm);
@@ -4224,14 +4325,20 @@ struct Q2bits {
}
};
-template <typename block_q, bool has_row_scale = false>
+template <typename block_q, bool has_row_scale = false, bool scale_is_f16 = false>
struct BaseDequantizer {
BaseDequantizer(const void * vx, size_t bx, int nrc) : vx(vx), x(nullptr), bx(bx), nrc(nrc) {}
inline void new_row(int ix) {
if constexpr (has_row_scale) {
- const float * dptr = (const float *)((const char *)vx + ix*bx);
- d = *dptr;
- x = (const block_q *)(dptr + 1);
+ if constexpr (scale_is_f16) {
+ const ggml_half * dptr = (const ggml_half *)((const char *)vx + ix*bx);
+ d = GGML_FP16_TO_FP32(*dptr);
+ x = (const block_q *)(dptr + 1);
+ } else {
+ const float * dptr = (const float *)((const char *)vx + ix*bx);
+ d = *dptr;
+ x = (const block_q *)(dptr + 1);
+ }
} else {
x = (const block_q *)((const char *)vx + ix*bx);
}
@@ -4683,7 +4790,7 @@ struct DequantizerIQ2K final : public BaseDequantizer<block_iq2_k> {
inline int8x16_t make_scales(const uint8_t * scales_l) const {
uint8x8_t aux = vld1_u8(scales_l);
uint8x16_t scl8 = vandq_u8(vcombine_u8(aux, vshr_n_u8(aux, 4)), vdupq_n_u8(0xf));
- int8x16_t scales = vaddq_s8(vreinterpretq_s8_u8(vshlq_n_u8(scl8, 1)), vdupq_n_s8(-15));
+ int8x16_t scales = vaddq_s8(vreinterpretq_s8_u8(scl8), vdupq_n_s8(-8));
return vqtbl1q_s8(scales, hshuff);
}
@@ -4809,9 +4916,9 @@ struct DequantizerIQ4XS final : public BaseDequantizer<block_iq4_xs> {
};
-struct DequantizerIQ4XXS final : public BaseDequantizer<block_iq4_ks, true> {
- DequantizerIQ4XXS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc), values(vld1q_s8_x2(iq4k_values)) {}
+struct DequantizerIQ4KS final : public BaseDequantizer<block_iq4_ks, true> {
+ DequantizerIQ4KS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc), values(vld1q_s8_x2(iq4k_values)) {}
constexpr static int num_blocks() { return 8; }
constexpr static bool should_scale_quants() { return false; }
@@ -4838,6 +4945,42 @@ struct DequantizerIQ4XXS final : public BaseDequantizer<block_iq4_ks, true> {
const int16x8_t m127 = vdupq_n_s16(-127);
};
+struct DequantizerIQ2KS final : public BaseDequantizer<block_iq2_ks, true, true> {
+ DequantizerIQ2KS(const void * vx, size_t bx, int nrc) : BaseDequantizer(vx, bx, nrc) {}
+
+ constexpr static int num_blocks() { return 8; }
+ constexpr static bool should_scale_quants() { return false; }
+
+ template <typename Q8>
+ inline int32x4x2_t new_block(int i, [[maybe_unused]] const Q8& q8, [[maybe_unused]] float32x4_t * acc) {
+ const uint16_t * sc16 = (const uint16_t *)x[i].scales;
+ uint32_t aux32 = sc16[0] | (sc16[1] << 16);
+ uint8x8_t scales8 = vreinterpret_u8_u32(vdup_n_u32(aux32));
+ scales8 = vand_u8(vzip1_u8(scales8, vshr_n_u8(scales8, 4)), vdup_n_u8(0xf));
+ uint8x8_t sh = vand_u8(vceq_u8(vand_u8(vdup_n_u8(x[i].extra >> 8), hmask), vdup_n_u8(0)), vdup_n_u8(16));
+ int16x8_t scales16 = vmovl_s8(vsub_s8(vreinterpret_s8_u8(scales8), vreinterpret_s8_u8(sh)));
+ int32x4x2_t scales = {vmovl_s16(vget_low_s16(scales16)), vmovl_s16(vget_high_s16(scales16))};
+ return scales;
+ }
+ inline void prepare(int i, int j) {
+ uint8_t extra = x[i].extra >> 4*j;
+ bits.prepare(x[i].qs+32*j);
+ bits.b1.val[0] = vqtbl1q_s8(values.val[extra & 1], bits.b1.val[0]);
+ bits.b1.val[1] = vqtbl1q_s8(values.val[extra & 1], bits.b1.val[1]); extra >>= 1;
+ bits.b1.val[2] = vqtbl1q_s8(values.val[extra & 1], bits.b1.val[2]);
+ bits.b1.val[3] = vqtbl1q_s8(values.val[extra & 1], bits.b1.val[3]); extra >>= 1;
+ bits.b2.val[0] = vqtbl1q_s8(values.val[extra & 1], bits.b2.val[0]);
+ bits.b2.val[1] = vqtbl1q_s8(values.val[extra & 1], bits.b2.val[1]); extra >>= 1;
+ bits.b2.val[2] = vqtbl1q_s8(values.val[extra & 1], bits.b2.val[2]);
+ bits.b2.val[3] = vqtbl1q_s8(values.val[extra & 1], bits.b2.val[3]);
+ }
+
+ Q2bits bits;
+ const uint8x8_t hmask = vreinterpret_u8_u64(vdup_n_u64(0x8040201008040201));
+ const int8x16x2_t values = { vreinterpretq_s8_u64(vdupq_n_u64(0x1101f3e1)), vreinterpretq_s8_u64(vdupq_n_u64(0x1606f8e6)) };
+};
+
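The two 64-bit constants in the NEON version are the same tables packed directly as signed bytes (little-endian); since the quant indices are 2-bit, vqtbl1q_s8 only ever touches the first four bytes:

//   0x1101f3e1 -> {0xe1, 0xf3, 0x01, 0x11} = {-31, -13,  1, 17}
//   0x1606f8e6 -> {0xe6, 0xf8, 0x06, 0x16} = {-26,  -8,  6, 22}

Unlike the AVX paths, this variant indexes the true values, so new_block() needs no min correction.
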
struct SimpleBits {
uint8x16x4_t b1;
uint8x16x4_t b2;
@@ -6571,7 +6714,10 @@ bool MulMat::prepare(int typeA, int typeB, int ne00, MulMat& m, int /*Ny*/) {
MulMat::set_functions<DequantizerIQ4XS>(m);
break;
case GGML_TYPE_IQ4_KS:
- MulMat::set_functions<DequantizerIQ4XXS>(m);
+ MulMat::set_functions<DequantizerIQ4KS>(m);
+ break;
+ case GGML_TYPE_IQ2_KS:
+ MulMat::set_functions<DequantizerIQ2KS>(m);
break;
case GGML_TYPE_IQ4_K:
MulMat::set_functions<DequantizerIQ4K>(m);
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index 430b629f..984801be 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -30,6 +30,50 @@ inline int nearest_int(float fval) {
return (i & 0x007fffff) - 0x00400000;
}
+float make_qx_quants(int n, int nmax, const float * x, int8_t * L, const float * qw) {
+ float max = 0;
+ float amax = 0;
+ for (int i = 0; i < n; ++i) {
+ float ax = fabsf(x[i]);
+ if (ax > amax) { amax = ax; max = x[i]; }
+ }
+ if (!amax) { // all zero
+ for (int i = 0; i < n; ++i) L[i] = 0;
+ return 0.f;
+ }
+ float iscale = -nmax / max;
+ float sumlx = 0;
+ float suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = std::max(-nmax, std::min(nmax-1, l));
+ L[i] = l + nmax;
+ sumlx += qw[i]*x[i]*l;
+ suml2 += qw[i]*l*l;
+ }
+ float scale = suml2 ? sumlx/suml2 : 0.0f;
+ float best = scale * sumlx;
+ for (int is = -9; is <= 9; ++is) {
+ if (is == 0) continue;
+ iscale = -(nmax + 0.1f*is) / max;
+ sumlx = suml2 = 0;
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ l = std::max(-nmax, std::min(nmax-1, l));
+ sumlx += qw[i]*x[i]*l;
+ suml2 += qw[i]*l*l;
+ }
+ if (suml2 > 0 && sumlx*sumlx > best*suml2) {
+ for (int i = 0; i < n; ++i) {
+ int l = nearest_int(iscale * x[i]);
+ L[i] = nmax + std::max(-nmax, std::min(nmax-1, l));
+ }
+ scale = sumlx/suml2; best = scale*sumlx;
+ }
+ }
+ return scale;
+}
+
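make_qx_quants() is the usual weighted least-squares scale fit: for fixed integer levels l[i], the scale minimizing sum qw[i]*(x[i] - d*l[i])^2 is d = sum(qw*x*l) / sum(qw*l*l); the is-loop then perturbs the initial scale 18 times and keeps the best fit. A small worked example (hypothetical numbers):

// x = {2.0, -4.0}, qw = {1, 1}, nmax = 8.
// First pass: iscale = -8 / -4.0 = 2  ->  l = {4, -8}
//   sumlx = 2.0*4 + (-4.0)*(-8) = 40
//   suml2 = 4*4 + 8*8           = 80
//   d     = 40/80               = 0.5
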
struct IQ1BNQuantizer {
int8_t L[QK_IQ1BN];
void quantize_one_row_1bn(const float * src, block_iq1_bn * y, int n_per_row, const float * imatrix);
@@ -507,6 +551,8 @@ void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const fl
float scales[QK_K/kBlockSize];
float weight[kBlockSize];
float sumx[kBlockSize+1], sumw[kBlockSize+1];
+ float sw[QK_K/kBlockSize];
+ int8_t Ls[QK_K/kBlockSize];
std::array<std::pair<float,int>, kBlockSize> pairs;
@@ -524,7 +570,7 @@ void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const fl
uint16_t extra = 0;
- float max_abs_scale = 0;
+ float max_abs_scale = 0, max_scale = 0;
for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
const float * xb = xbl + kBlockSize*ib;
@@ -534,7 +580,11 @@ void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const fl
} else {
for (int j = 0; j < kBlockSize; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
}
- for (int j = 0; j < kBlockSize; ++j) pairs[j] = {xb[j], j};
+ sw[ib] = 0;
+ for (int j = 0; j < kBlockSize; ++j) {
+ sw[ib] += weight[j];
+ pairs[j] = {xb[j], j};
+ }
std::sort(pairs.begin(), pairs.end());
sumx[0] = sumw[0] = 0;
for (int j = 0; j < kBlockSize; ++j) {
@@ -583,21 +633,25 @@ void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const fl
if (is_shifted) extra |= (1 << ib);
float abs_scale = fabsf(scales[ib]);
- max_abs_scale = MAX(max_abs_scale, abs_scale);
+ if (abs_scale > max_abs_scale) {
+ max_abs_scale = abs_scale;
+ max_scale = scales[ib];
+ }
}
if (!max_abs_scale) continue;
+ float d = make_qx_quants(QK_K/kBlockSize, 8, scales, Ls, sw);
+ if (!d) continue;
- float d = max_abs_scale/15;
+ //float d = -max_scale/8;
y[ibl].extra = extra;
float id = 1/d;
float sumqx = 0, sumq2 = 0;
for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
- int ls = nearest_int(0.5f*(id*scales[ib]+15));
- ls = MAX(0, MIN(15, ls));
- y[ibl].scales[ib/2] |= (ls << 4*(ib%2));
- ls = 2*ls - 15;
+ int ls = nearest_int(id*scales[ib]);
+ ls = std::max(-8, std::min(7, ls));
+ y[ibl].scales[ib/2] |= ((ls + 8) << 4*(ib%2));
float dl = d * ls;
if (dl) {
const int8_t * block_values = y[ibl].extra & (1 << ib) ? shifted_values : iq2nl_values;
@@ -623,7 +677,7 @@ void quantize_row_iq2_k_impl(const float * x, void * vy, int n_per_row, const fl
}
}
}
- y[ibl].d = GGML_FP32_TO_FP16(1.025f*(sumq2 > 0 ? sumqx/sumq2 : d));
+ y[ibl].d = GGML_FP32_TO_FP16(1.030f*(sumq2 > 0 ? sumqx/sumq2 : d));
}
}
@@ -665,8 +719,8 @@ void dequantize_row_iq2_k(const block_iq2_k * GGML_RESTRICT x, float * GGML_RES
int shift = 0;
for (int ib32 = 0; ib32 < QK_K/32; ++ib32) {
- float dl1 = d * (2*(x[i].scales[ib32] & 0xf) - 15);
- float dl2 = d * (2*(x[i].scales[ib32] >> 4) - 15);
+ float dl1 = d * ((x[i].scales[ib32] & 0xf) - 8);
+ float dl2 = d * ((x[i].scales[ib32] >> 4) - 8);
const int8_t * values1 = extra & 1 ? iq2nl_values + 4 : iq2nl_values;
const int8_t * values2 = extra & 2 ? iq2nl_values + 4 : iq2nl_values;
extra >>= 2;
@@ -701,6 +755,347 @@ void vec_dot_iq2_k_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void *
}
+namespace {
+void quantize_row_iq2_ks_impl(const float * x, void * vy, int n_per_row, const float * quant_weights, float * all_scales, float * all_sw, int8_t * all_Ls) {
+
+ constexpr int kBlockSize = 32;
+ constexpr int kMax_i1 = 3*kBlockSize/4;
+ constexpr int kMin_i3 = kBlockSize/4;
+
+ ggml_half * dptr = (ggml_half *)vy;
+ *dptr = GGML_FP32_TO_FP16(0.f);
+
+ block_iq2_ks * y = (block_iq2_ks *)(dptr + 1);
+
+ float weight[kBlockSize];
+ float sumx[kBlockSize+1], sumw[kBlockSize+1];
+
+ std::array<std::pair<float,int>, kBlockSize> pairs;
+
+ float val [4] = {float(iq2nl_values[0]), float(iq2nl_values[1]), float(iq2nl_values[2]), float(iq2nl_values[3])};
+ float sval[4] = {float(iq2nl_values[4]), float(iq2nl_values[5]), float(iq2nl_values[6]), float(iq2nl_values[7])};
+
+ const int8_t * shifted_values = iq2nl_values + 4;
+
+ const int nblock = n_per_row/QK_K;
+
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+
+ memset(&y[ibl], 0, sizeof(block_iq2_ks));
+
+ auto scales = all_scales + ibl*(QK_K/kBlockSize);
+ auto sw = all_sw + ibl*(QK_K/kBlockSize);
+
+ const float * xbl = x + ibl*QK_K;
+ float sumx2 = 0;
+ for (int j = 0; j < QK_K; ++j) sumx2 += xbl[j]*xbl[j];
+ const float sigma2 = 1.5f*sumx2/QK_K;
+
+ uint16_t extra = 0;
+
+ for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
+ const float * xb = xbl + kBlockSize*ib;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*QK_K + ib*kBlockSize;
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
+ }
+ sw[ib] = 0;
+ for (int j = 0; j < kBlockSize; ++j) {
+ sw[ib] += weight[j];
+ pairs[j] = {xb[j], j};
+ }
+ // NB: an earlier iterative grid search over candidate block scales was
+ // dropped in favor of the exhaustive three-split search below.
+ std::sort(pairs.begin(), pairs.end());
+ sumx[0] = sumw[0] = 0;
+ for (int j = 0; j < kBlockSize; ++j) {
+ int jj = pairs[j].second;
+ sumw[j+1] = sumw[j] + weight[jj];
+ sumx[j+1] = sumx[j] + weight[jj]*xb[jj];
+ }
+ float best = 0, d = 0;
+ bool is_shifted = false;
+ float sumqx, sumq2;
+ for (int i1 = 0; i1 < kMax_i1; ++i1) {
+ for (int i2 = i1; i2 < kBlockSize; ++i2) {
+ for (int i3 = std::max(i2, kMin_i3); i3 < kBlockSize; ++i3) {
+ sumqx = (sumx[i1] - sumx[ 0])*val[0] + (sumx[i2] - sumx[i1])*val[1]
+ + (sumx[i3] - sumx[i2])*val[2] + (sumx[kBlockSize] - sumx[i3])*val[3];
+ sumq2 = (sumw[i1] - sumw[ 0])*val[0]*val[0] + (sumw[i2] - sumw[i1])*val[1]*val[1]
+ + (sumw[i3] - sumw[i2])*val[2]*val[2] + (sumw[kBlockSize] - sumw[i3])*val[3]*val[3];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = false;
+ }
+ sumqx = (sumx[i1] - sumx[ 0])*sval[0] + (sumx[i2] - sumx[i1])*sval[1]
+ + (sumx[i3] - sumx[i2])*sval[2] + (sumx[kBlockSize] - sumx[i3])*sval[3];
+ sumq2 = (sumw[i1] - sumw[ 0])*sval[0]*sval[0] + (sumw[i2] - sumw[i1])*sval[1]*sval[1]
+ + (sumw[i3] - sumw[i2])*sval[2]*sval[2] + (sumw[kBlockSize] - sumw[i3])*sval[3]*sval[3];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = true;
+ }
+ sumqx = (sumx[i1] - sumx[ 0])*val[3] + (sumx[i2 ] - sumx[i1])*val[2]
+ + (sumx[i3] - sumx[i2])*val[1] + (sumx[kBlockSize] - sumx[i3])*val[0];
+ sumq2 = (sumw[i1] - sumw[ 0])*val[3]*val[3] + (sumw[i2 ] - sumw[i1])*val[2]*val[2]
+ + (sumw[i3] - sumw[i2])*val[1]*val[1] + (sumw[kBlockSize] - sumw[i3])*val[0]*val[0];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = false;
+ }
+ sumqx = (sumx[i1] - sumx[ 0])*sval[3] + (sumx[i2 ] - sumx[i1])*sval[2]
+ + (sumx[i3] - sumx[i2])*sval[1] + (sumx[kBlockSize] - sumx[i3])*sval[0];
+ sumq2 = (sumw[i1] - sumw[ 0])*sval[3]*sval[3] + (sumw[i2 ] - sumw[i1])*sval[2]*sval[2]
+ + (sumw[i3] - sumw[i2])*sval[1]*sval[1] + (sumw[kBlockSize] - sumw[i3])*sval[0]*sval[0];
+ if (sumq2 > 0 && sumqx*sumqx > best*sumq2) {
+ d = sumqx/sumq2; best = d*sumqx; is_shifted = true;
+ }
+ }
+ }
+ }
+ scales[ib] = d;
+ if (is_shifted) extra |= (1 << ib);
+
+ }
+ y[ibl].extra = extra;
+
+ }
+
+ float d = make_qx_quants(nblock*(QK_K/kBlockSize), 16, all_scales, all_Ls, all_sw);
+
+ if (!d) return;
+
+ float sumqx = 0, sumq2 = 0;
+ for (int ibl = 0; ibl < nblock; ++ibl) {
+ auto xbl = x + ibl*QK_K;
+ float sumx2 = 0;
+ for (int j = 0; j < QK_K; ++j) sumx2 += xbl[j]*xbl[j];
+ const float sigma2 = 1.5f*sumx2/QK_K;
+ auto Ls = all_Ls + ibl*(QK_K/kBlockSize);
+ for (int ib = 0; ib < QK_K/kBlockSize; ++ib) {
+ int ls = Ls[ib];
+ y[ibl].scales[ib/2] |= ((ls & 0xf) << 4*(ib%2));
+ y[ibl].extra |= ((ls >> 4) << (8 + ib));
+ ls -= 16;
+ float dl = d * ls;
+ if (dl) {
+ const int8_t * block_values = y[ibl].extra & (1 << ib) ? shifted_values : iq2nl_values;
+ const float * xb = xbl + kBlockSize*ib;
+ if (quant_weights) {
+ const float * qw = quant_weights + ibl*QK_K + ib*kBlockSize;
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = qw[j] * sqrtf(sigma2 + xb[j]*xb[j]);
+ } else {
+ for (int j = 0; j < kBlockSize; ++j) weight[j] = 0.25f*sigma2 + xb[j]*xb[j];
+ }
+ float idl = 1/dl;
+ uint8_t * qs = y[ibl].qs + 32*(ib/4);
+ for (int j = 0; j < 32; ++j) {
+ const float al = idl*xb[j];
+ int ibest = best_index_iq2nl(block_values, al);
+ qs[j] |= (ibest << 2*(ib%4));
+ float w = weight[j];
+ float q = block_values[ibest]*ls;
+ sumqx += w*q*xb[j];
+ sumq2 += w*q*q;
+ }
+ }
+ }
+ }
+ *dptr = GGML_FP32_TO_FP16(1.030f*(sumq2 > 0 ? sumqx/sumq2 : d));
+}
+}
+
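To summarize the two-level scheme implemented above: the 8 float block scales of every super-block are themselves quantized to 5 bits across the whole row by make_qx_quants(..., 16, ...), and the resulting row scale (after the final weighted refit with the 1.030f fudge factor) is stored once as f16. Assuming the block_iq2_ks layout implied by this code:

// qs[QK_K/4]       2-bit quant indices (four bit-planes per 128 weights)
// scales[QK_K/64]  low 4 bits of the 8 block scales, two per byte
// extra bits 0-7   per-block flag selecting the shifted value table
// extra bits 8-15  5th bit of each block scale
// ...with one ggml_half row scale preceding the blocks.
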
+void quantize_row_iq2_ks_ref(const float * GGML_RESTRICT x, block_iq2_ks * GGML_RESTRICT y, int64_t k) {
+ assert(k % QK_K == 0);
+ quantize_iq2_ks(x, (void *)y, 1, k, nullptr);
+}
+
+void quantize_row_iq2_ks(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+ assert(k % QK_K == 0);
+ block_iq2_ks * y = (block_iq2_ks *)vy;
+ quantize_row_iq2_ks_ref(x, y, k);
+}
+
+size_t quantize_iq2_ks(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+ constexpr int kBlockSize = 32;
+ GGML_ASSERT(n_per_row%QK_K == 0);
+ auto row_size = ggml_row_size(GGML_TYPE_IQ2_KS, n_per_row);
+ int nblock = n_per_row/QK_K;
+ std::vector<float> all_scales(nblock*(QK_K/kBlockSize)), all_sw(nblock*(QK_K/kBlockSize));
+ std::vector<int8_t> all_Ls(nblock*(QK_K/kBlockSize));
+ char * qrow = (char *)dst;
+ for (int64_t row = 0; row < nrows; ++row) {
+ quantize_row_iq2_ks_impl(src, (void *)qrow, n_per_row, imatrix, all_scales.data(), all_sw.data(), all_Ls.data());
+ src += n_per_row;
+ qrow += row_size;
+ }
+ return nrows * row_size;
+}
+
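Caller-side sketch for the row-based API (nrows and n_per_row as in the declaration; the destination must be sized with ggml_row_size, which accounts for the per-row f16 scale):

std::vector<char> buf(nrows * ggml_row_size(GGML_TYPE_IQ2_KS, n_per_row));
size_t written = quantize_iq2_ks(src, buf.data(), nrows, n_per_row, nullptr);  // nullptr: no imatrix
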
+void dequantize_row_iq2_ks(const block_iq2_ks * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k) {
+ assert(k % QK_K == 0);
+ const int nb = k / QK_K;
+
+ const ggml_half * dptr = (const ggml_half *)x;
+ const float d = GGML_FP16_TO_FP32(*dptr);
+ x = (const block_iq2_ks *)(dptr + 1);
+
+ for (int i = 0; i < nb; i++) {
+
+ const uint8_t * qs = x[i].qs;
+
+ uint16_t extra = x[i].extra;
+
+ int shift = 0;
+ for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
+ float dl1 = d * (((x[i].scales[ib64] & 0xf) | ((extra >> 4) & 0x10)) - 16);
+ float dl2 = d * (((x[i].scales[ib64] >> 4) | ((extra >> 5) & 0x10)) - 16);
+ const int8_t * values1 = extra & 1 ? iq2nl_values + 4 : iq2nl_values;
+ const int8_t * values2 = extra & 2 ? iq2nl_values + 4 : iq2nl_values;
+ extra >>= 2;
+ for (int j = 0; j < 32; ++j) {
+ y[j+ 0] = dl1 * values1[(qs[j] >> (shift+0)) & 3];
+ y[j+32] = dl2 * values2[(qs[j] >> (shift+2)) & 3];
+ }
+ y += 64;
+ shift += 4;
+ if (shift == 8) { qs += 32; shift = 0; }
+ }
+
+ }
+
+}
+
+void vec_dot_iq2_ks_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ GGML_UNUSED(nrc);
+ GGML_UNUSED(bx);
+ GGML_UNUSED(by);
+ GGML_UNUSED(bs);
+
+#if GGML_USE_IQK_MULMAT
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ2_KS, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+ return;
+ }
+#endif
+
+ const ggml_half * dptr = (const ggml_half *)vx;
+ const float d = GGML_FP16_TO_FP32(*dptr);
+ const block_iq2_ks * x = (const block_iq2_ks *)(dptr + 1);
+ const block_q8_K * y = (const block_q8_K *)vy;
+
+ const int nb = n / QK_K;
+ float sumf = 0;
+ for (int i = 0; i < nb; i++) {
+ const uint8_t * qs = x[i].qs;
+ const int8_t * q8 = y[i].qs;
+ uint16_t extra = x[i].extra;
+ int sumi = 0;
+ for (int ib128 = 0; ib128 < QK_K/128; ++ib128) {
+ int d1 = (((x[i].scales[2*ib128+0] & 0xf) | ((extra >> 4) & 0x10)) - 16);
+ int d2 = (((x[i].scales[2*ib128+0] >> 4) | ((extra >> 5) & 0x10)) - 16);
+ int d3 = (((x[i].scales[2*ib128+1] & 0xf) | ((extra >> 6) & 0x10)) - 16);
+ int d4 = (((x[i].scales[2*ib128+1] >> 4) | ((extra >> 7) & 0x10)) - 16);
+ const int8_t * values1 = extra & 1 ? iq2nl_values + 4 : iq2nl_values;
+ const int8_t * values2 = extra & 2 ? iq2nl_values + 4 : iq2nl_values;
+ const int8_t * values3 = extra & 4 ? iq2nl_values + 4 : iq2nl_values;
+ const int8_t * values4 = extra & 8 ? iq2nl_values + 4 : iq2nl_values;
+ extra >>= 4;
+ int sumi1 = 0, sumi2 = 0, sumi3 = 0, sumi4 = 0;
+ for (int j = 0; j < 32; ++j) {
+ sumi1 += q8[j+ 0] * values1[(qs[j] >> 0) & 3];
+ sumi2 += q8[j+32] * values2[(qs[j] >> 2) & 3];
+ sumi3 += q8[j+64] * values3[(qs[j] >> 4) & 3];
+ sumi4 += q8[j+96] * values4[(qs[j] >> 6) & 3];
+ }
+ sumi += d1*sumi1 + d2*sumi2 + d3*sumi3 + d4*sumi4;
+ q8 += 128;
+ qs += 32;
+ }
+ sumf += y[i].d * sumi;
+ }
+
+ *s = d * sumf;
+
+}
+
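The reference dot product above makes the qs layout explicit: within each 128-weight group, byte qs[j] (j = 0..31) carries four 2-bit quants, one per 32-weight sub-block:

// sub-block 0: (qs[j] >> 0) & 3   -> weight j +  0
// sub-block 1: (qs[j] >> 2) & 3   -> weight j + 32
// sub-block 2: (qs[j] >> 4) & 3   -> weight j + 64
// sub-block 3: (qs[j] >> 6) & 3   -> weight j + 96
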
//
// ============================================== iq3_k
//
diff --git a/ggml/src/iqk/iqk_quantize.h b/ggml/src/iqk/iqk_quantize.h
index a3623963..eb562779 100644
--- a/ggml/src/iqk/iqk_quantize.h
+++ b/ggml/src/iqk/iqk_quantize.h
@@ -61,6 +61,12 @@ size_t quantize_iq4_ks(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst
void dequantize_row_iq4_ks(const block_iq4_ks * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
void vec_dot_iq4_ks_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+void quantize_row_iq2_ks_ref(const float * GGML_RESTRICT x, block_iq2_ks * GGML_RESTRICT y, int64_t k);
+void quantize_row_iq2_ks(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k);
+size_t quantize_iq2_ks(const float * GGML_RESTRICT src, void * GGML_RESTRICT dst, int64_t nrows, int64_t n_per_row, const float * imatrix);
+void dequantize_row_iq2_ks(const block_iq2_ks * GGML_RESTRICT x, float * GGML_RESTRICT y, int64_t k);
+void vec_dot_iq2_ks_q8_k(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc);
+
void iqk_quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k);
#ifdef __cplusplus
diff --git a/include/llama.h b/include/llama.h
index 9fb4af53..c9387e6b 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -179,6 +179,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_IQ1_TN = 144, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_KS = 145, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ3_KL = 146, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_IQ2_KS = 147, // except 1d tensors
LLAMA_FTYPE_GUESSED = 1024, // not specified in the model file
};
diff --git a/src/llama.cpp b/src/llama.cpp
index c338452b..b356f7bc 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3783,6 +3783,7 @@ struct llama_model_loader {
case GGML_TYPE_Q6_K: ftype = LLAMA_FTYPE_MOSTLY_Q6_K; break;
case GGML_TYPE_IQ2_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XXS; break;
case GGML_TYPE_IQ2_XS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_XS; break;
+ case GGML_TYPE_IQ2_KS: ftype = LLAMA_FTYPE_MOSTLY_IQ2_KS; break;
case GGML_TYPE_IQ2_S: ftype = LLAMA_FTYPE_MOSTLY_IQ2_S; break;
case GGML_TYPE_IQ3_XXS: ftype = LLAMA_FTYPE_MOSTLY_IQ3_XXS; break;
case GGML_TYPE_IQ1_S: ftype = LLAMA_FTYPE_MOSTLY_IQ1_S; break;
@@ -4487,6 +4488,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
case LLAMA_FTYPE_MOSTLY_Q6_K: return "Q6_K";
case LLAMA_FTYPE_MOSTLY_IQ2_XXS: return "IQ2_XXS - 2.0625 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_XS: return "IQ2_XS - 2.3125 bpw";
+ case LLAMA_FTYPE_MOSTLY_IQ2_KS: return "IQ2_KS - 2.1875 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_S: return "IQ2_S - 2.5 bpw";
case LLAMA_FTYPE_MOSTLY_IQ2_M: return "IQ2_M - 2.7 bpw";
case LLAMA_FTYPE_MOSTLY_IQ3_XS: return "IQ3_XS - 3.3 bpw";
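
The 2.1875 bpw figure checks out against the block layout: per 256-weight super-block, 64 bytes of qs + 4 bytes of scales + 2 bytes of extra = 70 bytes, and 70*8/256 = 2.1875. The single f16 row scale adds 16/n_per_row bpw on top, which is negligible for typical row sizes.
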
@@ -15645,7 +15647,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
}
else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ3_XXS ||
ftype == LLAMA_FTYPE_MOSTLY_IQ1_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ1_M || ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K) {
+ ftype == LLAMA_FTYPE_MOSTLY_IQ1_M || ftype == LLAMA_FTYPE_MOSTLY_IQ2_K || ftype == LLAMA_FTYPE_MOSTLY_IQ3_K ||
+ ftype == LLAMA_FTYPE_MOSTLY_IQ2_KS) {
new_type = !qs.has_output ? GGML_TYPE_IQ4_K : GGML_TYPE_Q5_K;
}
else if ((ftype == LLAMA_FTYPE_MOSTLY_IQ3_S || ftype == LLAMA_FTYPE_MOSTLY_IQ3_M || ftype == LLAMA_FTYPE_MOSTLY_IQ4_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ4_KS) && !qs.has_output) {
@@ -15681,7 +15684,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
}
}
} else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
- ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M) {
+ ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M ||
+ ftype == LLAMA_FTYPE_MOSTLY_IQ2_KS) {
if (name.find("attn_v.weight") != std::string::npos) {
if (qs.model.hparams.n_gqa() >= 4 || qs.model.hparams.n_expert >= 4) new_type = GGML_TYPE_IQ4_K;
else if (qs.model.hparams.n_gqa() >= 2 || qs.model.hparams.n_expert >= 2) new_type = GGML_TYPE_IQ3_K;
@@ -15905,7 +15909,8 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
new_type == GGML_TYPE_IQ3_XXS || new_type == GGML_TYPE_IQ1_S || new_type == GGML_TYPE_IQ3_S ||
new_type == GGML_TYPE_IQ1_M || new_type == GGML_TYPE_IQ4_K || new_type == GGML_TYPE_IQ2_K ||
new_type == GGML_TYPE_IQ5_K || new_type == GGML_TYPE_IQ3_K || new_type == GGML_TYPE_IQ2_TN ||
- new_type == GGML_TYPE_IQ6_K || new_type == GGML_TYPE_IQ1_TN || new_type == GGML_TYPE_IQ4_KS) {
+ new_type == GGML_TYPE_IQ6_K || new_type == GGML_TYPE_IQ1_TN || new_type == GGML_TYPE_IQ4_KS ||
+ new_type == GGML_TYPE_IQ2_KS) {
int nx = tensor->ne[0];
int ny = tensor->ne[1];
if (nx % QK_K != 0) {
@@ -15925,6 +15930,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
switch (new_type) {
case GGML_TYPE_IQ2_XXS:
case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ2_KS:
case GGML_TYPE_IQ2_S:
case GGML_TYPE_IQ3_XXS:
case GGML_TYPE_IQ3_S:
@@ -16036,6 +16042,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
case LLAMA_FTYPE_MOSTLY_Q6_K: default_type = GGML_TYPE_Q6_K; break;
case LLAMA_FTYPE_MOSTLY_IQ2_XXS: default_type = GGML_TYPE_IQ2_XXS; break;
case LLAMA_FTYPE_MOSTLY_IQ2_XS: default_type = GGML_TYPE_IQ2_XS; break;
+ case LLAMA_FTYPE_MOSTLY_IQ2_KS: default_type = GGML_TYPE_IQ2_KS; break;
case LLAMA_FTYPE_MOSTLY_IQ2_S: default_type = GGML_TYPE_IQ2_XS; break;
case LLAMA_FTYPE_MOSTLY_IQ2_M: default_type = GGML_TYPE_IQ2_S; break;
case LLAMA_FTYPE_MOSTLY_IQ3_XXS: default_type = GGML_TYPE_IQ3_XXS; break;