summary | refs | log | tree | commit | diff
path: root/ggml/src/iqk/iqk_quantize.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'ggml/src/iqk/iqk_quantize.cpp')
-rw-r--r--  ggml/src/iqk/iqk_quantize.cpp  72
1 files changed, 36 insertions, 36 deletions
diff --git a/ggml/src/iqk/iqk_quantize.cpp b/ggml/src/iqk/iqk_quantize.cpp
index e741a8ea..9ce5731d 100644
--- a/ggml/src/iqk/iqk_quantize.cpp
+++ b/ggml/src/iqk/iqk_quantize.cpp
@@ -3622,16 +3622,16 @@ void vec_dot_iq4_nl_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t
}
//
-// ========================================= q4_0_r4
+// ========================================= q4_0_r8
//
-void quantize_row_q4_0_r4_ref(const float * x, block_iq4_nl_r8 * y, int64_t k) {
+void quantize_row_q4_0_r8_ref(const float * x, block_iq4_nl_r8 * y, int64_t k) {
// we assume we are called with 8 rows
- quantize_q4_0_r4(x, (void *)y, 8, k/8, nullptr);
+ quantize_q4_0_r8(x, (void *)y, 8, k/8, nullptr);
}
-void quantize_row_q4_0_r4(const float * x, void * y, int64_t k) {
+void quantize_row_q4_0_r8(const float * x, void * y, int64_t k) {
// we assume we are called with 8 rows
- quantize_q4_0_r4(x, y, 8, k/8, nullptr);
+ quantize_q4_0_r8(x, y, 8, k/8, nullptr);
}
static void repack_q4_0(int nrows, int n_per_row, const block_q4_0 * x, block_iq4_nl_r8 * y, [[maybe_unused]] bool online) {
@@ -3664,7 +3664,7 @@ static void repack_q4_0(int nrows, int n_per_row, const block_q4_0 * x, block_iq
}
}
#ifdef __ARM_NEON
-static void modify_q4_0_r4(int64_t k, char * cy) {
+static void modify_q4_0_r8(int64_t k, char * cy) {
auto y = (block_iq4_nl_r8 *)cy;
int nb = k/(32*8);
for (int ib = 0; ib < nb; ++ib) {
@@ -3680,7 +3680,7 @@ static void modify_q4_0_r4(int64_t k, char * cy) {
}
#endif
-size_t quantize_q4_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+size_t quantize_q4_0_r8(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
GGML_ASSERT(nrows%8 == 0);
auto row_size_nl = ggml_row_size(GGML_TYPE_IQ4_NL, n_per_row);
std::vector<char> qtmp(8*row_size_nl);
@@ -3694,7 +3694,7 @@ size_t quantize_q4_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_
return nrows*row_size_nl;
}
-void dequantize_row_q4_0_r4(const block_iq4_nl_r8 * x, float * y, int64_t k) {
+void dequantize_row_q4_0_r8(const block_iq4_nl_r8 * x, float * y, int64_t k) {
// we assume we are called with 8 rows
int n_per_row = k/8;
int nb = n_per_row/QK4_0;
@@ -3713,9 +3713,9 @@ void dequantize_row_q4_0_r4(const block_iq4_nl_r8 * x, float * y, int64_t k) {
}
}
-void vec_dot_q4_0_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+void vec_dot_q4_0_r8_q8_0(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
#if GGML_USE_IQK_MULMAT
- if (iqk_mul_mat(1, 1, n, GGML_TYPE_Q4_0_R4, vx, 0, GGML_TYPE_Q8_0, vy, 0, s, 0, 0, 1)) {
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_Q4_0_R8, vx, 0, GGML_TYPE_Q8_0, vy, 0, s, 0, 0, 1)) {
return;
}
#endif
@@ -3728,16 +3728,16 @@ void vec_dot_q4_0_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t b
//
-// ========================================= q8_0_r4
+// ========================================= q8_0_r8
//
-void quantize_row_q8_0_r4_ref(const float * x, block_q8_0_r8 * y, int64_t k) {
+void quantize_row_q8_0_r8_ref(const float * x, block_q8_0_r8 * y, int64_t k) {
// we assume we are called with 8 rows
- quantize_q8_0_r4(x, (void *)y, 8, k/8, nullptr);
+ quantize_q8_0_r8(x, (void *)y, 8, k/8, nullptr);
}
-void quantize_row_q8_0_r4(const float * x, void * y, int64_t k) {
+void quantize_row_q8_0_r8(const float * x, void * y, int64_t k) {
// we assume we are called with 8 rows
- quantize_q8_0_r4(x, y, 8, k/8, nullptr);
+ quantize_q8_0_r8(x, y, 8, k/8, nullptr);
}
static void repack_q8_0(int nrows, int n_per_row, const block_q8_0 * x, block_q8_0_r8 * y, [[maybe_unused]] bool online) {
@@ -3770,7 +3770,7 @@ static void repack_q8_0(int nrows, int n_per_row, const block_q8_0 * x, block_q8
}
#ifdef HAVE_FANCY_SIMD
-static void modify_q8_0_r4(int64_t k, char * cy) {
+static void modify_q8_0_r8(int64_t k, char * cy) {
auto y = (block_iq4_nl_r8 *)cy;
int nb = k/(32*8);
for (int ib = 0; ib < nb; ++ib) {
@@ -3782,7 +3782,7 @@ static void modify_q8_0_r4(int64_t k, char * cy) {
}
#endif
-size_t quantize_q8_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+size_t quantize_q8_0_r8(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
GGML_ASSERT(nrows%8 == 0);
auto row_size_0 = ggml_row_size(GGML_TYPE_Q8_0, n_per_row);
std::vector<char> qtmp(8*row_size_0);
@@ -3796,7 +3796,7 @@ size_t quantize_q8_0_r4(const float * src, void * dst, int64_t nrows, int64_t n_
return nrows*row_size_0;
}
-void dequantize_row_q8_0_r4(const block_q8_0_r8 * x, float * y, int64_t k) {
+void dequantize_row_q8_0_r8(const block_q8_0_r8 * x, float * y, int64_t k) {
// we assume we are called with 8 rows
int n_per_row = k/8;
int nb = n_per_row/QK8_0;
@@ -3813,9 +3813,9 @@ void dequantize_row_q8_0_r4(const block_q8_0_r8 * x, float * y, int64_t k) {
}
}
-void vec_dot_q8_0_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+void vec_dot_q8_0_r8_q8_0(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
#if GGML_USE_IQK_MULMAT
- if (iqk_mul_mat(1, 1, n, GGML_TYPE_Q8_0_R4, vx, 0, GGML_TYPE_Q8_0, vy, 0, s, 0, 0, 1)) {
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_Q8_0_R8, vx, 0, GGML_TYPE_Q8_0, vy, 0, s, 0, 0, 1)) {
return;
}
#endif
@@ -4025,18 +4025,18 @@ void vec_dot_q6_0_r4_q8_0(int n, float * s, size_t bs, const void * vx, size_t b
}
//
-// ========================================= iq4_xs_r4
+// ========================================= iq4_xs_r8
//
-void quantize_row_iq4_xs_r4_ref(const float * x, block_iq4_xs_r4 * y, int64_t k) {
- quantize_iq4_xs_r4(x, (void *)y, 8, k/8, nullptr);
+void quantize_row_iq4_xs_r8_ref(const float * x, block_iq4_xs_r8 * y, int64_t k) {
+ quantize_iq4_xs_r8(x, (void *)y, 8, k/8, nullptr);
}
-void quantize_row_iq4_xs_r4(const float * x, void * y, int64_t k) {
- quantize_iq4_xs_r4(x, y, 8, k/8, nullptr);
+void quantize_row_iq4_xs_r8(const float * x, void * y, int64_t k) {
+ quantize_iq4_xs_r8(x, y, 8, k/8, nullptr);
}
-static void repack_iq4_xs(int nrows, int n_per_row, const block_iq4_xs * x, block_iq4_xs_r4 * y, [[maybe_unused]] bool online) {
+static void repack_iq4_xs(int nrows, int n_per_row, const block_iq4_xs * x, block_iq4_xs_r8 * y, [[maybe_unused]] bool online) {
GGML_ASSERT(nrows%8 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
int nblock = n_per_row/QK_K;
@@ -4068,7 +4068,7 @@ static void repack_iq4_xs(int nrows, int n_per_row, const block_iq4_xs * x, bloc
}
}
-size_t quantize_iq4_xs_r4(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
+size_t quantize_iq4_xs_r8(const float * src, void * dst, int64_t nrows, int64_t n_per_row, const float * imatrix) {
GGML_ASSERT(nrows%8 == 0);
GGML_ASSERT(n_per_row%QK_K == 0);
char * qcur = (char *)dst;
@@ -4076,14 +4076,14 @@ size_t quantize_iq4_xs_r4(const float * src, void * dst, int64_t nrows, int64_t
std::vector<char> qtmp(8*row_size);
for (int row = 0; row < nrows; row += 8) {
quantize_iq4_xs(src, (void *)qtmp.data(), 8, n_per_row, imatrix);
- repack_iq4_xs(8, n_per_row, (const block_iq4_xs *)qtmp.data(), (block_iq4_xs_r4 *)qcur, false);
+ repack_iq4_xs(8, n_per_row, (const block_iq4_xs *)qtmp.data(), (block_iq4_xs_r8 *)qcur, false);
qcur += 8*row_size;
src += 8*n_per_row;
}
return nrows*row_size;
}
-void dequantize_row_iq4_xs_r4(const block_iq4_xs_r4 * x, float * y, int64_t k) {
+void dequantize_row_iq4_xs_r8(const block_iq4_xs_r8 * x, float * y, int64_t k) {
auto n_per_row = k/8;
float * y8[8];
for (int k = 0; k < 8; ++k) y8[k] = y + n_per_row*k;
@@ -4103,9 +4103,9 @@ void dequantize_row_iq4_xs_r4(const block_iq4_xs_r4 * x, float * y, int64_t k) {
}
}
-void vec_dot_iq4_xs_r4_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
+void vec_dot_iq4_xs_r8_q8_k(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) {
#if GGML_USE_IQK_MULMAT
- if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ4_XS_R4, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
+ if (iqk_mul_mat(1, 1, n, GGML_TYPE_IQ4_XS_R8, vx, 0, GGML_TYPE_Q8_K, vy, 0, s, 0, 0, 1)) {
return;
}
#endif
@@ -6329,10 +6329,10 @@ struct Modify {
bool iqk_modify_tensor(struct ggml_tensor * tensor) {
static const std::unordered_map<ggml_type, Modify> k_mod_map = {
#ifdef __ARM_NEON
- { GGML_TYPE_Q4_0_R4, {modify_q4_0_r4, 8} },
+ { GGML_TYPE_Q4_0_R8, {modify_q4_0_r8, 8} },
#endif
#ifdef HAVE_FANCY_SIMD
- { GGML_TYPE_Q8_0_R4, {modify_q8_0_r4, 8} },
+ { GGML_TYPE_Q8_0_R8, {modify_q8_0_r8, 8} },
{ GGML_TYPE_Q8_K_R8, {modify_q8_k_r8, 8} },
#endif
};
@@ -6373,7 +6373,7 @@ void iqk_repack_tensor(struct ggml_tensor * tensor) {
{ GGML_TYPE_IQ3_K, { GGML_TYPE_IQ3_K_R4, 4, (Repack::repack_func)repack_iq3_k} },
{ GGML_TYPE_IQ4_K, { GGML_TYPE_IQ4_K_R4, 4, (Repack::repack_func)repack_iq4_k} },
{ GGML_TYPE_IQ5_K, { GGML_TYPE_IQ5_K_R4, 4, (Repack::repack_func)repack_iq5_k} },
- { GGML_TYPE_IQ4_XS, { GGML_TYPE_IQ4_XS_R4, 8, (Repack::repack_func)repack_iq4_xs} },
+ { GGML_TYPE_IQ4_XS, { GGML_TYPE_IQ4_XS_R8, 8, (Repack::repack_func)repack_iq4_xs} },
{ GGML_TYPE_IQ4_KS, { GGML_TYPE_IQ4_KS_R4, 4, (Repack::repack_func)repack_iq4_ks} },
{ GGML_TYPE_IQ4_NL, { GGML_TYPE_IQ4_NL_R4, 4, (Repack::repack_func)repack_iq4_nl} },
{ GGML_TYPE_IQ2_BN, { GGML_TYPE_IQ2_BN_R4, 4, (Repack::repack_func)repack_iq2_bn} },
@@ -6387,10 +6387,10 @@ void iqk_repack_tensor(struct ggml_tensor * tensor) {
{ GGML_TYPE_Q4_K, { GGML_TYPE_Q4_K_R4, 4, (Repack::repack_func)repack_q4_k} },
{ GGML_TYPE_Q5_K, { GGML_TYPE_Q5_K_R4, 4, (Repack::repack_func)repack_q5_k} },
{ GGML_TYPE_Q6_K, { GGML_TYPE_Q6_K_R4, 4, (Repack::repack_func)repack_q6_k} },
- { GGML_TYPE_Q4_0, { GGML_TYPE_Q4_0_R4, 8, (Repack::repack_func)repack_q4_0} },
+ { GGML_TYPE_Q4_0, { GGML_TYPE_Q4_0_R8, 8, (Repack::repack_func)repack_q4_0} },
{ GGML_TYPE_Q5_0, { GGML_TYPE_Q5_0_R4, 4, (Repack::repack_func)repack_q5_0} },
{ GGML_TYPE_Q6_0, { GGML_TYPE_Q6_0_R4, 4, (Repack::repack_func)repack_q6_0} },
- { GGML_TYPE_Q8_0, { GGML_TYPE_Q8_0_R4, 8, (Repack::repack_func)repack_q8_0} },
+ { GGML_TYPE_Q8_0, { GGML_TYPE_Q8_0_R8, 8, (Repack::repack_func)repack_q8_0} },
{ GGML_TYPE_Q8_K, { GGML_TYPE_Q8_K_R8, 8, (Repack::repack_func)repack_q8_k} },
#ifdef __AVX512BF16__
{ GGML_TYPE_BF16, { GGML_TYPE_BF16_R16, 16, (Repack::repack_func)repack_bf16<ggml_bf16_t>}},