author     Iwan Kawrakow <iwan.kawrakow@gmail.com>    2024-06-22 11:44:00 +0300
committer  Iwan Kawrakow <iwan.kawrakow@gmail.com>    2024-06-22 12:02:53 +0300
commit     b747093582c474bfa92798d1dd17dec7b982718a (patch)
tree       b55fddcdac6e09427e921af630f1bd4ca7946ed9
parent     8c936e3d6593bec82975ba93bec05f9f03bb21f3 (diff)
bitnet: qfns tests
Q8_0 fails because, by design, the reference quantization
differs from the vec_dot quantization.
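For context: dot_product_error() in test-quantize-fns.cpp (see the diff below) quantizes one operand with the type's reference quantizer and the other with the vec_dot_type quantizer, then compares the type's vec_dot result against a plain float dot product. The sketch below condenses that flow for Q8_0; it assumes the usual ggml type-traits API (ggml_internal_get_type_traits, ggml_row_size) and is an illustration, not code from this commit:

    // Sketch: why Q8_0 can now exceed the vec_dot error threshold.
    // q1 comes from the reference quantizer, q2 from the vec_dot quantizer;
    // if the two round/scale differently by design, vec_dot(q1, q2) drifts
    // away from the float dot product the test uses as ground truth.
    #include <cmath>
    #include <cstdint>
    #include <vector>
    #include "ggml.h"

    static float q8_0_dot_error_sketch(const float * a, const float * b, int n) {
        auto qfns = ggml_internal_get_type_traits(GGML_TYPE_Q8_0);
        auto vdot = ggml_internal_get_type_traits(qfns.vec_dot_type);  // also Q8_0

        std::vector<uint8_t> q1(ggml_row_size(GGML_TYPE_Q8_0, n));
        std::vector<uint8_t> q2(ggml_row_size(qfns.vec_dot_type, n));

        qfns.from_float_reference(a, q1.data(), n);   // reference quantization
        vdot.from_float(b, q2.data(), n);             // vec_dot quantization

        float result = INFINITY;
        qfns.vec_dot(n, &result, 0, q1.data(), 0, q2.data(), 0, 1);

        float dot_ref = 0;
        for (int i = 0; i < n; ++i) dot_ref += a[i] * b[i];
        return std::fabs(result - dot_ref) / n;       // checked against MAX_DOT_PRODUCT_ERROR
    }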
-rw-r--r-- | ggml-quants.c               | 33
-rw-r--r-- | iqk-quantize.cpp            | 62
-rw-r--r-- | tests/test-quantize-fns.cpp | 27
3 files changed, 104 insertions, 18 deletions
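The ggml-quants.c side of the diffstat is almost entirely one repeated change: each vec_dot kernel first offers the work to iqk_mul_mat() and only falls back to the existing scalar/SIMD code when that call declines. A schematic of the guard as it appears in the hunks below (the C 'restrict' qualifiers of the real signatures are omitted; the exact iqk_mul_mat() contract lives in iqk_mul_mat.h and is assumed, not shown here):

    // Guard added to every ggml_vec_dot_*_q8_* kernel touched by this commit.
    // A non-zero return from iqk_mul_mat() means it computed the (possibly
    // multi-row, via nrc) dot products, so the old implementation is skipped.
    void ggml_vec_dot_q4_0_q8_0(int n, float * s, size_t bs,
                                const void * vx, size_t bx,
                                const void * vy, size_t by, int nrc) {
    #if GGML_USE_IQK_MULMAT
        if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, nrc, nrc, n,
                        GGML_TYPE_Q4_0, vx, bx,
                        GGML_TYPE_Q8_0, vy, by,
                        s, bs, 0, 1)) {
            return;
        }
    #endif
        // ... existing Q4_0 x Q8_0 path continues unchanged ...
    }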
diff --git a/ggml-quants.c b/ggml-quants.c
index f1ce1345..6821af0d 100644
--- a/ggml-quants.c
+++ b/ggml-quants.c
@@ -3,6 +3,9 @@

 #include "ggml-quants.h"
 #include "ggml-impl.h"
+#if GGML_USE_IQK_MULMAT
+#include "iqk_mul_mat.h"
+#endif

 #include <math.h>

@@ -3801,6 +3804,11 @@ static inline __m128i get_scale_shuffle(int i) {
 #endif

 void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+    if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, nrc, nrc, n, GGML_TYPE_Q4_0, vx, bx, GGML_TYPE_Q8_0, vy, by, s, bs, 0, 1)) {
+        return;
+    }
+#endif
     const int qk = QK8_0;
     const int nb = n / qk;

@@ -4392,6 +4400,11 @@ void ggml_vec_dot_q4_0_q8_0(int n, float * restrict s, size_t bs, const void * r
 }

 void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+    if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, nrc, nrc, n, GGML_TYPE_Q4_1, vx, bx, GGML_TYPE_Q8_1, vy, by, s, bs, 0, 1)) {
+        return;
+    }
+#endif
     const int qk = QK8_1;
     const int nb = n / qk;

@@ -4683,6 +4696,11 @@ void ggml_vec_dot_q4_1_q8_1(int n, float * restrict s, size_t bs, const void * r
 }

 void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+    if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, nrc, nrc, n, GGML_TYPE_Q5_0, vx, bx, GGML_TYPE_Q8_0, vy, by, s, bs, 0, 1)) {
+        return;
+    }
+#endif
     const int qk = QK8_0;
     const int nb = n / qk;

@@ -5043,6 +5061,11 @@ void ggml_vec_dot_q5_0_q8_0(int n, float * restrict s, size_t bs, const void * r
 }

 void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+    if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, nrc, nrc, n, GGML_TYPE_Q5_1, vx, bx, GGML_TYPE_Q8_1, vy, by, s, bs, 0, 1)) {
+        return;
+    }
+#endif
     const int qk = QK8_1;
     const int nb = n / qk;

@@ -5422,6 +5445,11 @@ void ggml_vec_dot_q5_1_q8_1(int n, float * restrict s, size_t bs, const void * r
 }

 void ggml_vec_dot_q8_0_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+    if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, nrc, nrc, n, GGML_TYPE_Q8_0, vx, bx, GGML_TYPE_Q8_0, vy, by, s, bs, 0, 1)) {
+        return;
+    }
+#endif
     const int qk = QK8_0;
     const int nb = n / qk;

@@ -11798,6 +11826,11 @@ void ggml_vec_dot_iq1_m_q8_K  (int n, float * restrict s, size_t bs, const void
 }

 void ggml_vec_dot_iq4_nl_q8_0(int n, float * restrict s, size_t bs, const void * restrict vx, size_t bx, const void * restrict vy, size_t by, int nrc) {
+#if GGML_USE_IQK_MULMAT
+    if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, nrc, nrc, n, GGML_TYPE_IQ4_NL, vx, bx, GGML_TYPE_Q8_0, vy, by, s, bs, 0, 1)) {
+        return;
+    }
+#endif
     assert(nrc == 1);
     UNUSED(nrc);
     UNUSED(bx);
diff --git a/iqk-quantize.cpp b/iqk-quantize.cpp
index 40eff93f..c10bf02d 100644
--- a/iqk-quantize.cpp
+++ b/iqk-quantize.cpp
@@ -14,6 +14,7 @@
 // limitations under the License.
#include "iqk-quantize.h" +#include "iqk_mul_mat.h" #include "ggml-quants.h" #include "ggml-impl.h" #define GGML_COMMON_IMPL_C @@ -182,14 +183,13 @@ void dequantize_row_iq1_bn(const block_iq1_bn * x, float * y, int64_t k) { int nblock = k / QK_IQ1BN; for (int i = 0; i < nblock; ++i) { - float d = iq1bn_fp8_to_float(x[i].extra & 0xff); - uint8_t extra = x[i].extra >> 8; + uint8_t extra = x[i].extra; auto qh = x[i].qh; auto ql = x[i].ql; for (int k = 0; k < QK_IQ1BN/8; ++k) { uint16_t idx = ql[k] | ((qh[k/2] << (8 - 4*(k%2))) & 0x0f00); uint16_t val = iq1bn_grid_u16[idx]; - float dls = extra & (1 << k) ? -d : d; + float dls = extra & (1 << k) ? -1 : 1; for (int j = 0; j < 8; ++j) y[j] = dls * (((val >> 2*j) & 3) - 1); y += 8; } @@ -287,6 +287,10 @@ void ggml_vec_dot_iq1_bn_q8_K64(int n, float * s, size_t bs, const void * vx, si static_assert(QK_IQ1BN == 64, "This dot product implementation for iq1_bn requires a block size of 64"); + if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, 1, 1, n, GGML_TYPE_IQ1_BN, vx, 0, GGML_TYPE_Q8_K64, vy, 0, s, 0, 0, 1)) { + return; + } + const block_iq1_bn * x = (const block_iq1_bn *)vx; const block_q8_K64 * y = (const block_q8_K64 *)vy; int nblock = n / QK_IQ1BN; @@ -322,6 +326,7 @@ void ggml_vec_dot_iq1_bn_q8_K64(int n, float * s, size_t bs, const void * vx, si void ggml_vec_dot_iq2_bn_q8_K64(int n, float * s, size_t bs, const void * vx, size_t bx, const void * vy, size_t by, int nrc) { + GGML_ASSERT(nrc == 1); GGML_UNUSED(bs); GGML_UNUSED(bx); GGML_UNUSED(by); @@ -329,29 +334,58 @@ void ggml_vec_dot_iq2_bn_q8_K64(int n, float * s, size_t bs, const void * vx, si static_assert(QK_IQ1BN == 64, "This dot product implementation for iq2_bn requires a block size of 64"); + if (iqk_mul_mat(GGML_TASK_TYPE_COMPUTE, 1, 1, n, GGML_TYPE_IQ2_BN, vx, 0, GGML_TYPE_Q8_K64, vy, 0, s, 0, 0, 1)) { + return; + } + constexpr int Nj = QK_IQ1BN/4; const block_iq2_bn * x = (const block_iq2_bn *)vx; - const block_q8_K64 * y = (const block_q8_K64 *)vy; int nblock = n / QK_IQ1BN; - float sumf = 0; + const float * d = (const float *)vy; + const int8_t * q8 = (const int8_t *)(d + 4); + + int sum[16] = { }; + int sum0[4] = { }; for (int i = 0; i < nblock; ++i) { - auto q8 = y[i].qs; - int s0 = 0, s1 = 0, s2 = 0, s3 = 0, s4 = 0; - for (int j = 0; j < Nj; ++j) { - s1 += q8[j+ 0] * (x[i].qs[j] & 0x03); - s2 += q8[j+1*Nj] * (x[i].qs[j] & 0x0c); - s3 += q8[j+2*Nj] * (x[i].qs[j] & 0x30); - s4 += q8[j+3*Nj] * (x[i].qs[j] & 0xc0); - s0 += q8[j] + q8[j+1*Nj] + q8[j+2*Nj] + q8[j+3*Nj]; + for (int j = 0; j < Nj/4; ++j) { + for (int l = 0; l < 4; ++l) { + sum[4*j + 0] += q8[4*j + l + 0] * (x[i].qs[4*j+l] & 0x03); + sum[4*j + 1] += q8[4*j + l + 1*Nj] * (x[i].qs[4*j+l] & 0x0c); + sum[4*j + 2] += q8[4*j + l + 2*Nj] * (x[i].qs[4*j+l] & 0x30); + sum[4*j + 3] += q8[4*j + l + 3*Nj] * (x[i].qs[4*j+l] & 0xc0); + sum0[j] += q8[4*j + l] + q8[4*j + l + 1*Nj] + q8[4*j + l + 2*Nj] + q8[4*j + l + 3*Nj]; + } } - sumf += y[i].d * (s1 + 0.25f*s2 + 0.0625*s3 + 0.015625*s4 - s0); + q8 += QK_IQ1BN; } + float sumf = 0; + for (int j = 0; j < 4; ++j) { + sumf += d[j] * (sum[4*j + 0] + 0.25f*sum[4*j + 1] + 0.0625*sum[4*j + 2] + 0.015625*sum[4*j + 3] - sum0[j]); + } *s = sumf; + //const block_q8_K64 * y = (const block_q8_K64 *)vy; + //float sumf = 0; + + //for (int i = 0; i < nblock; ++i) { + // auto q8 = y[i].qs; + // int s0 = 0, s1 = 0, s2 = 0, s3 = 0, s4 = 0; + // for (int j = 0; j < Nj; ++j) { + // s1 += q8[j+ 0] * (x[i].qs[j] & 0x03); + // s2 += q8[j+1*Nj] * (x[i].qs[j] & 0x0c); + // s3 += q8[j+2*Nj] * (x[i].qs[j] & 0x30); + // 
+    //        s4 += q8[j+3*Nj] * (x[i].qs[j] & 0xc0);
+    //        s0 += q8[j] + q8[j+1*Nj] + q8[j+2*Nj] + q8[j+3*Nj];
+    //    }
+    //    sumf += y[i].d * (s1 + 0.25f*s2 + 0.0625*s3 + 0.015625*s4 - s0);
+    //}
+
+    //*s = sumf;
+
 }

 void quantize_row_q8_K64_reference(const float * x, block_q8_K64 * y, int64_t k) {
diff --git a/tests/test-quantize-fns.cpp b/tests/test-quantize-fns.cpp
index e690ac6c..cf4664b3 100644
--- a/tests/test-quantize-fns.cpp
+++ b/tests/test-quantize-fns.cpp
@@ -8,6 +8,7 @@
 #include <stdio.h>
 #include <string>
 #include <vector>
+#include <random>

 #if defined(_MSC_VER)
 #pragma warning(disable: 4244 4267) // possible loss of data
@@ -30,6 +31,14 @@ static void generate_data(float offset, size_t n, float * dst) {
         dst[i] = 0.1 + 2*cosf(i + offset);
     }
 }
+static void generate_bitnet_data(size_t n, float * dst) {
+    std::mt19937 rndm(1234);
+    for (size_t i = 0; i < n; i++) {
+        auto r = rndm();
+        dst[i] = r > std::mt19937::max()/2 ? 0.f : r < std::mt19937::max()/4 ? -1.f : 1.f;
+    }
+}
+
 // Calculate RMSE between two float arrays
 static float array_rmse(const float * a1, const float * a2, size_t n) {
@@ -83,7 +92,7 @@ static float dot_product_error(

     auto vdot = ggml_internal_get_type_traits(qfns.vec_dot_type);

-    qfns.from_float(test_data1, tmp_q1.data(), test_size);
+    qfns.from_float_reference(test_data1, tmp_q1.data(), test_size);
     vdot.from_float(test_data2, tmp_q2.data(), test_size);

     float result = INFINITY;
@@ -112,9 +121,11 @@ int main(int argc, char * argv[]) {

     std::vector<float> test_data(test_size);
     std::vector<float> test_data2(test_size);
+    std::vector<float> test_data_bitnet(test_size);

     generate_data(0.0, test_data.size(), test_data.data());
     generate_data(1.0, test_data2.size(), test_data2.data());
+    generate_bitnet_data(test_data_bitnet.size(), test_data_bitnet.data());

     // Initialize GGML, ensures float conversion tables are initialized
     struct ggml_init_params ggml_params = {
@@ -136,13 +147,21 @@ int main(int argc, char * argv[]) {
             continue;
         }

+        auto test_data_quantize = test_data.data();
+        auto test_data_vecdot   = test_data2.data();
         const ggml_type ei = (ggml_type)i;
+        if (ei == GGML_TYPE_IQ1_BN || ei == GGML_TYPE_IQ2_BN) {
+            test_data_quantize = test_data_bitnet.data();
+            test_data_vecdot   = test_data_bitnet.data();
+            //printf("Skipping %s because test data does not satisfy Bitnet requirements\n", ggml_type_name(ei));
+            //continue;
+        }
         printf("Testing %s\n", ggml_type_name((ggml_type) i));
         ggml_quantize_init(ei);

         if (qfns.from_float && qfns.to_float) {
-            const float total_error = total_quantization_error(qfns, test_size, test_data.data());
+            const float total_error = total_quantization_error(qfns, test_size, test_data_quantize);
             const float max_quantization_error =
                 type == GGML_TYPE_Q2_K  ? MAX_QUANTIZATION_TOTAL_ERROR_2BITS :
                 type == GGML_TYPE_IQ2_S ? MAX_QUANTIZATION_TOTAL_ERROR_2BITS :
@@ -155,14 +174,14 @@ int main(int argc, char * argv[]) {
                 printf("%5s absolute quantization error: %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], total_error);
             }

-            const float reference_error = reference_quantization_error(qfns, test_size, test_data.data());
+            const float reference_error = reference_quantization_error(qfns, test_size, test_data_quantize);
             failed = !(reference_error < MAX_QUANTIZATION_REFERENCE_ERROR);
             num_failed += failed;
             if (failed || verbose) {
                 printf("%5s reference implementation error: %s (%f)\n", ggml_type_name(type), RESULT_STR[failed], reference_error);
             }

-            const float vec_dot_error = dot_product_error(qfns, test_size, test_data.data(), test_data2.data());
+            const float vec_dot_error = dot_product_error(qfns, test_size, test_data.data(), test_data_vecdot);
             const float max_allowed_error = type == GGML_TYPE_Q2_K || type == GGML_TYPE_IQ2_XS || type == GGML_TYPE_IQ2_XXS ||
                                             type == GGML_TYPE_IQ3_XXS || type == GGML_TYPE_IQ3_S || type == GGML_TYPE_IQ2_S
                                             ? MAX_DOT_PRODUCT_ERROR_LOWBIT
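A note on the rewritten ggml_vec_dot_iq2_bn_q8_K64 above: each qs byte packs four 2-bit digits, and the scalar code never shifts them out. It accumulates the masked values (& 0x03, & 0x0c, & 0x30, & 0xc0) and undoes the implicit left shifts only at the end via the weights 1, 0.25 = 2^-2, 0.0625 = 2^-4 and 0.015625 = 2^-6; subtracting sum0 then maps the stored codes {0,1,2} onto the ternary weights {-1,0,+1}. The self-contained check below verifies that identity; it is an illustration, not part of the commit:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // For one packed byte and one activation value q8, accumulating the masked
    // (unshifted) digits with weights 4^-k gives the same result as extracting
    // each digit explicitly; subtracting 4*q8 (the role of sum0 in the kernel)
    // shifts every code down by one, i.e. {0,1,2} becomes {-1,0,+1}.
    int main() {
        const float w[4] = { 1.0f, 0.25f, 0.0625f, 0.015625f };  // 4^0 .. 4^-3
        const int8_t q8  = 3;                                    // arbitrary activation
        for (int byte = 0; byte < 256; ++byte) {
            float masked = 0, shifted = 0;
            for (int k = 0; k < 4; ++k) {
                int digit = (byte >> 2*k) & 3;                  // 2-bit code
                masked  += w[k] * (q8 * (byte & (3 << 2*k)));   // what the kernel sums
                shifted += q8 * (digit - 1);                    // (code - 1) times q8
            }
            assert(masked - 4*q8 == shifted);                   // exact in float here
        }
        std::printf("ok\n");
        return 0;
    }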