From 5b09797321430f08caf0473143a962916ab2ea89 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sat, 9 Mar 2024 15:53:59 +0200
Subject: ggml : remove old quantization functions (#5942)

* ggml : remove old quantization functions

ggml-ci

* ggml : simplify ggml_quantize_chunk

ggml-ci

* ggml : restrict correctness

ggml-ci

* ggml : remove hist data from the quantization API

ggml-ci

* tests : remove hist usage in test-backend-ops

ggml-ci

* vulkan : remove hist and fix typo
---
 examples/llava/clip.cpp | 55 +------------------------------------------------
 1 file changed, 1 insertion(+), 54 deletions(-)

(limited to 'examples/llava/clip.cpp')

diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index ef9e4ba7..6653b815 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -1862,7 +1862,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
 
     std::vector<uint8_t> work(512);
     std::vector<float> conv_buf(512);
-    std::vector<int64_t> hist_all(1 << 4, 0);
     size_t total_size_org = 0;
     size_t total_size_new = 0;
 
@@ -1917,48 +1916,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
             }
             new_data = work.data();
 
-            std::vector<int64_t> hist_cur(1 << 4, 0);
-
-            switch (new_type) {
-                case GGML_TYPE_Q4_0: {
-                    new_size = ggml_quantize_q4_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q4_1: {
-                    new_size = ggml_quantize_q4_1(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q5_0: {
-                    new_size = ggml_quantize_q5_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q5_1: {
-                    new_size = ggml_quantize_q5_1(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q8_0: {
-                    new_size = ggml_quantize_q8_0(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q2_K: {
-                    new_size = ggml_quantize_q2_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q3_K: {
-                    new_size = ggml_quantize_q3_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q4_K: {
-                    new_size = ggml_quantize_q4_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q5_K: {
-                    new_size = ggml_quantize_q5_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                case GGML_TYPE_Q6_K: {
-                    new_size = ggml_quantize_q6_K(f32_data, new_data, n_elms, cur->ne[0], hist_cur.data());
-                } break;
-                default: {
-                    fprintf(stderr, "%s: unsupported quantization type %d\n", __func__, new_type);
-                    return false;
-                }
-            }
-
-            for (size_t j = 0; j < hist_cur.size(); ++j) {
-                hist_all[j] += hist_cur[j];
-            }
+            new_size = ggml_quantize_chunk(new_type, f32_data, new_data, 0, n_elms/cur->ne[0], cur->ne[0], nullptr);
         } else {
             new_type = cur->type;
             new_data = cur->data;
@@ -1993,17 +1951,6 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
     {
         printf("%s: original size = %8.2f MB\n", __func__, total_size_org / 1024.0 / 1024.0);
         printf("%s: quantized size = %8.2f MB\n", __func__, total_size_new / 1024.0 / 1024.0);
-
-        int64_t sum_all = 0;
-        for (size_t i = 0; i < hist_all.size(); ++i) {
-            sum_all += hist_all[i];
-        }
-
-        printf("%s: hist: ", __func__);
-        for (size_t i = 0; i < hist_all.size(); ++i) {
-            printf("%5.3f ", hist_all[i] / (float)sum_all);
-        }
-        printf("\n");
     }
 
     return true;
-- 
cgit v1.2.3
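
For reference, the unified entry point used in the hunk above can be exercised on its own roughly as sketched below. This is a sketch against the post-#5942 ggml.h, where ggml_quantize_chunk(type, src, dst, start, nrows, n_per_row, imatrix) replaces the per-type ggml_quantize_q* helpers and the old histogram argument is gone; the use of ggml_row_size() to size the destination buffer is an assumption about the surrounding API, not something this diff shows.

    // Sketch: quantize a small f32 buffer to Q4_0 with the unified API
    // (assumes the post-#5942 ggml.h; ggml_row_size() here is an assumption).
    #include "ggml.h"

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        const int64_t n_per_row = 32; // Q4_0 blocks are 32 values wide
        const int64_t nrows     = 4;
        std::vector<float> src(n_per_row*nrows, 0.5f);

        // one quantized row takes ggml_row_size(type, n_per_row) bytes
        std::vector<uint8_t> dst(ggml_row_size(GGML_TYPE_Q4_0, n_per_row)*nrows);

        // a single call replaces the old per-type switch; the trailing argument is
        // the optional importance matrix (the histogram argument no longer exists)
        const size_t new_size = ggml_quantize_chunk(GGML_TYPE_Q4_0, src.data(), dst.data(),
                                                    /*start =*/ 0, nrows, n_per_row, nullptr);

        printf("quantized %lld floats into %zu bytes\n", (long long) (n_per_row*nrows), new_size);
        return 0;
    }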