From c85e139c68b0b0b15656cc0bc9618d632ed18822 Mon Sep 17 00:00:00 2001
From: Iwan Kawrakow
Date: Sun, 28 Jul 2024 19:43:18 +0300
Subject: iq2_k: Basics

Quantize/dequantize, CUDA dequantize, AVX512 iqk_mul_mat.
---
 examples/quantize/quantize.cpp | 1 +
 1 file changed, 1 insertion(+)

(limited to 'examples')

diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 2397e202..5f599c65 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -40,6 +40,7 @@ static const std::vector<struct quant_option> QUANT_OPTIONS = {
     { "Q3_K_L", LLAMA_FTYPE_MOSTLY_Q3_K_L, " 3.35G, +0.1764 ppl @ LLaMA-v1-7B", },
     { "IQ4_NL", LLAMA_FTYPE_MOSTLY_IQ4_NL, " 4.50 bpw non-linear quantization", },
     { "IQ4_XS", LLAMA_FTYPE_MOSTLY_IQ4_XS, " 4.25 bpw non-linear quantization", },
+    { "IQ2_K",  LLAMA_FTYPE_MOSTLY_IQ2_K,  " 2.375 bpw non-linear quantization", },
     { "IQ4_K",  LLAMA_FTYPE_MOSTLY_IQ4_K,  " 4.5 bpw non-linear quantization", },
     { "Q4_K",   LLAMA_FTYPE_MOSTLY_Q4_K_M, "alias for Q4_K_M", },
     { "Q4_K_S", LLAMA_FTYPE_MOSTLY_Q4_K_S, " 3.59G, +0.0992 ppl @ LLaMA-v1-7B", },
-- 
cgit v1.2.3
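
Editor's note on the 2.375 bpw figure: the ggml-side definition of IQ2_K is outside the 'examples' filter shown above, but the bit budget is easy to sanity-check. Below is a minimal C++ sketch of a 256-weight super-block layout that comes out to exactly 2.375 bits per weight; the field names and the exact packing of scales are assumptions for illustration, not taken from this commit.

    // Sketch only: not the actual block_iq2_k definition from this commit.
    #include <cstdint>

    #define QK_K 256                  // weights per super-block, as in the other k-quants

    typedef uint16_t ggml_half;       // stand-in for ggml's fp16 storage type

    // Hypothetical 2.375 bpw block: 76 bytes per 256 weights -> 76*8/256 = 2.375 bits/weight
    typedef struct {
        ggml_half d;                  // fp16 super-block scale                      (2 bytes)
        uint16_t  extra;              // per-sub-block flag/shift bits               (2 bytes)
        uint8_t   scales[QK_K/32];    // packed sub-block scales                     (8 bytes)
        uint8_t   qs[QK_K/4];         // 2-bit indices into a non-uniform 4-value grid (64 bytes)
    } block_iq2_k_sketch;

    static_assert(sizeof(block_iq2_k_sketch) == 76, "2.375 bpw = 76 bytes per 256 weights");

With the QUANT_OPTIONS entry in place, passing IQ2_K as the type name to the quantize example selects LLAMA_FTYPE_MOSTLY_IQ2_K in the usual way; the CUDA dequantize and AVX512 iqk_mul_mat parts mentioned in the subject live outside examples/ and are not shown here.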