From 8ad84b9fab9570c36220cb791f9a67a4d2c7fd2f Mon Sep 17 00:00:00 2001
From: Nexes the Elder <124105151+Nexesenex@users.noreply.github.com>
Date: Thu, 21 Nov 2024 07:12:57 +0100
Subject: Use Q6_0 instead of Q5_1 for tensors incompatible with IQ5_K/Q5_K
 (#116)

---
 src/llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src')

diff --git a/src/llama.cpp b/src/llama.cpp
index 2b9a1b1a..61448319 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -16048,7 +16048,7 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             case GGML_TYPE_IQ4_K:
             case GGML_TYPE_Q4_K:   new_type = GGML_TYPE_Q5_0;   break;
             case GGML_TYPE_IQ5_K:
-            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q5_1;   break;
+            case GGML_TYPE_Q5_K:   new_type = GGML_TYPE_Q6_0;   break;
             case GGML_TYPE_IQ6_K:
             case GGML_TYPE_Q6_K:   new_type = GGML_TYPE_Q8_0;   break;
             default: throw std::runtime_error("\nUnsupported tensor size encountered\n");
--
cgit v1.2.3
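
For context, this hunk sits in the fallback path of `llama_tensor_get_type`, which demotes a tensor to a legacy quant format when it cannot use the requested k-quant/i-quant type (typically because its row size is not a multiple of the 256-wide super-block, which is an assumption here, not stated in the hunk). The commit changes the fallback for IQ5_K/Q5_K from Q5_1 to Q6_0. Below is a minimal, self-contained sketch of the mapping after the patch; the `quant_type` enum and `fallback_incompatible_type` helper are hypothetical stand-ins for illustration, not the actual ggml/llama.cpp API.

```cpp
// Hypothetical, self-contained sketch of the fallback mapping after this patch.
// The real logic lives in llama_tensor_get_type() in src/llama.cpp; the enum
// below only mirrors the ggml type names that appear in the hunk.
#include <cstdio>
#include <stdexcept>

enum class quant_type {
    IQ4_K, Q4_K,        // 4-bit (i)k-quants
    IQ5_K, Q5_K,        // 5-bit (i)k-quants
    IQ6_K, Q6_K,        // 6-bit (i)k-quants
    Q5_0, Q6_0, Q8_0,   // legacy block formats used as fallbacks
};

// Pick a legacy quant for a tensor that cannot use the k-quant block layout.
static quant_type fallback_incompatible_type(quant_type requested) {
    switch (requested) {
        case quant_type::IQ4_K:
        case quant_type::Q4_K:  return quant_type::Q5_0;
        case quant_type::IQ5_K:
        case quant_type::Q5_K:  return quant_type::Q6_0;  // was Q5_1 before this commit
        case quant_type::IQ6_K:
        case quant_type::Q6_K:  return quant_type::Q8_0;
        default: throw std::runtime_error("Unsupported tensor size encountered");
    }
}

int main() {
    // Example: a Q5_K request on an incompatible tensor now falls back to Q6_0.
    printf("Q5_K falls back to Q6_0: %s\n",
           fallback_incompatible_type(quant_type::Q5_K) == quant_type::Q6_0 ? "yes" : "no");
}
```

Presumably the intent is that Q6_0 preserves more of the precision expected from a 5-bit k-quant than Q5_1 does, at a modest increase in size for the affected tensors.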