diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2024-04-08 16:23:01 +0300 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-04-08 16:23:01 +0300 |
commit | b73e564b16086845a8b4fffd26e22685d3e0c3db (patch) | |
tree | fff3aea250e4ccaa09e4d928a2aad92e1ed8b38c | |
parent | e3c337d87ca650972105a51c6ce302dd236c07ad (diff) |
quantize : fix precedence of cli args (#6541)
-rw-r--r-- | llama.cpp | 4 |
1 files changed, 2 insertions, 2 deletions
@@ -13562,10 +13562,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
             if (!params->pure && ggml_is_quantized(default_type)) {
                 new_type = llama_tensor_get_type(qs, new_type, tensor, ftype);
             }
-            else if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
+            if (params->token_embedding_type < GGML_TYPE_COUNT && strcmp(tensor->name, "token_embd.weight") == 0) {
                 new_type = params->token_embedding_type;
             }
-            else if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
+            if (params->output_tensor_type < GGML_TYPE_COUNT && strcmp(tensor->name, "output.weight") == 0) {
                 new_type = params->output_tensor_type;
             }