summaryrefslogtreecommitdiff
path: root/llama.cpp
diff options
context:
space:
mode:
authorKawrakow <48489457+ikawrakow@users.noreply.github.com>2024-01-17 12:36:37 +0200
committerGitHub <noreply@github.com>2024-01-17 12:36:37 +0200
commit2b3a665d3917edf393761a24c4835447894df74a (patch)
tree254942a7222314ac60c406842912ae092d724000 /llama.cpp
parent75632936659772d5b2ce54b0b65319fecbaac2e6 (diff)
llama : use Q4_K for attn_v for Q2_K_S when n_gqa >= 4 (#4996)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r-- llama.cpp | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index 765d20dd..2c5983c6 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -8477,7 +8477,12 @@ static ggml_type get_k_quant_type(quantize_state_internal & qs, ggml_type new_ty
}
else if (name == "token_embd.weight") new_type = GGML_TYPE_Q2_K;
} else if (name.find("attn_v.weight") != std::string::npos) {
- if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) new_type = GGML_TYPE_Q3_K;
+ if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K) {
+ new_type = qs.model.hparams.n_gqa() >= 4 ? GGML_TYPE_Q4_K : GGML_TYPE_Q3_K;
+ }
+ else if (ftype == LLAMA_FTYPE_MOSTLY_Q2_K_S && qs.model.hparams.n_gqa() >= 4) {
+ new_type = GGML_TYPE_Q4_K;
+ }
else if (ftype == LLAMA_FTYPE_MOSTLY_Q3_K_M) {
new_type = qs.i_attention_wv < 2 ? GGML_TYPE_Q5_K : GGML_TYPE_Q4_K;
}