author     Georgi Gerganov <ggerganov@gmail.com>  2024-04-08 22:25:49 +0300
committer  GitHub <noreply@github.com>  2024-04-08 22:25:49 +0300
commit     cc4a95426d17417d3c83f12bdb514fbe8abe2a88 (patch)
tree       31d35c2e8430484dba54f675ab6b711773f72ad3
parent     cecd8d3c98b48f51aaa1d4c729e55bd319f6799c (diff)
llama : fix attention layer count sanity check (#6550)
* llama : fix attention layer count sanity check

* llama : fix parentheses in attention layer count sanity check

There was otherwise a warning when compiling.

---------

Co-authored-by: Francis Couture-Harpin <git@compilade.net>
-rw-r--r--  llama.cpp | 9
1 file changed, 7 insertions, 2 deletions
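For context on the "warning when compiling" mentioned in the commit message: GGML_ASSERT conditions conventionally take the `cond && "message"` form, and `&&` binds tighter than `||`, so writing the check without its own parentheses makes compilers emit a mixed-precedence warning (-Wparentheses / -Wlogical-op-parentheses). The following is a minimal standalone sketch of that issue using a stand-in MY_ASSERT macro rather than the actual GGML_ASSERT definition from ggml.h:

    // Sketch only: MY_ASSERT stands in for GGML_ASSERT.
    #include <cassert>
    #define MY_ASSERT(x) assert(x)

    int main() {
        int n_attention_wv = 0;  // 0 for Mamba models
        int n_layer        = 32; // equals n_attention_wv for Transformer models

        // Warns: parses as `a == 0 || (a == b && "msg")`, mixing || and &&
        // without explicit grouping.
        // MY_ASSERT(n_attention_wv == 0 || n_attention_wv == n_layer && "n_attention_wv is unexpected");

        // Clean: group the real condition first, then attach the message.
        MY_ASSERT((n_attention_wv == 0 || n_attention_wv == n_layer) && "n_attention_wv is unexpected");
        return 0;
    }

Because the string literal is always non-null, the grouping does not change the assert's outcome here; the extra parentheses in the patch below silence the warning and make the intended grouping explicit.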
diff --git a/llama.cpp b/llama.cpp
index 89ea3fe1..b16ddc64 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -13468,7 +13468,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
const std::string name = ggml_get_name(meta);
// TODO: avoid hardcoded tensor names - use the TN_* constants
- if (name.find("attn_v.weight") != std::string::npos || name.find("attn_qkv.weight") != std::string::npos) {
+ if (name.find("attn_v.weight") != std::string::npos ||
+ name.find("attn_qkv.weight") != std::string::npos) {
++qs.n_attention_wv;
} else if (name == LLM_TN(model.arch)(LLM_TENSOR_OUTPUT, "weight")) {
qs.has_output = true;
@@ -13478,7 +13479,11 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
qs.n_ffn_down = qs.n_ffn_gate = qs.n_ffn_up = (int)model.hparams.n_layer;
// sanity checks
- GGML_ASSERT(qs.n_attention_wv == (int)model.hparams.n_layer && "n_attention_wv != n_layer is unexpected");
+ //
+ // - qs.n_attention_wv == 0 for Mamba models
+ // - qs.n_attention_wv == model.hparams.n_layer for Transformer models
+ //
+ GGML_ASSERT((qs.n_attention_wv == 0 || qs.n_attention_wv == (int)model.hparams.n_layer) && "n_attention_wv is unexpected");
size_t total_size_org = 0;
size_t total_size_new = 0;