author    compilade <113953597+compilade@users.noreply.github.com>    2024-02-28 03:52:56 -0500
committer GitHub <noreply@github.com>    2024-02-28 10:52:56 +0200
commit    adcb12a9bad87bc96f2f158c95892b3d04aa7ffb (patch)
tree      5b32e4c68a55d074658f4658da25fb5583f7889b
parent    177628bfd85565070916ad66a5ac4071ee0527d8 (diff)
llama : fix non-quantization of expert gating tensors (#5754)
This reverts a single line from #5475
-rw-r--r--    llama.cpp    3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index 356ca107..893bcdbc 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11162,7 +11162,8 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         quantize &= !params->only_copy;
 
         // do not quantize expert gating tensors
-        quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_FFN_GATE_INP, "weight");
+        // NOTE: can't use LLM_TN here because the layer number is not known
+        quantize &= name.find("ffn_gate_inp.weight") == std::string::npos;
 
         // do not quantize positional embeddings and token types (BERT)
         quantize &= name != LLM_TN(model.arch)(LLM_TENSOR_POS_EMBD, "weight");
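
A minimal standalone sketch (not part of the commit) of why the reverted exact-name check missed these tensors: expert gating weights exist once per layer, so their full names carry a block index (the names below assume llama.cpp's "blk.N.ffn_gate_inp.weight" convention), and only a substring search over the name matches every layer. The is_expert_gating_tensor helper is hypothetical and simply mirrors the check added in the hunk above.

#include <cassert>
#include <string>

// Hypothetical helper mirroring the substring check from the hunk above:
// a tensor is left unquantized when its name contains the gating suffix.
static bool is_expert_gating_tensor(const std::string & name) {
    return name.find("ffn_gate_inp.weight") != std::string::npos;
}

int main() {
    // Per-layer names carry the block index, so an exact comparison against a
    // single fixed name (what LLM_TN yields without a layer number) cannot
    // match them, while the substring check matches the tensor in every layer.
    assert( is_expert_gating_tensor("blk.0.ffn_gate_inp.weight"));
    assert( is_expert_gating_tensor("blk.31.ffn_gate_inp.weight"));
    assert(!is_expert_gating_tensor("blk.0.ffn_gate.weight"));
    return 0;
}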