summary refs log tree commit diff
diff options
context:
space:
mode:
author Iwan Kawrakow <iwan.kawrakow@gmail.com> 2024-06-26 12:05:57 +0200
committer Iwan Kawrakow <iwan.kawrakow@gmail.com> 2024-06-26 12:05:57 +0200
commit 71725a918f9edee559a978397779486dce7c703a (patch)
tree a6700309b0c4eadd6ca3b992458e933b2b2b2a76
parent 641dd6bc6883a3a3de27d6f6ccac3e489ec7641c (diff)
bitnet: fold V scale into rms_norm
-rw-r--r-- llama.cpp 5
1 files changed, 3 insertions, 2 deletions
diff --git a/llama.cpp b/llama.cpp
index ddc58683..2bebf894 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11870,11 +11870,12 @@ struct llm_build_context {
// B1.V
struct ggml_tensor * Vcur = ggml_mul_mat(ctx0, model.layers[il].wv, cur);
float v_scale; std::memcpy(&v_scale, model.layers[il].wv->op_params, sizeof(float));
- Vcur = ggml_scale(ctx0, Vcur, v_scale);
cb(Vcur, "Vcur", il);
if (model.layers[il].bv) {
+ Vcur = ggml_scale(ctx0, Vcur, v_scale);
Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
cb(Vcur, "Vcur", il);
+ v_scale = 1;
}
Qcur = ggml_rope_ext(
@@ -11960,7 +11961,7 @@ struct llm_build_context {
cur_attn = llm_build_norm(ctx0, cur_attn, hparams,
model.layers[il].attn_sub_norm, NULL,
- LLM_NORM_RMS, cb, il); //, 1/(v_scale*v_scale));
+ LLM_NORM_RMS, cb, il, 1/(v_scale*v_scale));
cb(cur_attn, "attn_sub_norm", il);
ggml_build_forward_expand(gf, cur_attn);