diff options
author | xaedes <xaedes@gmail.com> | 2023-10-02 15:15:45 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-10-02 16:15:45 +0300 |
commit | a03ce38455544121c5c00cf845def1443acd6ac8 (patch) | |
tree | 7dc09b33cace5a34a058dc99f806b7cad2274b53 | |
parent | a84767698495d72e44044f1f6db1c1cc721bfd15 (diff) |
finetune : fix #3404 (#3437)
The shapes used when initializing the model for GQA models were wrong.
-rw-r--r-- | examples/finetune/finetune.cpp | 4 |
1 file changed, 2 insertions, 2 deletions
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index 8ca1874d..9ae4bc19 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -332,8 +332,8 @@ static void init_model(struct llama_model * input, struct my_llama_model * model assert_shape_1d(layer.attention_norm, hparams.n_embd); assert_shape_2d(layer.wq, hparams.n_embd, hparams.n_embd); - assert_shape_2d(layer.wk, hparams.n_embd, hparams.n_embd); - assert_shape_2d(layer.wv, hparams.n_embd, hparams.n_embd); + assert_shape_2d(layer.wk, hparams.n_embd, hparams.n_embd_gqa()); + assert_shape_2d(layer.wv, hparams.n_embd, hparams.n_embd_gqa()); assert_shape_2d(layer.wo, hparams.n_embd, hparams.n_embd); assert_shape_1d(layer.ffn_norm, hparams.n_embd); assert_shape_2d(layer.w1, hparams.n_embd, hparams.n_ff); |