author | cebtenzzre <cebtenzzre@gmail.com> | 2023-11-01 18:04:33 -0400
---|---|---
committer | GitHub <noreply@github.com> | 2023-11-01 18:04:33 -0400
commit | 898aeca90a9bb992f506234cf3b8b7f7fa28a1df (patch) |
tree | 125f8a9b466efd4534ecd3e64419ece001c86a7d /examples/finetune/finetune.cpp |
parent | c43c2da8afacaddfe51c09b21dbd9922cd0ea46b (diff) |
llama : implement YaRN RoPE scaling (#2268)
Co-authored-by: cebtenzzre <cebtenzzre@gmail.com>
Co-authored-by: Jeffrey Quesnelle <jquesnelle@gmail.com>
Diffstat (limited to 'examples/finetune/finetune.cpp')
-rw-r--r-- | examples/finetune/finetune.cpp | 5 |
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 60c7faa7..649a3b7c 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -642,8 +642,9 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
         const int rope_mode = 0;
 
         return ggml_rope_custom(ctx,
-            t, KQ_pos, n_rot, rope_mode, n_ctx,
-            rope_freq_base, rope_freq_scale);
+            t, KQ_pos, n_rot, rope_mode, n_ctx, 0,
+            rope_freq_base, rope_freq_scale, 0.0f, 0.0f, 0.0f, 0.0f
+        );
     };
 
     set_name(tokens_input, "tokens_input");
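The change adapts this call site to the extended `ggml_rope_custom` signature introduced by the YaRN commit: one new integer argument after `n_ctx` and four new float arguments after `rope_freq_scale`. The annotated sketch below is inferred from the positional arguments above; the parameter names (`n_orig_ctx`, `ext_factor`, `attn_factor`, `beta_fast`, `beta_slow`) follow YaRN terminology and are an assumption, not a verbatim copy of ggml.h.

```c
// Sketch of the post-YaRN ggml_rope_custom signature, inferred from the
// positional arguments at this call site. Names of the new parameters are
// assumptions based on YaRN terminology.
struct ggml_tensor * ggml_rope_custom(
        struct ggml_context * ctx,
        struct ggml_tensor  * a,           // tensor to rotate (t above)
        struct ggml_tensor  * b,           // token positions (KQ_pos above)
        int                   n_dims,      // n_rot above
        int                   mode,        // rope_mode above
        int                   n_ctx,
        int                   n_orig_ctx,  // new: original training context; 0 at this call site
        float                 freq_base,   // rope_freq_base above
        float                 freq_scale,  // rope_freq_scale above
        float                 ext_factor,  // new: YaRN extrapolation mix; 0.0f leaves YaRN off
        float                 attn_factor, // new: YaRN attention magnitude scale
        float                 beta_fast,   // new: YaRN correction-range parameter
        float                 beta_slow);  // new: YaRN correction-range parameter
```

Since the finetune example passes zeros for all of the new arguments, the edit only adapts the call to the new signature and appears to keep the original non-YaRN RoPE behavior.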