path: root/include/llama.h
author    saood06 <saood05@gmail.com>          2025-05-09 02:09:59 -0500
committer GitHub <noreply@github.com>          2025-05-09 10:09:59 +0300
commit    bc6ae515ceb14eeaf198e00251a9689539cea176 (patch)
tree      82ca4aa8afa0dce38e0f3cb6fa9c7a78ec0065d2 /include/llama.h
parent    4084ca7331611da4426d781a15a6ffa68312759e (diff)
Support for Llama-3-Nemotron models (#377)
* Conflict resolution
* Changes to make it work and add LongRoPE support
* Changes to n_attention_wv rule
* Untested support of 253B
* DeciLMCausalModel now reads rope_theta from config.json properly
* Remove errant Granite mentions
* Better n_attention_wv rule
* Update vocab.py

---------

Co-authored-by: Yee Man Chan <ymchan@gmail.com>
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'include/llama.h')
-rw-r--r--  include/llama.h | 3
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/include/llama.h b/include/llama.h
index d7376d7d..e2901861 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -230,7 +230,8 @@ extern "C" {
LLAMA_ROPE_SCALING_TYPE_NONE = 0,
LLAMA_ROPE_SCALING_TYPE_LINEAR = 1,
LLAMA_ROPE_SCALING_TYPE_YARN = 2,
- LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN,
+ LLAMA_ROPE_SCALING_TYPE_LONGROPE = 3,
+ LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE,
};
enum llama_pooling_type {
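
This commit extends the RoPE scaling enum with a LongRoPE entry and bumps the MAX_VALUE sentinel accordingly. As a minimal sketch (not part of this commit), the new constant could be requested explicitly when creating a context; this assumes the usual llama_context_params layout in llama.h, where rope_scaling_type takes a value from enum llama_rope_scaling_type:

```c
#include "llama.h"

// Hypothetical helper (illustration only): build context params that ask for
// LongRoPE scaling. Models whose GGUF metadata already specifies a scaling
// type typically leave this at LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED instead.
static struct llama_context_params make_longrope_params(void) {
    struct llama_context_params params = llama_context_default_params();
    params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LONGROPE;
    return params;
}
```

Because MAX_VALUE now tracks LLAMA_ROPE_SCALING_TYPE_LONGROPE, any existing range checks of the form `type <= LLAMA_ROPE_SCALING_TYPE_MAX_VALUE` continue to accept the new entry without further changes.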