From bc6ae515ceb14eeaf198e00251a9689539cea176 Mon Sep 17 00:00:00 2001
From: saood06
Date: Fri, 9 May 2025 02:09:59 -0500
Subject: Support for Llama-3-Nemotron models (#377)

* conflict resolution

* Changes to make work and add longrope support

* Changes to n_attention_wv rule

* Untested support of 253B

* DeciLMCausalModel now reads rope_theta from config.json properly

* Remove errant Granite mentions

* Better n_attention_wv rule

* Update vocab.py

---------

Co-authored-by: Yee Man Chan
Co-authored-by: Iwan Kawrakow
---
 include/llama.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'include/llama.h')

diff --git a/include/llama.h b/include/llama.h
index d7376d7d..e2901861 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -230,7 +230,8 @@ extern "C" {
         LLAMA_ROPE_SCALING_TYPE_NONE      = 0,
         LLAMA_ROPE_SCALING_TYPE_LINEAR    = 1,
         LLAMA_ROPE_SCALING_TYPE_YARN      = 2,
-        LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_YARN,
+        LLAMA_ROPE_SCALING_TYPE_LONGROPE  = 3,
+        LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE,
     };
 
     enum llama_pooling_type {
--
cgit v1.2.3
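
Editor's note: the hunk above extends the public rope-scaling enum with a LONGROPE value
and bumps MAX_VALUE so range checks keep accepting it. Below is a minimal sketch, not part
of this patch, of how a loader might map the rope-scaling name from a model's config.json
onto the extended enum. The helper rope_scaling_type_from_string and its fallback behaviour
are assumptions for illustration, not the repository's actual API.

    // Illustrative sketch only -- not from this patch. Maps the rope-scaling
    // name a conversion script might record (e.g. "longrope" for
    // Llama-3-Nemotron) onto the enum extended above. The helper name and the
    // fallback-to-NONE behaviour are hypothetical.
    #include <string>

    enum llama_rope_scaling_type {
        LLAMA_ROPE_SCALING_TYPE_NONE      = 0,
        LLAMA_ROPE_SCALING_TYPE_LINEAR    = 1,
        LLAMA_ROPE_SCALING_TYPE_YARN      = 2,
        LLAMA_ROPE_SCALING_TYPE_LONGROPE  = 3, // added by this commit
        LLAMA_ROPE_SCALING_TYPE_MAX_VALUE = LLAMA_ROPE_SCALING_TYPE_LONGROPE,
    };

    static llama_rope_scaling_type rope_scaling_type_from_string(const std::string & name) {
        if (name == "none")     return LLAMA_ROPE_SCALING_TYPE_NONE;
        if (name == "linear")   return LLAMA_ROPE_SCALING_TYPE_LINEAR;
        if (name == "yarn")     return LLAMA_ROPE_SCALING_TYPE_YARN;
        if (name == "longrope") return LLAMA_ROPE_SCALING_TYPE_LONGROPE;
        return LLAMA_ROPE_SCALING_TYPE_NONE; // unknown strings fall back to no scaling
    }

Keeping MAX_VALUE equal to the highest real enumerator means any validation written as
`type <= LLAMA_ROPE_SCALING_TYPE_MAX_VALUE` automatically admits the new value without
further changes, which is why the patch updates both lines together.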