summary refs log tree commit diff
path: root/llama.h
diff options
context:
space:
mode:
author: Jared Van Bortel <jared@nomic.ai> 2024-02-03 06:22:06 -0500
committer: GitHub <noreply@github.com> 2024-02-03 13:22:06 +0200
commit: 1ec3332ade60aeb1494ace2211cf1a966db6d770 (patch)
tree: 6d676bd61cf6ffcd201fdbfe63adae89d961a235 /llama.h
parent: 6a66c5071a74a96c4f52cf1015a092acd18c3714 (diff)
YaRN : store rope scaling type as int32_t in memory (#5285)
* YaRN : store rope scaling type as int32_t in memory
* llama : store mapped names as const char *
Diffstat (limited to 'llama.h')
-rw-r--r-- llama.h | 2
1 file changed, 1 insertion, 1 deletion
diff --git a/llama.h b/llama.h
index 9a60e9bf..cec4158b 100644
--- a/llama.h
+++ b/llama.h
@@ -213,7 +213,7 @@ extern "C" {
uint32_t n_batch; // prompt processing maximum batch size
uint32_t n_threads; // number of threads to use for generation
uint32_t n_threads_batch; // number of threads to use for batch processing
- int8_t rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
+ int32_t rope_scaling_type; // RoPE scaling type, from `enum llama_rope_scaling_type`
// ref: https://github.com/ggerganov/llama.cpp/pull/2054
float rope_freq_base; // RoPE base frequency, 0 = from model