summary | refs | log | tree | commit | diff
path: root/llama.h
diff options
context:
space:
mode:
authorMichael Podvitskiy <podvitskiymichael@gmail.com>2024-03-14 17:21:56 +0100
committerGitHub <noreply@github.com>2024-03-14 18:21:56 +0200
commit69ff61397d2b7b550dcdda4a35b35128892408b0 (patch)
tree70b5bac95a58dcf734e8035c5fd47b92ceb510a1 /llama.h
parent044ec4b2a567f649459ccd20af2f387c784faa51 (diff)
llama : support models without vocabulary (#5798)
* additional methods to read model and ctx parameters * vocab size as a part of a model metadata * models without vocabulary, convert.py part * models without vocabulary, llama.cpp part * PR clean up * converter script fixes * llama_vocab_type update (renamed the new key) * pr review fixes * revert function renaming * one more NoVocab assert
Diffstat (limited to 'llama.h')
-rw-r--r-- llama.h | 7
1 files changed, 4 insertions, 3 deletions
diff --git a/llama.h b/llama.h
index 2d16cc9b..90aa5372 100644
--- a/llama.h
+++ b/llama.h
@@ -59,9 +59,10 @@ extern "C" {
typedef int32_t llama_seq_id;
enum llama_vocab_type {
- LLAMA_VOCAB_TYPE_SPM = 0, // SentencePiece
- LLAMA_VOCAB_TYPE_BPE = 1, // Byte Pair Encoding
- LLAMA_VOCAB_TYPE_WPM = 2, // WordPiece
+ LLAMA_VOCAB_TYPE_NONE = 0, // For models without vocab
+ LLAMA_VOCAB_TYPE_SPM = 1, // SentencePiece
+ LLAMA_VOCAB_TYPE_BPE = 2, // Byte Pair Encoding
+ LLAMA_VOCAB_TYPE_WPM = 3, // WordPiece
};
// note: these values should be synchronized with ggml_rope