author    | DAN™ <dranger003@gmail.com> | 2024-05-08 06:43:23 -0400
committer | GitHub <noreply@github.com> | 2024-05-08 13:43:23 +0300
commit    | 4cd621c26de2095cd7c4464bdec5fe2e696ef3f3 (patch)
tree      | 9949775dc9a6ae5c5d1d0370137b1f2bfaf86f62 /llama.cpp
parent    | 7e0b6a7b3ba94ff624dc27c1e0e735fded8819b8 (diff)
convert : add BPE pre-tokenization for DBRX (#7132)
* Add BPE pre-tokenization for DBRX.
* Add vocab GGUFs.
* Remove test.
* Remove GGUFs.
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 4
1 file changed, 4 insertions(+), 0 deletions(-)
@@ -4394,6 +4394,9 @@ static void llm_load_vocab(
             } else if (
                     tokenizer_pre == "olmo") {
                 vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_OLMO;
+            } else if (
+                    tokenizer_pre == "dbrx") {
+                vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_DBRX;
             } else {
                 throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
             }
@@ -12200,6 +12203,7 @@ struct llm_tokenizer_bpe {
             case LLAMA_VOCAB_TYPE_BPE:
                 switch (vocab.type_pre) {
                     case LLAMA_VOCAB_PRE_TYPE_LLAMA3:
+                    case LLAMA_VOCAB_PRE_TYPE_DBRX:
                         word_collection = unicode_regex_split(text, {
                             // original regex from tokenizer.json
                             //"(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
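For context, the change is a pure dispatch update: the converter records the pre-tokenizer name in the model metadata, llm_load_vocab() maps the "dbrx" string to a vocab pre-type, and the BPE tokenizer then reuses the same regex split as LLaMA-3 for that pre-type. The following is a minimal standalone C++ sketch of that pattern, not llama.cpp code; the pre_type enum, select_pre_type(), and pre_tokenizer_regexes() names are illustrative stand-ins for LLAMA_VOCAB_PRE_TYPE_* and the functions touched by the diff.

// Standalone sketch (assumed names, not llama.cpp API) of the two-step
// dispatch the diff extends: string -> enum, then enum -> regex set,
// with DBRX sharing the LLaMA-3 split rules via case fallthrough.
#include <iostream>
#include <stdexcept>
#include <string>
#include <vector>

enum class pre_type {
    DEFAULT,
    LLAMA3,
    OLMO,
    DBRX,
};

// Mirrors the else-if chain in the first hunk: unknown names are an error.
static pre_type select_pre_type(const std::string & tokenizer_pre) {
    if (tokenizer_pre == "llama3") return pre_type::LLAMA3;
    if (tokenizer_pre == "olmo")   return pre_type::OLMO;
    if (tokenizer_pre == "dbrx")   return pre_type::DBRX;
    throw std::runtime_error("unknown pre-tokenizer type: '" + tokenizer_pre + "'");
}

// Mirrors the second hunk: DBRX falls through to the LLaMA-3 regex set,
// so the change is only an extra case label, not new splitting rules.
static std::vector<std::string> pre_tokenizer_regexes(pre_type t) {
    switch (t) {
        case pre_type::LLAMA3:
        case pre_type::DBRX:
            return {
                "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1,3}|"
                " ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
            };
        default:
            return { "\\s+" }; // placeholder fallback for the sketch only
    }
}

int main() {
    const pre_type t = select_pre_type("dbrx");
    std::cout << "regex count: " << pre_tokenizer_regexes(t).size() << "\n";
}

Keeping the regex selection behind an enum rather than re-reading the metadata string at tokenization time means the string comparison happens once at load, and new models that share an existing split (as DBRX does here) only add a case label.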