summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--convert_hf_to_gguf.py14
-rwxr-xr-xconvert_hf_to_gguf_update.py1
-rw-r--r--include/llama.h1
-rw-r--r--src/llama-vocab.cpp7
-rw-r--r--src/llama.cpp4
5 files changed, 27 insertions, 0 deletions
diff --git a/convert_hf_to_gguf.py b/convert_hf_to_gguf.py
index 1754e486..33be63fa 100644
--- a/convert_hf_to_gguf.py
+++ b/convert_hf_to_gguf.py
@@ -636,6 +636,9 @@ class Model:
if chkhsh == "877081d19cf6996e2c4ff0e1236341e9b7bde288f5311a56a937f0afbbb3aeb5":
# ref: https://huggingface.co/deepseek-ai/DeepSeek-V3
res = "deepseek-v3"
+ if chkhsh == "d5f1dd6f980fec569fb218a81a7658ac45fc56b38c5a0adeb1c232fbe04ef5ec":
+ # ref: https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base
+ res = "seed-coder"
if res is None:
logger.warning("\n")
@@ -1520,6 +1523,17 @@ class LlamaModel(Model):
special_vocab._set_special_token("eot", 32010)
special_vocab.add_to_gguf(self.gguf_writer)
+ # Apply to Seed-Coder only (skipped for all other Llama-family models)
+ if self.hparams.get("vocab_size", 32000) == 155136:
+ special_vocab = gguf.SpecialVocab(
+ self.dir_model, load_merges=False,
+ special_token_types = ['prefix', 'suffix', 'middle', 'eot']
+ )
+ special_vocab._set_special_token("prefix", 124)
+ special_vocab._set_special_token("suffix", 125)
+ special_vocab._set_special_token("middle", 126)
+ special_vocab.add_to_gguf(self.gguf_writer)
+
def set_gguf_parameters(self):
super().set_gguf_parameters()
hparams = self.hparams
diff --git a/convert_hf_to_gguf_update.py b/convert_hf_to_gguf_update.py
index 40af02f4..f2e6cc37 100755
--- a/convert_hf_to_gguf_update.py
+++ b/convert_hf_to_gguf_update.py
@@ -95,6 +95,7 @@ models = [
{"name": "tekken", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/mistralai/Mistral-Nemo-Base-2407", },
{"name": "smollm", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/HuggingFaceTB/SmolLM-135M", },
{"name": "deepseek-v3", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/deepseek-ai/DeepSeek-V3"},
+ {"name": "seed-coder", "tokt": TOKENIZER_TYPE.BPE, "repo": "https://huggingface.co/ByteDance-Seed/Seed-Coder-8B-Base", },
]
diff --git a/include/llama.h b/include/llama.h
index 6c8bff95..51db3eab 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -110,6 +110,7 @@ extern "C" {
LLAMA_VOCAB_PRE_TYPE_LLAMA4 = 33,
LLAMA_VOCAB_PRE_TYPE_FALCON_3 = 34,
LLAMA_VOCAB_PRE_TYPE_FALCON_E = 35,
+ LLAMA_VOCAB_PRE_TYPE_SEED_CODER = 36, // NOTE: upstream llama.cpp assigns 35 to SEED_CODER; 36 here because this tree adds FALCON_E = 35
};
// note: these values should be synchronized with ggml_rope
diff --git a/src/llama-vocab.cpp b/src/llama-vocab.cpp
index 65ca5e38..474cbd8c 100644
--- a/src/llama-vocab.cpp
+++ b/src/llama-vocab.cpp
@@ -477,6 +477,13 @@ struct llm_tokenizer_bpe {
"'(?:[sSdDmMtT]|[lL][lL]|[vV][eE]|[rR][eE])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}| ?[^\\s\\p{L}\\p{N}]+[\\r\\n]*|\\s*[\\r\\n]|\\s+(?!\\S)|\\s+",
};
break;
+ case LLAMA_VOCAB_PRE_TYPE_SEED_CODER:
+ regex_exprs = {
+ // original regex from tokenizer.json
+ // "(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1}| ?[^\\s\\p{L}\\p{N}\r\n]+|\\s*[\r\n]+|\\s+(?!\\S)|\\s+"
+ "(?:'[sS]|'[tT]|'[rR][eE]|'[vV][eE]|'[mM]|'[lL][lL]|'[dD])|[^\\r\\n\\p{L}\\p{N}]?\\p{L}+|\\p{N}{1}| ?[^\\s\\p{L}\\p{N}\\r\\n]+|\\s*[\\r\\n]+|\\s+(?!\\S)|\\s+",
+ };
+ break;
default:
// default regex for BPE tokenization pre-processing
regex_exprs = {
diff --git a/src/llama.cpp b/src/llama.cpp
index 3ee95939..564304f6 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -6302,6 +6302,10 @@ static void llm_load_vocab(
tokenizer_pre == "bailingmoe") {
vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_BAILINGMOE;
vocab.tokenizer_clean_spaces = false;
+ } else if (
+ tokenizer_pre == "seed-coder") {
+ vocab.type_pre = LLAMA_VOCAB_PRE_TYPE_SEED_CODER;
+ vocab.tokenizer_clean_spaces = false;
} else {
throw std::runtime_error(format("unknown pre-tokenizer type: '%s'", tokenizer_pre.c_str()));
}