author    Galunid <karolek1231456@gmail.com>    2023-10-23 21:46:00 +0200
committer GitHub <noreply@github.com>           2023-10-23 21:46:00 +0200
commit    69a6735087c3634963c642fd69f0851ac479cd78 (patch)
tree      b4a0a0037e1d748e1dbf531ab5c83da5fcd7bd2e /convert-starcoder-hf-to-gguf.py
parent    5be6c803fa5378f62a1590f3ad8c6b64c7c0c2ce (diff)
Update special token handling in conversion scripts for gpt2 derived tokenizers (#3746)
We still have the heads-up in `README.md` regarding `bpe` tokenizers, and this patch is needed for:

- a couple of tokenizer tests
- some more `special` and `non-special` added token handling (as far as I understand it)

* Update special token handling
* Add mpt
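For reference, a minimal sketch (not part of this commit) of the Hugging Face tokenizer attributes the updated conversion loop relies on; the model id is a placeholder:

# Sketch (not from this patch): how a gpt2-derived HF tokenizer exposes
# added tokens; "bigcode/starcoder" is a placeholder model id.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bigcode/starcoder")

# Tokens appended on top of the base BPE vocab, mapped token -> id.
added_vocab = tokenizer.get_added_vocab()

# Per-id AddedToken objects; .special separates control tokens such as
# <|endoftext|> from plain user-defined added tokens.
for tok_id, tok in tokenizer.added_tokens_decoder.items():
    kind = "special" if tok.special else "non-special"
    print(tok_id, repr(tok.content), kind)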
Diffstat (limited to 'convert-starcoder-hf-to-gguf.py')
-rwxr-xr-x  convert-starcoder-hf-to-gguf.py  18
1 file changed, 13 insertions, 5 deletions
diff --git a/convert-starcoder-hf-to-gguf.py b/convert-starcoder-hf-to-gguf.py
index fe8815cb..a9bfed85 100755
--- a/convert-starcoder-hf-to-gguf.py
+++ b/convert-starcoder-hf-to-gguf.py
@@ -111,17 +111,25 @@ tokenizer = AutoTokenizer.from_pretrained(dir_model)
 
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size
 
+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
 
 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0)  # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)
 
 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
-
 special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
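For illustration, the patched loop could be factored into a standalone helper so the classification rules can be tested in isolation; this is a hypothetical refactor, not code from the commit:

# Hypothetical refactor (not in the patch) mirroring the classification
# rules introduced above.
import gguf

def classify_vocab(tokenizer, vocab_size):
    tokens, toktypes = [], []
    added_vocab = tokenizer.get_added_vocab()
    reverse_vocab = {id: tok for tok, id in tokenizer.vocab.items()}
    for i in range(vocab_size):
        if i not in reverse_vocab:
            tokens.append(f"[PAD{i}]")                    # gap in the id space
            toktypes.append(gguf.TokenType.USER_DEFINED)
        elif reverse_vocab[i] in added_vocab:
            tokens.append(reverse_vocab[i])
            special = tokenizer.added_tokens_decoder[i].special
            toktypes.append(gguf.TokenType.CONTROL if special
                            else gguf.TokenType.USER_DEFINED)
        else:
            tokens.append(reverse_vocab[i])               # ordinary BPE token
            toktypes.append(gguf.TokenType.NORMAL)
    return tokens, toktypes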