From 69a6735087c3634963c642fd69f0851ac479cd78 Mon Sep 17 00:00:00 2001
From: Galunid
Date: Mon, 23 Oct 2023 21:46:00 +0200
Subject: Update special token handling in conversion scripts for gpt2-derived
 tokenizers (#3746)

We still have the heads-up in `README.md` regarding `bpe` tokenizers, and this
patch is needed for:

- a couple of tokenizer tests
- some more `special` and `non-special` added-token handling (as far as I
  understand it)

* Update special token handling

* Add mpt
---
 convert-mpt-hf-to-gguf.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/convert-mpt-hf-to-gguf.py b/convert-mpt-hf-to-gguf.py
index 2d2fa232..70d154b3 100755
--- a/convert-mpt-hf-to-gguf.py
+++ b/convert-mpt-hf-to-gguf.py
@@ -136,9 +136,11 @@ for i in range(vocab_size):
         tokens.append(f"[PAD{i}]")
         toktypes.append(gguf.TokenType.USER_DEFINED)
     elif reverse_vocab[i] in added_vocab:
-        # NOTE: wouldn't we like to distinguish CONTROL tokens here?
         tokens.append(reverse_vocab[i])
-        toktypes.append(gguf.TokenType.USER_DEFINED)
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
     else:
         tokens.append(reverse_vocab[i])
         toktypes.append(gguf.TokenType.NORMAL)
--
cgit v1.2.3
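
For readers who want the patched logic in isolation, here is a minimal,
self-contained sketch of the token-classification loop after this change. It
assumes a Hugging Face `transformers` tokenizer whose `get_vocab()`,
`get_added_vocab()`, and `added_tokens_decoder` behave as in recent
`transformers` releases (with `added_tokens_decoder` mapping token ids to
AddedToken objects that carry a `.special` flag). The `TokenType` enum mirrors
the relevant subset of `gguf.TokenType`, and `classify_tokens` is a
hypothetical helper name, not part of the actual conversion script:

    from enum import IntEnum

    class TokenType(IntEnum):
        # Subset of gguf.TokenType relevant to this patch (illustrative).
        NORMAL = 1
        CONTROL = 3
        USER_DEFINED = 4

    def classify_tokens(tokenizer, vocab_size):
        # Map token id -> token string so we can walk ids 0..vocab_size-1.
        reverse_vocab = {idx: tok for tok, idx in tokenizer.get_vocab().items()}
        added_vocab = tokenizer.get_added_vocab()

        tokens, toktypes = [], []
        for i in range(vocab_size):
            if i not in reverse_vocab:
                # Fill gaps in the id space with placeholder pad tokens.
                tokens.append(f"[PAD{i}]")
                toktypes.append(TokenType.USER_DEFINED)
            elif reverse_vocab[i] in added_vocab:
                tokens.append(reverse_vocab[i])
                # The change in this patch: added tokens flagged `special`
                # (e.g. <|endoftext|>) become CONTROL instead of USER_DEFINED.
                if tokenizer.added_tokens_decoder[i].special:
                    toktypes.append(TokenType.CONTROL)
                else:
                    toktypes.append(TokenType.USER_DEFINED)
            else:
                tokens.append(reverse_vocab[i])
                toktypes.append(TokenType.NORMAL)
        return tokens, toktypes

The CONTROL/USER_DEFINED distinction matters downstream: control tokens are
typically handled by the tokenizer itself rather than rendered as literal text,
so marking special added tokens as CONTROL in the GGUF vocabulary lets loaders
treat them accordingly.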