| author | Galunid <karolek1231456@gmail.com> | 2023-10-23 21:46:00 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-10-23 21:46:00 +0200 |
| commit | 69a6735087c3634963c642fd69f0851ac479cd78 | |
| tree | b4a0a0037e1d748e1dbf531ab5c83da5fcd7bd2e /convert-gptneox-hf-to-gguf.py | |
| parent | 5be6c803fa5378f62a1590f3ad8c6b64c7c0c2ce | |
Update special token handling in conversion scripts for gpt2 derived tokenizers (#3746)
We still have the heads-up in `README.md` regarding `bpe` tokenizers, and this patch is needed for:
- a couple of tokenizer tests
- handling of some more `special` and `non-special` added tokens (as far as I understand it; see the inspection sketch after this list)
* Update special token handling
* Add mpt
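
The `special` / `non-special` distinction the patch relies on comes from two `transformers` tokenizer attributes: `get_added_vocab()` and `added_tokens_decoder`. A minimal inspection sketch, assuming a gpt2-derived tokenizer (the `EleutherAI/gpt-neox-20b` id is used purely for illustration) and a `transformers` release recent enough to expose `added_tokens_decoder`:

```python
# Illustration only: inspect which tokens a gpt2-derived tokenizer reports as
# "added", and which of those carry the `special` flag. The model id is a stand-in.
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")

# Tokens layered on top of the base BPE vocab, as {token_string: token_id}.
added_vocab = tokenizer.get_added_vocab()
print(added_vocab)

# Per-id AddedToken objects; .special separates control tokens
# (e.g. <|endoftext|>) from plain user-defined additions.
for tok_id, tok in sorted(tokenizer.added_tokens_decoder.items()):
    kind = "special" if tok.special else "non-special"
    print(tok_id, repr(tok.content), kind)
```

In the conversion scripts, added tokens with `special == True` become `gguf.TokenType.CONTROL`, the remaining added tokens become `USER_DEFINED`, and base-vocab tokens stay `NORMAL`.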
Diffstat (limited to 'convert-gptneox-hf-to-gguf.py')
| -rwxr-xr-x | convert-gptneox-hf-to-gguf.py | 17 |
1 file changed, 13 insertions, 4 deletions
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index f1599b0c..02d1fdf1 100755
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -123,15 +123,24 @@
 tokenizer = AutoTokenizer.from_pretrained(dir_model)
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size
+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}

 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0)  # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)

 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)

 special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
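
A minimal sketch (not part of the commit) that reproduces the new classification outside the conversion script and prints how many tokens land in each `gguf.TokenType` bucket; the `dir_model` path is a placeholder, and the `gguf` and `transformers` packages are assumed to be installed:

```python
# Sketch only: mirror the updated vocab-building loop and summarize token types.
from collections import Counter

import gguf
from transformers import AutoTokenizer

dir_model = "path/to/hf-model"  # placeholder: local directory of a gpt2-derived HF model
tokenizer = AutoTokenizer.from_pretrained(dir_model)

added_vocab = tokenizer.get_added_vocab()
reverse_vocab = {tok_id: tok for tok, tok_id in tokenizer.vocab.items()}
vocab_size = max(reverse_vocab) + 1  # stand-in for hparams["vocab_size"]

counts = Counter()
for i in range(vocab_size):
    if i not in reverse_vocab:
        counts[gguf.TokenType.USER_DEFINED] += 1      # id gap -> "[PAD{i}]" filler
    elif reverse_vocab[i] in added_vocab:
        if tokenizer.added_tokens_decoder[i].special:
            counts[gguf.TokenType.CONTROL] += 1       # special added token, e.g. <|endoftext|>
        else:
            counts[gguf.TokenType.USER_DEFINED] += 1  # plain (non-special) added token
    else:
        counts[gguf.TokenType.NORMAL] += 1            # regular BPE vocab entry

for toktype, n in sorted(counts.items()):
    print(toktype.name, n)
```

Net effect of the patch: ids missing from the vocab are still filled with `[PAD{i}]` but are now typed `USER_DEFINED` rather than `NORMAL`, special added tokens are typed `CONTROL`, and the dummy per-token scores (along with the `add_token_scores` call) are dropped.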
