author     Galunid <karolek1231456@gmail.com>        2023-10-23 21:46:00 +0200
committer  GitHub <noreply@github.com>                2023-10-23 21:46:00 +0200
commit     69a6735087c3634963c642fd69f0851ac479cd78 (patch)
tree       b4a0a0037e1d748e1dbf531ab5c83da5fcd7bd2e
parent     5be6c803fa5378f62a1590f3ad8c6b64c7c0c2ce (diff)
Update special token handling in conversion scripts for gpt2-derived tokenizers (#3746)
We still have the heads-up in `README.md` regarding `bpe` tokenizers, and this patch is needed for:
- a couple of tokenizer tests
- some more `special` and `non-special` added token handling (as far as I understand it)

* Update special token handling
* Add mpt
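All five converters now apply the same id-to-token-type rule. Below is a minimal standalone sketch of that rule for readability; the helper name `token_type_for` is mine and not part of the patch, and `gguf` refers to the gguf-py package the conversion scripts already import.

    import gguf  # gguf-py, as used by the conversion scripts

    def token_type_for(i, reverse_vocab, added_vocab, tokenizer):
        # Ids with no vocab entry are emitted as "[PAD{i}]" placeholders.
        if i not in reverse_vocab:
            return f"[PAD{i}]", gguf.TokenType.USER_DEFINED
        tok = reverse_vocab[i]
        # Added tokens: CONTROL if the tokenizer flags them as special,
        # otherwise USER_DEFINED; everything else stays NORMAL.
        if tok in added_vocab:
            if tokenizer.added_tokens_decoder[i].special:
                return tok, gguf.TokenType.CONTROL
            return tok, gguf.TokenType.USER_DEFINED
        return tok, gguf.TokenType.NORMAL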
-rwxr-xr-x  convert-bloom-hf-to-gguf.py      17
-rwxr-xr-x  convert-gptneox-hf-to-gguf.py    17
-rwxr-xr-x  convert-mpt-hf-to-gguf.py         6
-rwxr-xr-x  convert-refact-hf-to-gguf.py     17
-rwxr-xr-x  convert-starcoder-hf-to-gguf.py  18
5 files changed, 56 insertions, 19 deletions
diff --git a/convert-bloom-hf-to-gguf.py b/convert-bloom-hf-to-gguf.py
index 14dbd793..6e866d94 100755
--- a/convert-bloom-hf-to-gguf.py
+++ b/convert-bloom-hf-to-gguf.py
@@ -118,15 +118,24 @@ tokenizer = AutoTokenizer.from_pretrained(dir_model)
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size
+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)
 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py
index f1599b0c..02d1fdf1 100755
--- a/convert-gptneox-hf-to-gguf.py
+++ b/convert-gptneox-hf-to-gguf.py
@@ -123,15 +123,24 @@ tokenizer = AutoTokenizer.from_pretrained(dir_model)
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size
+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)
 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
diff --git a/convert-mpt-hf-to-gguf.py b/convert-mpt-hf-to-gguf.py
index 2d2fa232..70d154b3 100755
--- a/convert-mpt-hf-to-gguf.py
+++ b/convert-mpt-hf-to-gguf.py
@@ -136,9 +136,11 @@ for i in range(vocab_size):
tokens.append(f"[PAD{i}]")
toktypes.append(gguf.TokenType.USER_DEFINED)
elif reverse_vocab[i] in added_vocab:
- # NOTE: wouldn't we like to distinguish CONTROL tokens here?
tokens.append(reverse_vocab[i])
- toktypes.append(gguf.TokenType.USER_DEFINED)
+ if tokenizer.added_tokens_decoder[i].special:
+ toktypes.append(gguf.TokenType.CONTROL)
+ else:
+ toktypes.append(gguf.TokenType.USER_DEFINED)
else:
tokens.append(reverse_vocab[i])
toktypes.append(gguf.TokenType.NORMAL)
diff --git a/convert-refact-hf-to-gguf.py b/convert-refact-hf-to-gguf.py
index 934f3852..f0cfe84d 100755
--- a/convert-refact-hf-to-gguf.py
+++ b/convert-refact-hf-to-gguf.py
@@ -139,15 +139,24 @@ tokenizer = AutoTokenizer.from_pretrained(dir_model)
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size
+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)
 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
 special_vocab = gguf.SpecialVocab(dir_model, load_merges=True, n_vocab = len(tokens))
diff --git a/convert-starcoder-hf-to-gguf.py b/convert-starcoder-hf-to-gguf.py
index fe8815cb..a9bfed85 100755
--- a/convert-starcoder-hf-to-gguf.py
+++ b/convert-starcoder-hf-to-gguf.py
@@ -111,17 +111,25 @@ tokenizer = AutoTokenizer.from_pretrained(dir_model)
 vocab_size = hparams.get("vocab_size", len(tokenizer.vocab))
 assert max(tokenizer.vocab.values()) < vocab_size
+added_vocab = tokenizer.get_added_vocab()
 reverse_vocab = {id: encoded_tok for encoded_tok, id in tokenizer.vocab.items()}
 for i in range(vocab_size):
-    tokens.append(reverse_vocab[i] if i in reverse_vocab else f"[PAD{i}]")
-    scores.append(0.0) # dummy
-    toktypes.append(gguf.TokenType.NORMAL)
+    if i not in reverse_vocab:
+        tokens.append(f"[PAD{i}]")
+        toktypes.append(gguf.TokenType.USER_DEFINED)
+    elif reverse_vocab[i] in added_vocab:
+        tokens.append(reverse_vocab[i])
+        if tokenizer.added_tokens_decoder[i].special:
+            toktypes.append(gguf.TokenType.CONTROL)
+        else:
+            toktypes.append(gguf.TokenType.USER_DEFINED)
+    else:
+        tokens.append(reverse_vocab[i])
+        toktypes.append(gguf.TokenType.NORMAL)
 gguf_writer.add_token_list(tokens)
-gguf_writer.add_token_scores(scores)
 gguf_writer.add_token_types(toktypes)
-
 special_vocab = gguf.SpecialVocab(dir_model, load_merges = True, n_vocab = len(tokens))
 special_vocab.add_to_gguf(gguf_writer)
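A quick way to see which token types a given Hugging Face tokenizer will get from the updated converters is to list its added tokens and their `special` flag. This is only a sketch: it assumes a transformers version that exposes `added_tokens_decoder` (as the patched scripts do), and the model name is just an example, not something referenced by this commit.

    from transformers import AutoTokenizer

    dir_model = "mosaicml/mpt-7b"  # example model; any gpt2-derived tokenizer works
    tokenizer = AutoTokenizer.from_pretrained(dir_model)

    # added_tokens_decoder maps id -> AddedToken; its .special flag is what
    # decides CONTROL vs USER_DEFINED in the updated conversion scripts.
    for tok_id, added_tok in sorted(tokenizer.added_tokens_decoder.items()):
        kind = "CONTROL" if added_tok.special else "USER_DEFINED"
        print(f"{tok_id:6d}  {kind:12s}  {added_tok.content!r}")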