diff options
author | Pedro Cuenca <pedro@huggingface.co> | 2024-03-26 13:32:19 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-03-26 14:32:19 +0200 |
commit | e097633f63fdd26d492844f7eff056e4083fd9eb (patch) | |
tree | af04986323076514b9d6b3accba33ada7c69af87 | |
parent | d25b1c31b07c3675443a55a828dd58cfef5a241c (diff) |
convert-hf : fix exception in sentencepiece with added tokens (#6320)
-rwxr-xr-x | convert-hf-to-gguf.py | 12 |
1 file changed, 8 insertions, 4 deletions
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py index 723ea18e..c5d2d0b7 100755 --- a/convert-hf-to-gguf.py +++ b/convert-hf-to-gguf.py @@ -331,7 +331,7 @@ class Model(ABC): tokenizer = SentencePieceProcessor(str(tokenizer_path)) vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size()) - for token_id in range(vocab_size): + for token_id in range(tokenizer.vocab_size()): piece = tokenizer.id_to_piece(token_id) text = piece.encode("utf-8") score = tokenizer.get_score(token_id) @@ -356,9 +356,13 @@ class Model(ABC): added_tokens_json = json.load(f) for key in added_tokens_json: - tokens.append(key.encode("utf-8")) - scores.append(-1000.0) - toktypes.append(SentencePieceTokenTypes.USER_DEFINED) + key = key.encode("utf-8") + if key not in tokens: + tokens.append(key) + scores.append(-1000.0) + toktypes.append(SentencePieceTokenTypes.USER_DEFINED) + + assert len(tokens) == vocab_size self.gguf_writer.add_tokenizer_model("llama") self.gguf_writer.add_token_list(tokens) |