author    Georgi Gerganov <ggerganov@gmail.com>  2024-05-18 08:46:20 +0300
committer GitHub <noreply@github.com>            2024-05-18 08:46:20 +0300
commit    b49a13dd2fa9c94c2c19a8c248bb7fa45499f9a8 (patch)
tree      824378920d71a282528c0322b3ea60e1203190cb
parent    05834841dcb4f922983ea976539c70472272df9a (diff)
convert : fix set_vocab_sentencepiece (#6866)
* convert : fix set_vocab_sentencepiece

* Update convert-hf-to-gguf.py

-rwxr-xr-x  convert-hf-to-gguf.py  26
1 file changed, 15 insertions(+), 11 deletions(-)
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 5ba3161c..cd1750aa 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -573,6 +573,10 @@ class Model:
vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
+ tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
+ scores: list[float] = [-10000.0] * vocab_size
+ toktypes: list[int] = [SentencePieceTokenTypes.UNKNOWN] * vocab_size
+
for token_id in range(tokenizer.vocab_size()):
piece = tokenizer.IdToPiece(token_id)
text = piece.encode("utf-8")
@@ -588,21 +592,23 @@ class Model:
elif tokenizer.IsByte(token_id):
toktype = SentencePieceTokenTypes.BYTE
- tokens.append(text)
- scores.append(score)
- toktypes.append(toktype)
+ tokens[token_id] = text
+ scores[token_id] = score
+ toktypes[token_id] = toktype
added_tokens_file = self.dir_model / 'added_tokens.json'
if added_tokens_file.is_file():
with open(added_tokens_file, "r", encoding="utf-8") as f:
added_tokens_json = json.load(f)
-
for key in added_tokens_json:
- key = key.encode("utf-8")
- if key not in tokens:
- tokens.append(key)
- scores.append(-1000.0)
- toktypes.append(SentencePieceTokenTypes.USER_DEFINED)
+ token_id = added_tokens_json[key]
+ if (token_id >= vocab_size):
+ logger.warning(f'ignore token {token_id}: id is out of range, max={vocab_size - 1}')
+ continue
+
+ tokens[token_id] = key.encode("utf-8")
+ scores[token_id] = -1000.0
+ toktypes[token_id] = SentencePieceTokenTypes.USER_DEFINED
if vocab_size > len(tokens):
pad_count = vocab_size - len(tokens)
@@ -612,8 +618,6 @@ class Model:
scores.append(-1000.0)
toktypes.append(SentencePieceTokenTypes.UNUSED)
- assert len(tokens) == vocab_size
-
self.gguf_writer.add_tokenizer_model("llama")
self.gguf_writer.add_tokenizer_pre("default")
self.gguf_writer.add_token_list(tokens)
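
The core of the fix is to pre-allocate the token, score, and type arrays to the full vocab_size and assign by token id, instead of appending as ids are encountered. That way a gap in the SentencePiece vocabulary or an out-of-range entry in added_tokens.json can no longer shift later tokens out of alignment with their ids. Below is a minimal standalone sketch of that pattern; the token-type constants, vocab size, and token data are illustrative stand-ins, not the converter's actual values.

# Sketch of the pre-allocation + indexed-assignment pattern from the fix.
# The enum values and vocab data below are illustrative placeholders.
import logging

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(__name__)

UNKNOWN, NORMAL, USER_DEFINED = 2, 1, 4  # stand-ins for SentencePieceTokenTypes

vocab_size = 8  # hypothetical model vocab size

# Pre-fill every slot so a missing id stays as "[PADn]" instead of shifting later entries.
tokens: list[bytes] = [f"[PAD{i}]".encode("utf-8") for i in range(vocab_size)]
scores: list[float] = [-10000.0] * vocab_size
toktypes: list[int] = [UNKNOWN] * vocab_size

# Pieces reported by a (hypothetical) SentencePiece model: note that id 3 is missing.
sp_pieces = {0: "<unk>", 1: "<s>", 2: "</s>", 4: "▁hello", 5: "▁world"}
for token_id, piece in sp_pieces.items():
    tokens[token_id] = piece.encode("utf-8")
    scores[token_id] = 0.0
    toktypes[token_id] = NORMAL

# added_tokens.json maps text -> id; out-of-range ids are skipped with a warning.
added_tokens_json = {"<extra_0>": 6, "<extra_1>": 7, "<bad>": 42}
for key, token_id in added_tokens_json.items():
    if token_id >= vocab_size:
        logger.warning(f"ignore token {token_id}: id is out of range, max={vocab_size - 1}")
        continue
    tokens[token_id] = key.encode("utf-8")
    scores[token_id] = -1000.0
    toktypes[token_id] = USER_DEFINED

# All three lists keep length vocab_size, so list index == token id throughout.
assert len(tokens) == len(scores) == len(toktypes) == vocab_size
print(tokens)  # slot 3 remains b'[PAD3]' rather than being filled by a later token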