From 2a20f48efad692a8c2744f10c673bbdbe0c751b7 Mon Sep 17 00:00:00 2001
From: Val Kharitonov
Date: Mon, 13 Mar 2023 12:24:18 -0400
Subject: Fix UTF-8 handling (including colors) (#79)

---
 convert-pth-to-ggml.py | 29 ++++++++++++++++++++++-------
 1 file changed, 22 insertions(+), 7 deletions(-)

diff --git a/convert-pth-to-ggml.py b/convert-pth-to-ggml.py
index fc217c7e..d2557500 100644
--- a/convert-pth-to-ggml.py
+++ b/convert-pth-to-ggml.py
@@ -22,7 +22,6 @@ import json
 import struct
 import numpy as np
 import torch
-
 from sentencepiece import SentencePieceProcessor
 
 if len(sys.argv) < 3:
@@ -101,12 +100,28 @@ for p in range(n_parts):
 
     # Is this correct??
     for i in range(32000):
-        # TODO: this is probably wrong - not sure how this tokenizer works
-        text = tokenizer.decode([29889, i]).encode('utf-8')
-        # remove the first byte (it's always '.')
-        text = text[1:]
-        fout.write(struct.pack("i", len(text)))
-        fout.write(text)
+        if tokenizer.is_unknown(i):
+            # "<unk>" token (translated as "⁇")
+            text = " \u2047 ".encode("utf-8")
+            fout.write(struct.pack("i", len(text)))
+            fout.write(text)
+        elif tokenizer.is_control(i):
+            # "<s>"/"</s>" tokens
+            fout.write(struct.pack("i", 0))
+        elif tokenizer.is_byte(i):
+            # "<U+XX>" tokens (which may be invalid UTF-8)
+            piece = tokenizer.id_to_piece(i)
+            if len(piece) != 6:
+                print("Invalid token: " + piece)
+                sys.exit(1)
+            byte_value = int(piece[3:-1], 16)
+            fout.write(struct.pack("i", 1))
+            fout.write(struct.pack("B", byte_value))
+        else:
+            # normal token. Uses U+2581 (LOWER ONE EIGHTH BLOCK) to represent spaces.
+            text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
+            fout.write(struct.pack("i", len(text)))
+            fout.write(text)
 
     for k, v in model.items():
         name = k
-- 
cgit v1.2.3