Diffstat (limited to 'tests/test-tokenizer-0-bpe.py')
-rw-r--r--  tests/test-tokenizer-0-bpe.py | 41 +++++++++++++++++++++++++----------------
1 file changed, 25 insertions(+), 16 deletions(-)
diff --git a/tests/test-tokenizer-0-bpe.py b/tests/test-tokenizer-0-bpe.py
index 33a27244..6b70ad03 100644
--- a/tests/test-tokenizer-0-bpe.py
+++ b/tests/test-tokenizer-0-bpe.py
@@ -7,15 +7,20 @@
 # python3 tests/test-tokenizer-0-bpe.py ~/Data/huggingface/deepseek-coder-6.7b-instruct/
 #
 
+import logging
 import argparse
 
 from transformers import AutoTokenizer
 
+logger = logging.getLogger("test-tokenizer-0-bpe")
+
 parser = argparse.ArgumentParser()
 parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
 parser.add_argument("--fname-tok", help="path to a text file to tokenize")
-args = parser.parse_args()
+parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+args = parser.parse_args()
+logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
 
 dir_tokenizer = args.dir_tokenizer
 
 tokenizer = AutoTokenizer.from_pretrained(dir_tokenizer)
@@ -64,30 +69,34 @@ tests = [
 ]
 
 for text in tests:
-    print('text: ', text)
-    print(tokenizer.encode(text))
-    print(tokenizer.decode(tokenizer.encode(text)))
+    logger.info(f"text: {text}")
+    logger.info(tokenizer.encode(text))
+    logger.info(tokenizer.decode(tokenizer.encode(text)))
 
-print("\n\ntests for C++:\n")
+logger.info("tests for C++:")
 for text in tests:
     res = tokenizer.encode(text)
 
+    # Modify text representation for logging
     k = text.replace('\n', '\\n')
     k = k.replace('\t', '\\t')
     k = '"' + k + '"'
-    print("{ %-24s, { " % k, end='')
+
+    # Log the modified text and its encoding
+    log_message = "{ %-24s, { " % k
     for x in res:
-        print("%7d," % x, end='')
-    print(" }, },")
+        log_message += "%7d," % x
+    log_message += " }, },"
+    logger.info(log_message)
 
-print(tokenizer.encode('hello'))
-print(tokenizer.encode('world'))
-print(tokenizer.encode(' world'))
-print(tokenizer.encode('hello world'))
+logger.info(tokenizer.encode('hello'))
+logger.info(tokenizer.encode('world'))
+logger.info(tokenizer.encode(' world'))
+logger.info(tokenizer.encode('hello world'))
 
 fname_tok = args.fname_tok
 if fname_tok:
-    print('tokenizing file: ', fname_tok)
+    logger.info(f"tokenizing file: {fname_tok}")
     fname_out = fname_tok + '.tok'
     with open(fname_tok, 'r', encoding='utf-8') as f:
         lines = f.readlines()
@@ -112,6 +121,6 @@ if fname_tok:
             # else:
             #     f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
             f.write(str(x) + ' \'' + tokenizer.decode(x).strip() + '\'\n')
-    print('len(res): ', len(res))
-    print('len(lines): ', len(lines))
-    print('results written to: ', fname_out)
+    logger.info(f"len(res): {len(res)}")
+    logger.info(f"len(lines): {len(lines)}")
+    logger.info(f"results written to: {fname_out}")