| author | Brian <mofosyne@gmail.com> | 2024-05-04 05:36:41 +1000 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-05-03 22:36:41 +0300 |
| commit | a2ac89d6efb41b535778bfeaecaae8fe295b6ed3 (patch) | |
| tree | 584a6f5316a627e64bfbc3aa5e098b911aef285a /tests/test-tokenizer-0-spm.py | |
| parent | 433def286e98751bf17db75dce53847d075c0be5 (diff) | |
convert.py : add python logging instead of print() (#6511)
* convert.py: add python logging instead of print()
* convert.py: verbose flag takes priority over dump flag log suppression
* convert.py: named instance logging
* convert.py: use explicit logger id string
* convert.py: convert extra print() to named logger
* convert.py: sys.stderr.write --> logger.error
* *.py: Convert all python scripts to use logging module
* requirements.txt: remove extra line
* flake8: update flake8 ignore and exclude to match ci settings
* gh-actions: add flake8-no-print to flake8 lint step
* pre-commit: add flake8-no-print to flake8 and also update pre-commit version
* convert-hf-to-gguf.py: print() to logger conversion
* *.py: logging basicConfig refactor to use conditional expression
* *.py: removed commented out logging
* fixup! *.py: logging basicConfig refactor to use conditional expression
* constant.py: logger.error then exit should be a raise exception instead
* *.py: Convert logger error and sys.exit() into a raise exception (for atypical error)
* gguf-convert-endian.py: refactor convert_byteorder() to use tqdm progressbar
* verify-checksum-model.py: this is the result of the program, so it should be printed to stdout
* compare-llama-bench.py: add blank line for readability during missing repo response
* reader.py: read_gguf_file() use print() over logging
* convert.py: warning goes to stderr and won't hurt the dump output
* gguf-dump.py: dump_metadata() should print to stdout
* convert-hf-to-gguf.py: print --> logger.debug or ValueError()
* verify-checksum-models.py: use print() for printing table
* *.py: refactor logging.basicConfig()
* gguf-py/gguf/*.py: use __name__ as logger name
Since they will be imported and not run directly.
* python-lint.yml: use .flake8 file instead
* constants.py: logger no longer required
* convert-hf-to-gguf.py: add additional logging
* convert-hf-to-gguf.py: print() --> logger
* *.py: fix flake8 warnings
* revert changes to convert-hf-to-gguf.py for get_name()
* convert-hf-to-gguf-update.py: use triple quoted f-string instead
* *.py: accidentally corrected the wrong line
* *.py: add compilade warning suggestions and style fixes
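
Several of the bullets above describe the same small set of logging conventions: a module-level logger, a `--verbose` flag driving `logging.basicConfig()` through a conditional expression, `__name__` as the logger name in importable modules, and raising exceptions instead of `logger.error()` followed by `sys.exit()`. The sketch below shows how these conventions typically fit together; it is illustrative only, and the function names, arguments, and messages are placeholders rather than code from this commit.

```python
# Illustrative sketch of the logging conventions described above -- not code from this commit.
import argparse
import logging

# In an importable module (e.g. under gguf-py/gguf/), use __name__ so the logger
# name follows the package hierarchy when the module is imported elsewhere.
logger = logging.getLogger(__name__)


def load_model(path: str) -> None:
    # For atypical errors, raise an exception instead of logger.error() + sys.exit().
    if not path:
        raise ValueError("model path must not be empty")
    logger.debug("loading model from %s", path)


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("model", help="path to the model directory")
    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
    args = parser.parse_args()

    # basicConfig level selected with a conditional expression, as in the refactor.
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    load_model(args.model)
    # Program results (as opposed to diagnostics) still go to stdout via print().
    print(f"loaded: {args.model}")


if __name__ == "__main__":
    main()
```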
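
The commit message also mentions replacing log-based progress reporting in gguf-convert-endian.py with a tqdm progress bar. A minimal sketch of that pattern follows; the function and chunking scheme are made up for illustration and are not the actual convert_byteorder() implementation.

```python
# Minimal sketch of progress reporting with tqdm -- the function below is a
# stand-in, not the actual convert_byteorder() from gguf-convert-endian.py.
import numpy as np
from tqdm import tqdm


def byteswap_in_chunks(arr: np.ndarray, chunk_size: int = 1_000_000) -> None:
    """Swap the byte order of a large array in place, one chunk at a time."""
    flat = arr.reshape(-1)
    n_chunks = (flat.size + chunk_size - 1) // chunk_size
    # tqdm draws a progress bar on stderr instead of emitting one log line per chunk.
    for i in tqdm(range(n_chunks), desc="byteswapping"):
        part = flat[i * chunk_size:(i + 1) * chunk_size]
        part[:] = part.byteswap()


if __name__ == "__main__":
    data = np.arange(10_000_000, dtype=np.uint32)
    byteswap_in_chunks(data)
```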
Diffstat (limited to 'tests/test-tokenizer-0-spm.py')
-rw-r--r-- | tests/test-tokenizer-0-spm.py | 66 |
1 file changed, 39 insertions, 27 deletions
diff --git a/tests/test-tokenizer-0-spm.py b/tests/test-tokenizer-0-spm.py
index be12a6b9..4b80a438 100644
--- a/tests/test-tokenizer-0-spm.py
+++ b/tests/test-tokenizer-0-spm.py
@@ -7,15 +7,22 @@
 #
 
+import logging
 import argparse
 
 from sentencepiece import SentencePieceProcessor
 
+logger = logging.getLogger("test-tokenizer-0-spm")
+
 parser = argparse.ArgumentParser()
 parser.add_argument("dir_tokenizer", help="directory containing 'tokenizer.model' file")
 parser.add_argument("--fname-tok", help="path to a text file to tokenize")
+parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+
 args = parser.parse_args()
 
+logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
 dir_tokenizer = args.dir_tokenizer
 
 tokenizer = SentencePieceProcessor(dir_tokenizer + '/tokenizer.model')
 
@@ -65,41 +72,46 @@ tests = [
 
 
 for text in tests:
-    print('text: ', text)
-    print('\nwith bos:')
-    print(tokenizer.encode(text, add_bos=True))
-    print(tokenizer.decode(tokenizer.encode(text, add_bos=True)))
-    print('\nwithout bos:')
-    print(tokenizer.encode(text, add_bos=False))
-    print(tokenizer.decode(tokenizer.encode(text, add_bos=False)))
-
-print("'" + tokenizer.id_to_piece(15043) + "'") # '_Hello'
-print("'" + tokenizer.id_to_piece(29871) + "'") # '_'
-print("'" + tokenizer.decode([15043]) + "'") # 'Hello'
-print("'" + tokenizer.decode([15043, 15043]) + "'") # 'Hello Hello'
-print("'" + tokenizer.decode([29871, 15043]) + "'") # ' Hello'
-print("'" + tokenizer.decode([29871, 15043, 29871, 15043]) + "'") # ' Hello Hello'
-
-print("\n\ntests for C++:\n")
+    message_log = (f"text: {text}\n"
+                   "with bos:\n"
+                   f"{tokenizer.encode(text, add_bos=True)}\n"
+                   f"{tokenizer.decode(tokenizer.encode(text, add_bos=True))}\n"
+                   "without bos:\n"
+                   f"{tokenizer.encode(text, add_bos=False)}\n"
+                   f"{tokenizer.decode(tokenizer.encode(text, add_bos=False))}\n")
+    logger.info(message_log)
+
+logger.info(f"'{tokenizer.id_to_piece(15043)}'") # '_Hello'
+logger.info(f"'{tokenizer.id_to_piece(29871)}'") # '_'
+logger.info(f"'{tokenizer.decode([15043])}'") # 'Hello'
+logger.info(f"'{tokenizer.decode([15043, 15043])}'") # 'Hello Hello'
+logger.info(f"'{tokenizer.decode([29871, 15043])}'") # ' Hello'
+logger.info(f"'{tokenizer.decode([29871, 15043, 29871, 15043])}'") # ' Hello Hello'
+
+logger.info("\n\ntests for C++:\n")
 
 for text in tests:
     res = tokenizer.encode(text, add_bos=False)
 
+    # Modify text representation for logging
     k = text.replace('\n', '\\n')
     k = k.replace('\t', '\\t')
    k = '"' + k + '"'
-    print("{ %-24s, { " % k, end='')
+
+    # Log the modified text and its encoding
+    log_message = "{ %-24s, { " % k
     for x in res:
-        print("%7d," % x, end='')
-    print(" }, },")
+        log_message += "%7d," % x
+    log_message += " }, },"
+    logger.info(log_message)
 
-print(tokenizer.encode('hello'))
-print(tokenizer.encode('world'))
-print(tokenizer.encode(' world'))
-print(tokenizer.encode('hello world'))
+logger.info(tokenizer.encode('hello'))
+logger.info(tokenizer.encode('world'))
+logger.info(tokenizer.encode(' world'))
+logger.info(tokenizer.encode('hello world'))
 
 fname_tok = args.fname_tok
 if fname_tok:
-    print('tokenizing file: ', fname_tok)
+    logger.info(f"tokenizing file: {fname_tok}")
     fname_out = fname_tok + '.tok'
     with open(fname_tok, 'r', encoding='utf-8') as f:
         lines = f.readlines()
@@ -109,6 +121,6 @@ if fname_tok:
     with open(fname_out, 'w', encoding='utf-8') as f:
         for x in res:
             f.write(str(x) + ' \'' + tokenizer.decode(x) + '\'\n')
-    print('len(res): ', len(res))
-    print('len(lines): ', len(lines))
-    print('results written to: ', fname_out)
+    logger.info(f"len(res): {len(res)}")
+    logger.info(f"len(lines): {len(lines)}")
+    logger.info(f"results written to: {fname_out}")
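
One practical effect of the change shown above: `logging.basicConfig()` installs a handler that writes to stderr by default, so the token lists and C++ test vectors this script emits now go to stderr rather than stdout as they did with `print()`. If stdout output is preferred when capturing the test vectors, the handler's stream can be set explicitly; a minimal sketch, not part of this commit:

```python
# Minimal sketch, not part of this commit: route log records to stdout instead of
# the default stderr stream used by logging.basicConfig().
import logging
import sys

logging.basicConfig(stream=sys.stdout, level=logging.INFO)
logging.getLogger("test-tokenizer-0-spm").info("this record is written to stdout")
```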