path: root/gguf-py/scripts
author     Brian <mofosyne@gmail.com>       2024-05-04 05:36:41 +1000
committer  GitHub <noreply@github.com>      2024-05-03 22:36:41 +0300
commit     a2ac89d6efb41b535778bfeaecaae8fe295b6ed3 (patch)
tree       584a6f5316a627e64bfbc3aa5e098b911aef285a /gguf-py/scripts
parent     433def286e98751bf17db75dce53847d075c0be5 (diff)
convert.py : add python logging instead of print() (#6511)
* convert.py: add python logging instead of print()
* convert.py: verbose flag takes priority over dump flag log suppression
* convert.py: named instance logging
* convert.py: use explicit logger id string
* convert.py: convert extra print() to named logger
* convert.py: sys.stderr.write --> logger.error
* *.py: Convert all python scripts to use logging module
* requirements.txt: remove extra line
* flake8: update flake8 ignore and exclude to match ci settings
* gh-actions: add flake8-no-print to flake8 lint step
* pre-commit: add flake8-no-print to flake8 and also update pre-commit version
* convert-hf-to-gguf.py: print() to logger conversion
* *.py: logging basiconfig refactor to use conditional expression
* *.py: removed commented out logging
* fixup! *.py: logging basiconfig refactor to use conditional expression
* constant.py: logger.error then exit should be a raise exception instead
* *.py: Convert logger error and sys.exit() into a raise exception (for atypical error)
* gguf-convert-endian.py: refactor convert_byteorder() to use tqdm progressbar
* verify-checksum-model.py: This is the result of the program, it should be printed to stdout.
* compare-llama-bench.py: add blank line for readability during missing repo response
* reader.py: read_gguf_file() use print() over logging
* convert.py: warning goes to stderr and won't hurt the dump output
* gguf-dump.py: dump_metadata() should print to stdout
* convert-hf-to-gguf.py: print --> logger.debug or ValueError()
* verify-checksum-models.py: use print() for printing table
* *.py: refactor logging.basicConfig()
* gguf-py/gguf/*.py: use __name__ as logger name
  Since they will be imported and not run directly.
* python-lint.yml: use .flake8 file instead
* constants.py: logger no longer required
* convert-hf-to-gguf.py: add additional logging
* convert-hf-to-gguf.py: print() --> logger
* *.py: fix flake8 warnings
* revert changes to convert-hf-to-gguf.py for get_name()
* convert-hf-to-gguf-update.py: use triple quoted f-string instead
* *.py: accidentally corrected the wrong line
* *.py: add compilade warning suggestions and style fixes
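The standalone scripts in this directory all converge on the same pattern: a module-level named logger, a logging.basicConfig() call whose level is chosen by a conditional expression on a new --verbose flag, and intentional stdout output left as print() with a # noqa: NP100 marker for the flake8-no-print check. A minimal sketch of that pattern (the script name "gguf-example" is illustrative; the flag wiring mirrors the diffs below):

import argparse
import logging

logger = logging.getLogger("gguf-example")  # library modules under gguf-py/gguf use __name__ instead


def main() -> None:
    parser = argparse.ArgumentParser()
    parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
    args = parser.parse_args()

    # Conditional expression picks the level from the flag, as in the scripts below
    logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)

    logger.info("diagnostics go through the named logger")
    print("program results still go to stdout")  # noqa: NP100


if __name__ == "__main__":
    main()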
Diffstat (limited to 'gguf-py/scripts')
-rwxr-xr-x  gguf-py/scripts/gguf-convert-endian.py    94
-rwxr-xr-x  gguf-py/scripts/gguf-dump.py              29
-rwxr-xr-x  gguf-py/scripts/gguf-set-metadata.py      31
3 files changed, 96 insertions, 58 deletions
diff --git a/gguf-py/scripts/gguf-convert-endian.py b/gguf-py/scripts/gguf-convert-endian.py
index 10a16ad0..b698af0f 100755
--- a/gguf-py/scripts/gguf-convert-endian.py
+++ b/gguf-py/scripts/gguf-convert-endian.py
@@ -1,9 +1,11 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import argparse
import os
import sys
+from tqdm import tqdm
from pathlib import Path
import numpy as np
@@ -14,6 +16,8 @@ if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent /
import gguf
+logger = logging.getLogger("gguf-convert-endian")
+
def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None:
if np.uint32(1) == np.uint32(1).newbyteorder("<"):
@@ -29,11 +33,11 @@ def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None
else:
file_endian = host_endian
order = host_endian if args.order == "native" else args.order
- print(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian")
+ logger.info(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian")
if file_endian == order:
- print(f"* File is already {order.upper()} endian. Nothing to do.")
+ logger.info(f"* File is already {order.upper()} endian. Nothing to do.")
sys.exit(0)
- print("* Checking tensors for conversion compatibility")
+ logger.info("* Checking tensors for conversion compatibility")
for tensor in reader.tensors:
if tensor.tensor_type not in (
gguf.GGMLQuantizationType.F32,
@@ -41,51 +45,64 @@ def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None
gguf.GGMLQuantizationType.Q8_0,
):
raise ValueError(f"Cannot handle type {tensor.tensor_type.name} for tensor {repr(tensor.name)}")
- print(f"* Preparing to convert from {file_endian.upper()} to {order.upper()}")
+ logger.info(f"* Preparing to convert from {file_endian.upper()} to {order.upper()}")
if args.dry_run:
return
- print("\n*** Warning *** Warning *** Warning **")
- print("* This conversion process may damage the file. Ensure you have a backup.")
+ logger.warning("*** Warning *** Warning *** Warning **")
+ logger.warning("* This conversion process may damage the file. Ensure you have a backup.")
if order != host_endian:
- print("* Requested endian differs from host, you will not be able to load the model on this machine.")
- print("* The file will be modified immediately, so if conversion fails or is interrupted")
- print("* the file will be corrupted. Enter exactly YES if you are positive you want to proceed:")
+ logger.warning("* Requested endian differs from host, you will not be able to load the model on this machine.")
+ logger.warning("* The file will be modified immediately, so if conversion fails or is interrupted")
+ logger.warning("* the file will be corrupted. Enter exactly YES if you are positive you want to proceed:")
response = input("YES, I am sure> ")
if response != "YES":
- print("You didn't enter YES. Okay then, see ya!")
+ logger.warning("You didn't enter YES. Okay then, see ya!")
sys.exit(0)
- print(f"\n* Converting fields ({len(reader.fields)})")
+ logger.info(f"* Converting fields ({len(reader.fields)})")
for idx, field in enumerate(reader.fields.values()):
- print(f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}")
+ logger.info(f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}")
for part in field.parts:
part.byteswap(inplace=True)
- print(f"\n* Converting tensors ({len(reader.tensors)})")
- for idx, tensor in enumerate(reader.tensors):
- print(
- f" - {idx:4}: Converting tensor {repr(tensor.name)}, type={tensor.tensor_type.name}, "
- f"elements={tensor.n_elements}... ",
- end="",
+ logger.info(f"* Converting tensors ({len(reader.tensors)})")
+
+ for idx, tensor in enumerate(pbar := tqdm(reader.tensors, desc="Converting tensor")):
+ log_message = (
+ f"Converting tensor {repr(tensor.name)}, "
+ f"type={tensor.tensor_type.name}, "
+ f"elements={tensor.n_elements} "
)
- tensor_type = tensor.tensor_type
+
+ # Byte-swap each part of the tensor's field
for part in tensor.field.parts:
part.byteswap(inplace=True)
- if tensor_type != gguf.GGMLQuantizationType.Q8_0:
+
+ # Byte-swap tensor data if necessary
+ if tensor.tensor_type == gguf.GGMLQuantizationType.Q8_0:
+ # Handle Q8_0 tensor blocks (block_q8_0)
+ # Specific handling of block_q8_0 is required.
+ # Each block_q8_0 consists of an f16 delta (scaling factor) followed by 32 int8 quantizations.
+
+ block_size = 34 # 34 bytes = <f16 delta scaling factor> + 32 * <int8 quant>
+
+ n_blocks = len(tensor.data) // block_size
+ for block_num in (inner_pbar := tqdm(range(n_blocks), desc="Byte-swapping Blocks", leave=False)):
+ block_offs = block_num * block_size
+
+ # Byte-Swap f16 sized delta field
+ delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16)
+ delta.byteswap(inplace=True)
+
+ # The 32 int8 quants that follow are single bytes, so there is nothing further to swap;
+ # just refresh the progress description periodically with how many blocks remain.
+ if block_num % 100000 == 0:
+ inner_pbar.set_description(f"Byte-swapping Blocks [{n_blocks - block_num} remaining]")
+
+ else:
+ # Handle other tensor types
tensor.data.byteswap(inplace=True)
- print()
- continue
- # A Q8_0 block consists of a f16 delta followed by 32 int8 quants, so 34 bytes
- block_size = 34
- n_blocks = len(tensor.data) // block_size
- for block_num in range(n_blocks):
- block_offs = block_num * block_size
- # I know I said f16, but it doesn't matter here - any simple 16 bit type works.
- delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16)
- delta.byteswap(inplace=True)
- if block_num % 100000 == 0:
- print(f"[{(n_blocks - block_num) // 1000}K]", end="")
- sys.stdout.flush()
- print()
- print("* Completion")
+
+ pbar.set_description(log_message)
+
+ logger.info("* Completion")
def main() -> None:
@@ -102,8 +119,13 @@ def main() -> None:
"--dry-run", action="store_true",
help="Don't actually change anything",
)
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+
args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
- print(f'* Loading: {args.model}')
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+ logger.info(f'* Loading: {args.model}')
reader = gguf.GGUFReader(args.model, 'r' if args.dry_run else 'r+')
convert_byteorder(reader, args)
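The Q8_0 branch above byte-swaps only the 16-bit delta at the start of each 34-byte block; the 32 int8 quants that follow are endian-neutral. A self-contained sketch of that per-block swap, using a synthetic numpy buffer in place of tensor.data from GGUFReader (the helper name and test values are illustrative):

import numpy as np

BLOCK_SIZE = 34  # one block_q8_0: 2-byte f16 delta + 32 one-byte quants


def swap_q8_0_deltas(data: np.ndarray) -> None:
    # Byte-swap the f16 delta of every block_q8_0 in a uint8 buffer, in place.
    n_blocks = len(data) // BLOCK_SIZE
    for block_num in range(n_blocks):
        offs = block_num * BLOCK_SIZE
        # Any simple 16-bit view works for a pure byte swap; uint16 avoids float handling.
        delta = data[offs:offs + 2].view(dtype=np.uint16)
        delta.byteswap(inplace=True)
        # The 32 int8 quants that follow are single bytes, so nothing to swap there.


# Two fake blocks with zero quants and deltas 0x0102 / 0x0304:
buf = np.zeros(2 * BLOCK_SIZE, dtype=np.uint8)
buf[0:2] = (0x01, 0x02)
buf[BLOCK_SIZE:BLOCK_SIZE + 2] = (0x03, 0x04)
swap_q8_0_deltas(buf)
assert list(buf[0:2]) == [0x02, 0x01] and list(buf[BLOCK_SIZE:BLOCK_SIZE + 2]) == [0x04, 0x03]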
diff --git a/gguf-py/scripts/gguf-dump.py b/gguf-py/scripts/gguf-dump.py
index dbf89150..2d3c3943 100755
--- a/gguf-py/scripts/gguf-dump.py
+++ b/gguf-py/scripts/gguf-dump.py
@@ -1,6 +1,7 @@
#!/usr/bin/env python3
from __future__ import annotations
+import logging
import argparse
import os
import sys
@@ -15,6 +16,8 @@ if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent /
from gguf import GGUFReader, GGUFValueType # noqa: E402
+logger = logging.getLogger("gguf-dump")
+
def get_file_host_endian(reader: GGUFReader) -> tuple[str, str]:
host_endian = 'LITTLE' if np.uint32(1) == np.uint32(1).newbyteorder("<") else 'BIG'
@@ -29,8 +32,8 @@ def get_file_host_endian(reader: GGUFReader) -> tuple[str, str]:
# please see the comments in the modify_gguf.py example.
def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
host_endian, file_endian = get_file_host_endian(reader)
- print(f'* File is {file_endian} endian, script is running on a {host_endian} endian host.')
- print(f'\n* Dumping {len(reader.fields)} key/value pair(s)')
+ print(f'* File is {file_endian} endian, script is running on a {host_endian} endian host.') # noqa: NP100
+ print(f'* Dumping {len(reader.fields)} key/value pair(s)') # noqa: NP100
for n, field in enumerate(reader.fields.values(), 1):
if not field.types:
pretty_type = 'N/A'
@@ -39,20 +42,21 @@ def dump_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
pretty_type = '[' * nest_count + str(field.types[-1].name) + ']' * nest_count
else:
pretty_type = str(field.types[-1].name)
- print(f' {n:5}: {pretty_type:10} | {len(field.data):8} | {field.name}', end = '')
+
+ log_message = f' {n:5}: {pretty_type:10} | {len(field.data):8} | {field.name}'
if len(field.types) == 1:
curr_type = field.types[0]
if curr_type == GGUFValueType.STRING:
- print(' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60])), end = '')
+ log_message += ' = {0}'.format(repr(str(bytes(field.parts[-1]), encoding='utf8')[:60]))
elif field.types[0] in reader.gguf_scalar_to_np:
- print(' = {0}'.format(field.parts[-1][0]), end = '')
- print()
+ log_message += ' = {0}'.format(field.parts[-1][0])
+ print(log_message) # noqa: NP100
if args.no_tensors:
return
- print(f'\n* Dumping {len(reader.tensors)} tensor(s)')
+ print(f'* Dumping {len(reader.tensors)} tensor(s)') # noqa: NP100
for n, tensor in enumerate(reader.tensors, 1):
prettydims = ', '.join('{0:5}'.format(d) for d in list(tensor.shape) + [1] * (4 - len(tensor.shape)))
- print(f' {n:5}: {tensor.n_elements:10} | {prettydims} | {tensor.tensor_type.name:7} | {tensor.name}')
+ print(f' {n:5}: {tensor.n_elements:10} | {prettydims} | {tensor.tensor_type.name:7} | {tensor.name}') # noqa: NP100
def dump_metadata_json(reader: GGUFReader, args: argparse.Namespace) -> None:
@@ -103,10 +107,17 @@ def main() -> None:
parser.add_argument("--no-tensors", action="store_true", help="Don't dump tensor metadata")
parser.add_argument("--json", action="store_true", help="Produce JSON output")
parser.add_argument("--json-array", action="store_true", help="Include full array values in JSON output (long)")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+
args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
if not args.json:
- print(f'* Loading: {args.model}')
+ logger.info(f'* Loading: {args.model}')
+
reader = GGUFReader(args.model, 'r')
+
if args.json:
dump_metadata_json(reader, args)
else:
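gguf-dump.py now splits its output: the dump itself stays on stdout via print(), marked # noqa: NP100 so the flake8-no-print check passes, while the loading diagnostic goes through the logger. A rough sketch of that split using the GGUFReader field API seen in the diff (the model path is illustrative):

import logging

from gguf import GGUFReader, GGUFValueType

logger = logging.getLogger("gguf-dump")
logging.basicConfig(level=logging.INFO)

logger.info("* Loading: model.gguf")              # diagnostic -> logger (stderr by default)
reader = GGUFReader("model.gguf", "r")            # illustrative path
for n, field in enumerate(reader.fields.values(), 1):
    line = f"  {n:5}: {len(field.data):8} | {field.name}"
    if field.types and field.types[0] == GGUFValueType.STRING:
        line += " = " + repr(str(bytes(field.parts[-1]), encoding="utf8")[:60])
    print(line)  # noqa: NP100 -- the dump output itself stays on stdout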
diff --git a/gguf-py/scripts/gguf-set-metadata.py b/gguf-py/scripts/gguf-set-metadata.py
index 3ebdfa89..e35b651b 100755
--- a/gguf-py/scripts/gguf-set-metadata.py
+++ b/gguf-py/scripts/gguf-set-metadata.py
@@ -1,4 +1,5 @@
#!/usr/bin/env python3
+import logging
import argparse
import os
import sys
@@ -10,6 +11,8 @@ if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent /
from gguf import GGUFReader # noqa: E402
+logger = logging.getLogger("gguf-set-metadata")
+
def minimal_example(filename: str) -> None:
reader = GGUFReader(filename, 'r+')
@@ -41,36 +44,33 @@ def minimal_example(filename: str) -> None:
def set_metadata(reader: GGUFReader, args: argparse.Namespace) -> None:
field = reader.get_field(args.key)
if field is None:
- print(f'! Field {repr(args.key)} not found', file = sys.stderr)
+ logger.error(f'! Field {repr(args.key)} not found')
sys.exit(1)
# Note that field.types is a list of types. This is because the GGUF
# format supports arrays. For example, an array of UINT32 would
# look like [GGUFValueType.ARRAY, GGUFValueType.UINT32]
handler = reader.gguf_scalar_to_np.get(field.types[0]) if field.types else None
if handler is None:
- print(
- f'! This tool only supports changing simple values, {repr(args.key)} has unsupported type {field.types}',
- file = sys.stderr,
- )
+ logger.error(f'! This tool only supports changing simple values, {repr(args.key)} has unsupported type {field.types}')
sys.exit(1)
current_value = field.parts[field.data[0]][0]
new_value = handler(args.value)
- print(f'* Preparing to change field {repr(args.key)} from {current_value} to {new_value}')
+ logger.info(f'* Preparing to change field {repr(args.key)} from {current_value} to {new_value}')
if current_value == new_value:
- print(f'- Key {repr(args.key)} already set to requested value {current_value}')
+ logger.info(f'- Key {repr(args.key)} already set to requested value {current_value}')
sys.exit(0)
if args.dry_run:
sys.exit(0)
if not args.force:
- print('*** Warning *** Warning *** Warning **')
- print('* Changing fields in a GGUF file can make it unusable. Proceed at your own risk.')
- print('* Enter exactly YES if you are positive you want to proceed:')
+ logger.warning('*** Warning *** Warning *** Warning **')
+ logger.warning('* Changing fields in a GGUF file can make it unusable. Proceed at your own risk.')
+ logger.warning('* Enter exactly YES if you are positive you want to proceed:')
response = input('YES, I am sure> ')
if response != 'YES':
- print("You didn't enter YES. Okay then, see ya!")
+ logger.info("You didn't enter YES. Okay then, see ya!")
sys.exit(0)
field.parts[field.data[0]][0] = new_value
- print('* Field changed. Successful completion.')
+ logger.info('* Field changed. Successful completion.')
def main() -> None:
@@ -80,8 +80,13 @@ def main() -> None:
parser.add_argument("value", type=str, help="Metadata value to set")
parser.add_argument("--dry-run", action="store_true", help="Don't actually change anything")
parser.add_argument("--force", action="store_true", help="Change the field without confirmation")
+ parser.add_argument("--verbose", action="store_true", help="increase output verbosity")
+
args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
- print(f'* Loading: {args.model}')
+
+ logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
+
+ logger.info(f'* Loading: {args.model}')
reader = GGUFReader(args.model, 'r' if args.dry_run else 'r+')
set_metadata(reader, args)
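For reference, the scalar edit that set_metadata() performs amounts to an in-place write through the memory-mapped file: GGUFReader opened with 'r+' exposes the field parts as writable numpy views. A sketch of that flow outside of argparse, assuming an illustrative path and a hypothetical key:

from gguf import GGUFReader

reader = GGUFReader("model.gguf", "r+")                   # read-write mapping; path is illustrative
field = reader.get_field("tokenizer.ggml.bos_token_id")   # hypothetical key
if field is not None and field.types:
    handler = reader.gguf_scalar_to_np.get(field.types[0])
    if handler is not None:
        current_value = field.parts[field.data[0]][0]
        new_value = handler("2")                           # convert the CLI string to the field's numpy scalar type
        print(f"* changing {current_value} -> {new_value}")  # noqa: NP100
        field.parts[field.data[0]][0] = new_value          # written straight back through the mmap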