path: root/gguf-py/scripts/gguf-convert-endian.py
author    Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>  2023-11-10 22:04:50 -0700
committer GitHub <noreply@github.com>  2023-11-11 08:04:50 +0300
commit    34b0a082074b073eb14c2bd93c0c070e20ddcd16 (patch)
tree      6bbc77442dac1294ae1f74f48a722437cadf1cd0 /gguf-py/scripts/gguf-convert-endian.py
parent    4a4fd3eefad5bd17ab6bcd8e2181b4f62eae76cf (diff)
gguf-py: Refactor and allow reading/modifying existing GGUF files (#3981)
* gguf-py: Refactor and add file reading support
* Replay changes from #3871
  Credit to @cebtenzzre for that pull
* Various type annotation fixes.
* sort imports with isort (again)
* Fix missing return statement in add_tensor
* style cleanup with flake8
* fix NamedTuple and Enum usage
* Fix an issue with state init in GGUFReader
  Move examples to an examples/ directory
  Clean up examples
  Add an example of modifying keys in a GGUF file
  Update documentation with info on examples
  Try to support people importing gguf/gguf.py directly
* Damagage is not a word.
* Clean up gguf-py/examples/modify_gguf.py whitespace
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* Update gguf-py/examples/modify_gguf.py formatting
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* Update gguf-py/gguf/gguf_reader.py type hint
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* Make examples executable, formatting changes
* Add more information to GGUFReader and examples comments
* Include a gguf Python package version bump
* Add convert-gguf-endian.py script
* cleanup
* gguf-py : bump minor version
* Reorganize scripts
* Make GGUFReader endian detection less arbitrary
* Add JSON dumping support to gguf-dump.py
  Which I kind of regret now
* A few for gguf-dump.py cleanups
* Murder accidental tuple in gguf-py/scripts/gguf-dump.py
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* cleanup
* constants : remove unneeded type annotations
* fix python 3.8 compat
* Set up gguf- scripts in pyproject.toml
* And include scripts/__init__.py, derp
* convert.py: We can't currently support Q8_0 on big endian.
* gguf-py: SpecialVocab: Always try available sources for special token ids
  gguf-py: SpecialVocab: Try to load merges from merges.txt if not in tokenizer.json
  gguf-py: SpecialVocab: Add 'add_bos_token' type bools to GGUF metadata u
* cleanup
* Promote add_X_token to GGUF metadata for BOS and EOS
---------
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
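
The headline feature of this PR is the new read path: gguf.GGUFReader can open an existing GGUF file and expose its metadata fields and tensor infos, which is what the conversion script in this diff is built on. As a rough orientation, a minimal sketch of that usage follows; it is not one of the PR's bundled examples, and "model.gguf" is a placeholder filename.

#!/usr/bin/env python3
# Minimal sketch of the read/modify support this PR adds (not part of the commit).
# "model.gguf" is a placeholder; pass "r+" instead of "r" to allow in-place edits.
import gguf

reader = gguf.GGUFReader("model.gguf", "r")

# Metadata key/value fields
for field in reader.fields.values():
    print(f"field: {field.name}, part count: {len(field.parts)}")

# Tensor infos
for tensor in reader.tensors:
    print(f"tensor: {tensor.name}, type={tensor.tensor_type.name}, elements={tensor.n_elements}")

The bundled examples under gguf-py/examples/ (including modify_gguf.py mentioned above) cover the same ground in more detail.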
Diffstat (limited to 'gguf-py/scripts/gguf-convert-endian.py')
-rwxr-xr-x  gguf-py/scripts/gguf-convert-endian.py  113
1 file changed, 113 insertions, 0 deletions
diff --git a/gguf-py/scripts/gguf-convert-endian.py b/gguf-py/scripts/gguf-convert-endian.py
new file mode 100755
index 00000000..b79d86e0
--- /dev/null
+++ b/gguf-py/scripts/gguf-convert-endian.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python3
+from __future__ import annotations
+
+import argparse
+import os
+import sys
+from pathlib import Path
+
+import numpy as np
+
+# Necessary to load the local gguf package
+if "NO_LOCAL_GGUF" not in os.environ and (Path(__file__).parent.parent.parent / 'gguf-py').exists():
+    sys.path.insert(0, str(Path(__file__).parent.parent))
+
+import gguf
+
+
+def convert_byteorder(reader: gguf.GGUFReader, args: argparse.Namespace) -> None:
+    if np.uint32(1) == np.uint32(1).newbyteorder("<"):
+        # Host is little endian
+        host_endian = "little"
+        swapped_endian = "big"
+    else:
+        # Sorry PDP or other weird systems that don't use BE or LE.
+        host_endian = "big"
+        swapped_endian = "little"
+    if reader.byte_order == "S":
+        file_endian = swapped_endian
+    else:
+        file_endian = host_endian
+    if args.order == "native":
+        order = host_endian
+    else:
+        # Without this branch, "order" would be unbound for explicit "big"/"little" requests.
+        order = args.order
+    print(f"* Host is {host_endian.upper()} endian, GGUF file seems to be {file_endian.upper()} endian")
+    if file_endian == order:
+        print(f"* File is already {order.upper()} endian. Nothing to do.")
+        sys.exit(0)
+    print("* Checking tensors for conversion compatibility")
+    for tensor in reader.tensors:
+        if tensor.tensor_type not in (
+            gguf.GGMLQuantizationType.F32,
+            gguf.GGMLQuantizationType.F16,
+            gguf.GGMLQuantizationType.Q8_0,
+        ):
+            raise ValueError(f"Cannot handle type {tensor.tensor_type.name} for tensor {repr(tensor.name)}")
+    print(f"* Preparing to convert from {file_endian.upper()} to {order.upper()}")
+    if args.dry_run:
+        return
+    print("\n*** Warning *** Warning *** Warning **")
+    print("* This conversion process may damage the file. Ensure you have a backup.")
+    if order != host_endian:
+        print("* Requested endian differs from host, you will not be able to load the model on this machine.")
+    print("* The file will be modified immediately, so if conversion fails or is interrupted")
+    print("* the file will be corrupted. Enter exactly YES if you are positive you want to proceed:")
+    response = input("YES, I am sure> ")
+    if response != "YES":
+        print("You didn't enter YES. Okay then, see ya!")
+        sys.exit(0)
+    print(f"\n* Converting fields ({len(reader.fields)})")
+    for idx, field in enumerate(reader.fields.values()):
+        print(f"- {idx:4}: Converting field {repr(field.name)}, part count: {len(field.parts)}")
+        for part in field.parts:
+            part.byteswap(inplace=True)
+    print(f"\n* Converting tensors ({len(reader.tensors)})")
+    for idx, tensor in enumerate(reader.tensors):
+        print(
+            f"  - {idx:4}: Converting tensor {repr(tensor.name)}, type={tensor.tensor_type.name}, "
+            f"elements={tensor.n_elements}... ",
+            end="",
+        )
+        tensor_type = tensor.tensor_type
+        for part in tensor.field.parts:
+            part.byteswap(inplace=True)
+        if tensor_type != gguf.GGMLQuantizationType.Q8_0:
+            tensor.data.byteswap(inplace=True)
+            print()
+            continue
+        # A Q8_0 block consists of a f16 delta followed by 32 int8 quants, so 34 bytes
+        block_size = 34
+        n_blocks = len(tensor.data) // block_size
+        for block_num in range(n_blocks):
+            block_offs = block_num * block_size
+            # I know I said f16, but it doesn't matter here - any simple 16 bit type works.
+            delta = tensor.data[block_offs:block_offs + 2].view(dtype=np.uint16)
+            delta.byteswap(inplace=True)
+            if block_num % 100000 == 0:
+                print(f"[{(n_blocks - block_num) // 1000}K]", end="")
+                sys.stdout.flush()
+        print()
+    print("* Completion")
+
+
+def main() -> None:
+    parser = argparse.ArgumentParser(description="Convert GGUF file byte order")
+    parser.add_argument(
+        "model", type=str,
+        help="GGUF format model filename",
+    )
+    parser.add_argument(
+        "order", type=str, choices=['big', 'little', 'native'],
+        help="Requested byte order",
+    )
+    parser.add_argument(
+        "--dry-run", action="store_true",
+        help="Don't actually change anything",
+    )
+    args = parser.parse_args(None if len(sys.argv) > 1 else ["--help"])
+    print(f'* Loading: {args.model}')
+    reader = gguf.GGUFReader(args.model, 'r' if args.dry_run else 'r+')
+    convert_byteorder(reader, args)
+
+
+if __name__ == "__main__":
+    main()
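
For reference, the new script is run as, e.g., "python gguf-py/scripts/gguf-convert-endian.py model.gguf big --dry-run", where model.gguf is a placeholder filename. The subtlest step above is the Q8_0 branch: only the 16-bit delta at the start of each 34-byte block is byteswapped, because the remaining 32 int8 quants are single bytes and have no byte order. A small standalone sketch of that per-block swap on synthetic data (the values and block count are made up for illustration):

#!/usr/bin/env python3
# Standalone illustration of the per-block Q8_0 byteswap used in the script above.
# The buffer here is synthetic; in the real script it is tensor.data from GGUFReader.
import numpy as np

block_size = 34                                  # 2-byte delta + 32 int8 quants
data = np.zeros(2 * block_size, dtype=np.uint8)  # two fake Q8_0 blocks
data[0:2].view(np.uint16)[0] = 0x1234            # delta of block 0
data[block_size:block_size + 2].view(np.uint16)[0] = 0x5678  # delta of block 1

n_blocks = len(data) // block_size
for block_num in range(n_blocks):
    block_offs = block_num * block_size
    # Swap only the 16-bit delta; the int8 quants need no swapping.
    data[block_offs:block_offs + 2].view(np.uint16).byteswap(inplace=True)

print(hex(data[0:2].view(np.uint16)[0]))  # 0x3412: the delta's bytes are now reversed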