author     Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>  2023-11-10 22:04:50 -0700
committer  GitHub <noreply@github.com>  2023-11-11 08:04:50 +0300
commit     34b0a082074b073eb14c2bd93c0c070e20ddcd16
tree       6bbc77442dac1294ae1f74f48a722437cadf1cd0 /convert-llama-ggml-to-gguf.py
parent     4a4fd3eefad5bd17ab6bcd8e2181b4f62eae76cf
gguf-py: Refactor and allow reading/modifying existing GGUF files (#3981)
* gguf-py: Refactor and add file reading support
* Replay changes from #3871 (credit to @cebtenzzre for that pull)
* Various type annotation fixes.
* sort imports with isort (again)
* Fix missing return statement in add_tensor
* style cleanup with flake8
* fix NamedTuple and Enum usage
* Fix an issue with state init in GGUFReader
* Move examples to an examples/ directory
* Clean up examples
* Add an example of modifying keys in a GGUF file
* Update documentation with info on examples
* Try to support people importing gguf/gguf.py directly
* Damagage is not a word.
* Clean up gguf-py/examples/modify_gguf.py whitespace (Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>)
* Update gguf-py/examples/modify_gguf.py formatting (Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>)
* Update gguf-py/gguf/gguf_reader.py type hint (Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>)
* Make examples executable, formatting changes
* Add more information to GGUFReader and examples comments
* Include a gguf Python package version bump
* Add convert-gguf-endian.py script
* cleanup
* gguf-py : bump minor version
* Reorganize scripts
* Make GGUFReader endian detection less arbitrary
* Add JSON dumping support to gguf-dump.py (which I kind of regret now)
* A few gguf-dump.py cleanups
* Murder accidental tuple in gguf-py/scripts/gguf-dump.py (Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>)
* cleanup
* constants : remove unneeded type annotations
* fix python 3.8 compat
* Set up gguf- scripts in pyproject.toml
* And include scripts/__init__.py, derp
* convert.py: We can't currently support Q8_0 on big endian.
* gguf-py: SpecialVocab: Always try available sources for special token ids
* gguf-py: SpecialVocab: Try to load merges from merges.txt if not in tokenizer.json
* gguf-py: SpecialVocab: Add 'add_bos_token' type bools to GGUF metadata
* cleanup
* Promote add_X_token to GGUF metadata for BOS and EOS

Co-authored-by: Jared Van Bortel <jared@nomic.ai>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
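The headline change is a GGUFReader class that can read, and with mode='r+' modify, existing GGUF files. A minimal sketch of the reading side, assuming a local model.gguf and the fields/tensors attribute names used by gguf-py at this commit:

#!/usr/bin/env python3
# Minimal sketch, not the shipped example: 'model.gguf' is a placeholder path,
# and the attribute names below (fields, tensors, ...) are assumed from the
# gguf-py version introduced in this PR; later releases may differ.
from gguf import GGUFReader

reader = GGUFReader('model.gguf')  # pass mode='r+' to allow in-place edits

# Metadata is exposed as an ordered mapping of key -> ReaderField.
for key, field in reader.fields.items():
    print(key, field.types)

# Tensor metadata; the tensor data itself is memory-mapped, not loaded eagerly.
for tensor in reader.tensors:
    print(tensor.name, tensor.tensor_type.name, tensor.shape, tensor.n_bytes)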
Diffstat (limited to 'convert-llama-ggml-to-gguf.py')
 -rwxr-xr-x  convert-llama-ggml-to-gguf.py | 24 ++----------------------
 1 file changed, 2 insertions(+), 22 deletions(-)
diff --git a/convert-llama-ggml-to-gguf.py b/convert-llama-ggml-to-gguf.py
index 871add64..d898d81c 100755
--- a/convert-llama-ggml-to-gguf.py
+++ b/convert-llama-ggml-to-gguf.py
@@ -12,29 +12,9 @@ import numpy as np
 
 import os
 if 'NO_LOCAL_GGUF' not in os.environ:
-    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py' / 'gguf'))
+    sys.path.insert(1, str(Path(__file__).parent / 'gguf-py'))
 import gguf
 
-# Note: Does not support GGML_QKK_64
-QK_K = 256
-# Items here are (block size, type size)
-GGML_QUANT_SIZES = {
-    gguf.GGMLQuantizationType.F32 : (1, 4),
-    gguf.GGMLQuantizationType.F16 : (1, 2),
-    gguf.GGMLQuantizationType.Q4_0 : (32, 2 + 16),
-    gguf.GGMLQuantizationType.Q4_1 : (32, 2 + 2 + 16),
-    gguf.GGMLQuantizationType.Q5_0 : (32, 2 + 4 + 16),
-    gguf.GGMLQuantizationType.Q5_1 : (32, 2 + 2 + 4 + 16),
-    gguf.GGMLQuantizationType.Q8_0 : (32, 2 + 32),
-    gguf.GGMLQuantizationType.Q8_1 : (32, 4 + 4 + 32),
-    gguf.GGMLQuantizationType.Q2_K : (256, 2 + 2 + QK_K // 16 + QK_K // 4),
-    gguf.GGMLQuantizationType.Q3_K : (256, 2 + QK_K // 4 + QK_K // 8 + 12),
-    gguf.GGMLQuantizationType.Q4_K : (256, 2 + 2 + QK_K // 2 + 12),
-    gguf.GGMLQuantizationType.Q5_K : (256, 2 + 2 + QK_K // 2 + QK_K // 8 + 12),
-    gguf.GGMLQuantizationType.Q6_K : (256, 2 + QK_K // 2 + QK_K // 4 + QK_K // 16),
-    gguf.GGMLQuantizationType.Q8_K : (256, 4 + QK_K + QK_K // 8),
-}
-
 class GGMLFormat(IntEnum):
     GGML = 0
     GGMF = 1
@@ -125,7 +105,7 @@ class Tensor:
         (n_dims, name_len, dtype) = struct.unpack('<3I', data[offset:offset + 12])
         assert n_dims >= 0 and n_dims <= 4, f'Invalid tensor dimensions {n_dims}'
         assert name_len < 4096, 'Absurd tensor name length'
-        quant = GGML_QUANT_SIZES.get(dtype)
+        quant = gguf.GGML_QUANT_SIZES.get(dtype)
         assert quant is not None, 'Unknown tensor type'
         (blksize, tysize) = quant
         offset += 12
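The deleted table is not gone, only relocated: gguf-py now exports GGML_QUANT_SIZES itself, which is why the second hunk qualifies the lookup. Each entry maps a quantization type to a (block size, type size) pair, which is how the converter turns an element count into a byte count. A small sketch of that arithmetic (the 4096 x 4096 tensor is just an illustrative size):

#!/usr/bin/env python3
# Sketch of how the (block size, type size) pairs are used: elements are
# grouped into blocks of `blksize` values, each block stored in `tysize`
# bytes. GGML_QUANT_SIZES is the table this diff now imports from gguf-py.
import gguf

def tensor_n_bytes(n_elems: int, dtype: gguf.GGMLQuantizationType) -> int:
    blksize, tysize = gguf.GGML_QUANT_SIZES[dtype]
    assert n_elems % blksize == 0, 'element count must be a whole number of blocks'
    return (n_elems // blksize) * tysize

# Q4_0 packs 32 elements into 2 + 16 = 18 bytes:
print(tensor_n_bytes(4096 * 4096, gguf.GGMLQuantizationType.Q4_0))  # 9437184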