author    Ondřej Čertík <ondrej@certik.us>    2024-03-15 02:46:51 -0600
committer GitHub <noreply@github.com>         2024-03-15 10:46:51 +0200
commit    7ce2c77f88e1ca66ec48417e56f91746bac018c2 (patch)
tree      b6f35f35a17641d7665799deb55ed3f9f59a116a /gguf-py
parent    aab606a11fc0a9740a7f297521c3eef851dfb351 (diff)
gguf : add support for I64 and F64 arrays (#6062)
* gguf : add support for I64 and F64 arrays

GGML currently does not support I64 or F64 arrays, and they are not often used in machine learning. However, if the need arises in the future, it would be nice to add them now, so that the types sit next to the other types I8, I16, I32 in the enums, and their type numbers are reserved.

Furthermore, with this addition the GGUF format becomes very usable for most computational applications of NumPy (being compatible with the most common NumPy dtypes: i8, i16, i32, i64, f32, f64), providing a faster and more versatile alternative to the `npz` format, and a simpler alternative to the `hdf5` format.

The change in this PR is small and should not significantly increase the maintenance burden. I tested this from Python using GGUFWriter/Reader and `gguf-dump`, as well as from C, and everything seems to work.

* Fix compiler warnings
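For context, here is a minimal round-trip sketch of the `npz`-alternative use case described above, using the GGUFWriter/GGUFReader classes mentioned in the commit message. The file name and architecture string ("example") are hypothetical, chosen only for illustration.

# Illustrative sketch: write two NumPy arrays (i64, f64) to a GGUF file and read them back.
import numpy as np
from gguf import GGUFReader, GGUFWriter

writer = GGUFWriter("example.gguf", "example")
writer.add_tensor("ints", np.arange(16, dtype=np.int64))
writer.add_tensor("floats", np.linspace(0.0, 1.0, 16, dtype=np.float64))
writer.write_header_to_file()
writer.write_kv_data_to_file()
writer.write_tensors_to_file()
writer.close()

reader = GGUFReader("example.gguf")
for tensor in reader.tensors:
    print(tensor.name, tensor.data.dtype, tensor.data.shape)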
Diffstat (limited to 'gguf-py')
-rw-r--r--  gguf-py/gguf/constants.py     4
-rw-r--r--  gguf-py/gguf/gguf_reader.py  12
-rw-r--r--  gguf-py/gguf/gguf_writer.py  12
3 files changed, 21 insertions, 7 deletions
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 2d7cf16c..458a641d 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -665,6 +665,8 @@ class GGMLQuantizationType(IntEnum):
     I8 = 24
     I16 = 25
     I32 = 26
+    I64 = 27
+    F64 = 28

 class GGUFEndian(IntEnum):
@@ -734,6 +736,8 @@ GGML_QUANT_SIZES = {
     GGMLQuantizationType.I8: (1, 1),
     GGMLQuantizationType.I16: (1, 2),
     GGMLQuantizationType.I32: (1, 4),
+    GGMLQuantizationType.I64: (1, 8),
+    GGMLQuantizationType.F64: (1, 8),
 }
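Each GGML_QUANT_SIZES entry is a (block_size, type_size) pair; for element-wise types the block size is 1, so the new entries simply declare 8 bytes per element. A small sketch of how these values are used, mirroring the n_bytes computation in gguf_reader.py below (the element count of 1024 is an arbitrary example):

from gguf.constants import GGML_QUANT_SIZES, GGMLQuantizationType

block_size, type_size = GGML_QUANT_SIZES[GGMLQuantizationType.F64]  # (1, 8)
n_elems = 1024
n_bytes = n_elems * type_size // block_size  # 1024 * 8 // 1 == 8192 bytes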
diff --git a/gguf-py/gguf/gguf_reader.py b/gguf-py/gguf/gguf_reader.py
index 1c10f575..33afac55 100644
--- a/gguf-py/gguf/gguf_reader.py
+++ b/gguf-py/gguf/gguf_reader.py
@@ -242,12 +242,15 @@ class GGUFReader:
             n_bytes = n_elems * type_size // block_size
             data_offs = int(start_offs + offset_tensor[0])
             item_type: npt.DTypeLike
-            if ggml_type == GGMLQuantizationType.F32:
+            if ggml_type == GGMLQuantizationType.F16:
+                item_count = n_elems
+                item_type = np.float16
+            elif ggml_type == GGMLQuantizationType.F32:
                 item_count = n_elems
                 item_type = np.float32
-            elif ggml_type == GGMLQuantizationType.F16:
+            elif ggml_type == GGMLQuantizationType.F64:
                 item_count = n_elems
-                item_type = np.float16
+                item_type = np.float64
             elif ggml_type == GGMLQuantizationType.I8:
                 item_count = n_elems
                 item_type = np.int8
@@ -257,6 +260,9 @@
             elif ggml_type == GGMLQuantizationType.I32:
                 item_count = n_elems
                 item_type = np.int32
+            elif ggml_type == GGMLQuantizationType.I64:
+                item_count = n_elems
+                item_type = np.int64
             else:
                 item_count = n_bytes
                 item_type = np.uint8
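With the new branches, the reader returns typed views for I64 and F64 tensors (item_count = n_elems), while unrecognized or quantized types still fall through to raw np.uint8 bytes (item_count = n_bytes). A quick check, assuming the hypothetical example.gguf from the earlier sketch with an I64 tensor named "ints":

import numpy as np
from gguf import GGUFReader

reader = GGUFReader("example.gguf")  # hypothetical file from the sketch above
tensor = next(t for t in reader.tensors if t.name == "ints")
assert tensor.data.dtype == np.int64  # typed view, one item per element
# quantized or unknown types still come back as raw np.uint8 bytes (the else branch)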
diff --git a/gguf-py/gguf/gguf_writer.py b/gguf-py/gguf/gguf_writer.py
index 81b2eb88..1967b633 100644
--- a/gguf-py/gguf/gguf_writer.py
+++ b/gguf-py/gguf/gguf_writer.py
@@ -204,18 +204,22 @@ class GGUFWriter:
         for i in range(n_dims):
             self.ti_data += self._pack("Q", tensor_shape[n_dims - 1 - i])
         if raw_dtype is None:
-            if tensor_dtype == np.float32:
-                dtype = GGMLQuantizationType.F32
-            elif tensor_dtype == np.float16:
+            if tensor_dtype == np.float16:
                 dtype = GGMLQuantizationType.F16
+            elif tensor_dtype == np.float32:
+                dtype = GGMLQuantizationType.F32
+            elif tensor_dtype == np.float64:
+                dtype = GGMLQuantizationType.F64
             elif tensor_dtype == np.int8:
                 dtype = GGMLQuantizationType.I8
             elif tensor_dtype == np.int16:
                 dtype = GGMLQuantizationType.I16
             elif tensor_dtype == np.int32:
                 dtype = GGMLQuantizationType.I32
+            elif tensor_dtype == np.int64:
+                dtype = GGMLQuantizationType.I64
             else:
-                raise ValueError("Only F32, F16, I8, I16, I32 tensors are supported for now")
+                raise ValueError("Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now")
         else:
             dtype = raw_dtype
         self.ti_data += self._pack("I", dtype)
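On the writer side, plain NumPy int64 and float64 arrays are now mapped automatically; anything else still raises the updated ValueError unless a raw_dtype is passed explicitly (the `dtype = raw_dtype` fallback above). A minimal sketch of the error path, with a hypothetical file name and tensor name:

import numpy as np
from gguf import GGUFWriter

writer = GGUFWriter("raw.gguf", "example")  # hypothetical path and arch
try:
    writer.add_tensor("u8", np.zeros(16, dtype=np.uint8))  # uint8 has no automatic mapping
except ValueError as exc:
    print(exc)  # "Only F16, F32, F64, I8, I16, I32, I64 tensors are supported for now"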