From 92d0b751a77a089e650983e9f1564ef4d31b32b9 Mon Sep 17 00:00:00 2001
From: Cebtenzzre
Date: Thu, 31 Aug 2023 01:02:23 -0400
Subject: convert : fix python 3.8 support, modernize type annotations (#2916)

* convert : fix python 3.8 support

* convert : sort imports

* convert : fix required parameters in convert-llama-ggmlv3-to-gguf

* convert : fix mypy errors in convert-llama-ggmlv3-to-gguf

* convert : use PEP 585 generics and PEP 604 unions

Now that we have `from __future__ import annotations`, we can use this
modern syntax in Python 3.7 instead of restricting support to Python 3.9
or 3.10 respectively.

* gguf.py : a tuple is already a tuple

* add mypy.ini

* convert : add necessary `type: ignore` comments

* gguf-py: bump version
---
 convert-lora-to-ggml.py | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

(limited to 'convert-lora-to-ggml.py')

diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index a00339b4..a937410d 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -1,15 +1,17 @@
 #!/usr/bin/env python3
+from __future__ import annotations
+
 import json
 import os
 import re
 import struct
 import sys
-from typing import Any, Dict, Sequence, BinaryIO
+from typing import Any, BinaryIO, Sequence
 
 import numpy as np
 import torch
 
-NUMPY_TYPE_TO_FTYPE: Dict[str, int] = {"float32": 0, "float16": 1}
+NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}
 
 
 HF_SUBLAYER_TO_GGML = {
@@ -46,7 +48,7 @@ def translate_tensor_name(t: str) -> str:
         sys.exit(1)
 
 
-def write_file_header(fout: BinaryIO, params: Dict[str, Any]) -> None:
+def write_file_header(fout: BinaryIO, params: dict[str, Any]) -> None:
     fout.write(b"ggla"[::-1])  # magic (ggml lora)
     fout.write(struct.pack("i", 1))  # file version
     fout.write(struct.pack("i", params["r"]))
--
cgit v1.2.3
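
Note on the annotation style this patch adopts: the sketch below is not part of the patch (the `load_params` helper is hypothetical, for illustration only). It shows why PEP 585 generics such as `dict[str, int]` and PEP 604 unions such as `str | None` remain compatible with Python 3.7/3.8 once `from __future__ import annotations` is in effect: PEP 563 stores annotations as strings instead of evaluating them at runtime, so only static checkers like mypy need to understand the newer syntax.

    # Illustrative sketch only -- not from the patch.
    from __future__ import annotations

    import json
    from typing import Any

    # PEP 585 built-in generic in the annotation; never evaluated at runtime
    # thanks to the future import, so this runs on Python 3.7/3.8 as well.
    NUMPY_TYPE_TO_FTYPE: dict[str, int] = {"float32": 0, "float16": 1}


    def load_params(path: str | None = None) -> dict[str, Any]:
        # Hypothetical helper: load a JSON params file, or return an empty
        # dict when no path is given. The PEP 604 union `str | None` above
        # is likewise stored as a string and only interpreted by type checkers.
        if path is None:
            return {}
        with open(path) as f:
            return json.load(f)

Without the future import, the same annotations would raise at runtime on Python 3.8 (e.g. `TypeError: 'type' object is not subscriptable` for `dict[str, int]`), which is why the patch adds the import alongside the modernized annotations.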