diff options
author | Cebtenzzre <cebtenzzre@gmail.com> | 2023-08-31 01:02:23 -0400 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-08-31 08:02:23 +0300 |
commit | 92d0b751a77a089e650983e9f1564ef4d31b32b9 (patch) | |
tree | 28beced44af777ec563f489518a14509994187dd /convert-gptneox-hf-to-gguf.py | |
parent | 8afe2280009ecbfc9de2c93b8f41283dc810609a (diff) |
convert : fix python 3.8 support, modernize type annotations (#2916)
* convert : fix python 3.8 support
* convert : sort imports
* convert : fix required parameters in convert-llama-ggmlv3-to-gguf
* convert : fix mypy errors in convert-llama-ggmlv3-to-gguf
* convert : use PEP 585 generics and PEP 604 unions
Now that we have `from __future__ import annotations`, we can use this
modern syntax in Python 3.7 instead of restricting support to Python 3.9
or 3.10 respectively.
* gguf.py : a tuple is already a tuple
* add mypy.ini
* convert : add necessary `type: ignore` comments
* gguf-py: bump version
Diffstat (limited to 'convert-gptneox-hf-to-gguf.py')
-rwxr-xr-x | convert-gptneox-hf-to-gguf.py | 20 |
1 file changed, 11 insertions(+), 9 deletions(-)
diff --git a/convert-gptneox-hf-to-gguf.py b/convert-gptneox-hf-to-gguf.py index 38e71e03..852123d9 100755 --- a/convert-gptneox-hf-to-gguf.py +++ b/convert-gptneox-hf-to-gguf.py @@ -1,18 +1,20 @@ #!/usr/bin/env python3 # HF gptneox--> gguf conversion -import gguf +from __future__ import annotations + +import argparse +import json import os -import sys import struct -import json +import sys +from pathlib import Path +from typing import Any + +import gguf import numpy as np import torch -import argparse - -from typing import Any, List -from pathlib import Path -from transformers import AutoTokenizer +from transformers import AutoTokenizer # type: ignore[import] # ref: https://github.com/openai/gpt-2/blob/master/src/encoder.py @@ -112,7 +114,7 @@ gguf_writer.add_layer_norm_eps(hparams["layer_norm_eps"]) print("gguf: get tokenizer metadata") -tokens: List[bytearray] = [] +tokens: list[bytearray] = [] tokenizer_json_file = dir_model / 'tokenizer.json' if not tokenizer_json_file.is_file(): |