summaryrefslogtreecommitdiff
path: root/convert-lora-to-ggml.py
diff options
context:
space:
mode:
authorKerfuffle <44031344+KerfuffleV2@users.noreply.github.com>2023-08-30 02:25:50 -0600
committerGitHub <noreply@github.com>2023-08-30 11:25:50 +0300
commitdc07dc492ef9640bbb82904d7c7679f7bdcf6d76 (patch)
treef9d80bc6ee29067e8e72521d75dfa2b92d85540e /convert-lora-to-ggml.py
parentad9ddcff6ef322db5cf13785bd7c856b610d242e (diff)
convert : various script cleanups/fixes + merges and special token handling (#2842)
* convert: Fix permute calls and method/func definitions * Cleanups for gguf-py * Minor types cleanups. * Initial implementation of handling merges and special tokens * convert: Handle special tokens and merges in vocab only mode convert: Vocab only mode no longer requires loading model tensors * gguf: Refactor tensor name mapping * convert: Fix type hint for special_token_types in SpecialVocab * Use common special vocab handling in various conversion scripts * First pass at implementing suggested changes * Second pass * gguf: SpecialVocab: Fix issue with special token content not in a dict gguf: SpecialVocab: Allow skipping handling of merges * convert-falcon-hf-to-gguf: Support --vocab-only option, bail out if no tokenizer.json * convert-gptneox-hf-to-gguf and convert: Only handle merges for BPE tokenizer * gguf: SpecialVocab: Actually set load_merges in object * Uniform args parsing and vocab only mode for convert examples * convert.py: Set gpt2 as tokenizer model when using BPE * Squish last type warning in gguf.py - yay!
Diffstat (limited to 'convert-lora-to-ggml.py')
-rwxr-xr-xconvert-lora-to-ggml.py6
1 file changed, 3 insertions, 3 deletions
diff --git a/convert-lora-to-ggml.py b/convert-lora-to-ggml.py
index a94a7d0a..a00339b4 100755
--- a/convert-lora-to-ggml.py
+++ b/convert-lora-to-ggml.py
@@ -4,7 +4,7 @@ import os
import re
import struct
import sys
-from typing import Any, Dict, Sequence, TextIO
+from typing import Any, Dict, Sequence, BinaryIO
import numpy as np
import torch
@@ -46,7 +46,7 @@ def translate_tensor_name(t: str) -> str:
sys.exit(1)
-def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None:
+def write_file_header(fout: BinaryIO, params: Dict[str, Any]) -> None:
fout.write(b"ggla"[::-1]) # magic (ggml lora)
fout.write(struct.pack("i", 1)) # file version
fout.write(struct.pack("i", params["r"]))
@@ -60,7 +60,7 @@ def write_file_header(fout: TextIO, params: Dict[str, Any]) -> None:
def write_tensor_header(
- self, name: str, shape: Sequence[int], data_type: np.dtype
+ self, name: str, shape: Sequence[int], data_type: np.dtype[Any]
) -> None:
sname = name.encode("utf-8")
fout.write(