Diffstat (limited to 'convert.py')
-rw-r--r--  convert.py  1020
1 files changed, 374 insertions, 646 deletions
diff --git a/convert.py b/convert.py
index f3bf1798..c29c032c 100644
--- a/convert.py
+++ b/convert.py
@@ -1,4 +1,6 @@
#!/usr/bin/env python
+
+import gguf
import argparse
import concurrent.futures
import copy
@@ -16,13 +18,12 @@ import signal
import struct
import sys
import zipfile
+import numpy as np
+
from abc import ABCMeta, abstractmethod
from dataclasses import dataclass
from pathlib import Path
-from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List,
- Literal, Optional, Sequence, Tuple, TypeVar, Union)
-
-import numpy as np
+from typing import (IO, TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Literal, Optional, Sequence, Tuple, TypeVar, Union)
from sentencepiece import SentencePieceProcessor # type: ignore
if TYPE_CHECKING:
@@ -33,57 +34,44 @@ if hasattr(faulthandler, 'register') and hasattr(signal, 'SIGUSR1'):
NDArray: 'TypeAlias' = 'np.ndarray[Any, Any]'
+ARCH=gguf.MODEL_ARCH.LLAMA
+NAMES=gguf.MODEL_TENSOR_NAMES[ARCH]
+
+#
+# data types
+#
@dataclass(frozen=True)
class UnquantizedDataType:
name: str
-
-DT_F16 = UnquantizedDataType('F16')
-DT_F32 = UnquantizedDataType('F32')
-DT_I32 = UnquantizedDataType('I32')
+DT_F16 = UnquantizedDataType('F16')
+DT_F32 = UnquantizedDataType('F32')
+DT_I32 = UnquantizedDataType('I32')
DT_BF16 = UnquantizedDataType('BF16')
-
-@dataclass(frozen=True)
-class QuantizedDataType:
- groupsize: int
- have_addends: bool
- have_g_idx: bool
-
-
-DT_Q4_0 = QuantizedDataType(groupsize=32, have_addends=False, have_g_idx=False)
-DT_Q4_1 = QuantizedDataType(groupsize=32, have_addends=True, have_g_idx=False)
-
-DataType = Union[UnquantizedDataType, QuantizedDataType]
-
-DATA_TYPE_TO_FTYPE: Dict[DataType, int] = {
- DT_F32: 0,
- DT_F16: 1,
- DT_Q4_0: 2,
- DT_Q4_1: 3,
-}
-
-FTYPE_TO_DATA_TYPE: Dict[int, DataType] = \
- {ftype: dtype for (dtype, ftype) in DATA_TYPE_TO_FTYPE.items()}
+DataType = Union[UnquantizedDataType]
DATA_TYPE_TO_NUMPY: Dict[DataType, 'np.dtype[Any]'] = {
DT_BF16: np.dtype(np.uint16),
- DT_F16: np.dtype(np.float16),
- DT_F32: np.dtype(np.float32),
- DT_I32: np.dtype(np.int32),
+ DT_F16: np.dtype(np.float16),
+ DT_F32: np.dtype(np.float32),
+ DT_I32: np.dtype(np.int32),
}
NUMPY_TYPE_TO_DATA_TYPE: Dict['np.dtype[Any]', DataType] = \
{dtype: data_type for (data_type, dtype) in DATA_TYPE_TO_NUMPY.items()}
+SAFETENSORS_DATA_TYPES: Dict[str, DataType] = {
+ 'BF16': DT_BF16,
+ 'F16': DT_F16,
+ 'F32': DT_F32,
+ 'I32': DT_I32,
+}
class GGMLFileType(enum.Enum):
- AllF32 = 0
+ AllF32 = 0
MostlyF16 = 1 # except 1d tensors
- MostlyQ4_0 = 2 # except 1d tensors
- MostlyQ4_1 = 3 # except 1d tensors
- PerLayerIsQ4_1 = 4 # but tok_embeddings.weight and output.weight are F16
def type_for_tensor(self, name: str, tensor: 'LazyTensor') -> DataType:
if len(tensor.shape) == 1:
@@ -93,60 +81,34 @@ class GGMLFileType(enum.Enum):
return DT_F32
elif self == GGMLFileType.MostlyF16:
return DT_F16
- elif self == GGMLFileType.MostlyQ4_0:
- return DT_Q4_0
- elif self == GGMLFileType.MostlyQ4_1:
- return DT_Q4_1
- elif self == GGMLFileType.PerLayerIsQ4_1:
- if name in ('output.weight', 'tok_embeddings.weight'):
- return DT_F16
- else:
- return DT_Q4_1
else:
raise ValueError(self)
-def make_tensors_list() -> List[str]:
- ret = [
- 'tok_embeddings.weight',
- 'norm.weight',
- 'output.weight',
- ]
- for i in range(80): # maximum number of layer
- ret += [
- f'layers.{i}.attention.wq.weight',
- f'layers.{i}.attention.wk.weight',
- f'layers.{i}.attention.wv.weight',
- f'layers.{i}.attention.wo.weight',
- f'layers.{i}.attention_norm.weight',
- f'layers.{i}.feed_forward.w1.weight',
- f'layers.{i}.feed_forward.w2.weight',
- f'layers.{i}.feed_forward.w3.weight',
- f'layers.{i}.ffn_norm.weight',
- ]
- return ret
-
-
-TENSORS_LIST = make_tensors_list()
-TENSORS_SET = set(TENSORS_LIST)
-
-
-def find_n_mult(n_ff: int, n_embd: int) -> int:
- # hardcoded magic range
- for n_mult in range(8192, 1, -1):
- calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
- if calc_ff == n_ff:
- return n_mult
- raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")
+#
+# hparams loading
+#
@dataclass
class Params:
- n_vocab: int
- n_embd: int
- n_mult: int
- n_head: int
- n_layer: int
- n_kv_head: Optional[int] # This parameter is only used for Llama 2
+ n_vocab: int
+ n_embd: int
+ n_mult: int
+ n_layer: int
+ n_ctx: int
+ n_ff: int
+ n_head: int
+ n_head_kv: int
+ f_norm_eps: float
+
+ @staticmethod
+ def find_n_mult(n_ff: int, n_embd: int) -> int:
+ # hardcoded magic range
+ for n_mult in range(8192, 1, -1):
+ calc_ff = (((8*n_embd) // 3 + n_mult - 1) // n_mult)*n_mult
+ if calc_ff == n_ff:
+ return n_mult
+ raise Exception(f"failed to find n_mult for (n_ff={n_ff}, n_embd={n_embd}).")
@staticmethod
def guessed(model: 'LazyModel') -> 'Params':
@@ -165,37 +127,57 @@ class Params:
raise Exception("failed to guess 'n_layer'. This model is unknown or unsupported.\n"
"Suggestion: provide 'config.json' of the model in the same directory containing model files.")
- n_head=n_embd // 128 # guessed
+ n_head = n_embd // 128 # guessed
+ n_mult = 256 # guessed
+
+ # TODO: verify this
+ n_ff = int(2 * (4 * n_embd) / 3)
+ n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult)
return Params(
- n_vocab = n_vocab,
- n_embd = n_embd,
- n_mult = 256,
- n_head = n_head,
- n_layer = n_layer,
- n_kv_head = None,
+ n_vocab = n_vocab,
+ n_embd = n_embd,
+ n_mult = n_mult,
+ n_layer = n_layer,
+ n_ctx = -1,
+ n_ff = n_ff,
+ n_head = n_head,
+ n_head_kv = n_head,
+ f_norm_eps = 1e-5,
)
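The guessed n_ff above follows the usual SwiGLU sizing rule: two thirds of 4*n_embd, rounded up to a multiple of n_mult. A worked example, assuming n_embd = 4096 purely for illustration:

# Worked example of the guess above; n_embd = 4096 is an assumption, not read from a model.
n_embd = 4096
n_mult = 256
n_ff = int(2 * (4 * n_embd) / 3)                  # 10922
n_ff = n_mult * ((n_ff + n_mult - 1) // n_mult)   # 11008, i.e. rounded up to the next multiple of 256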
@staticmethod
def loadHFTransformerJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
config = json.load(open(config_path))
- n_vocab = config["vocab_size"];
- n_embd = config["hidden_size"];
- n_head = config["num_attention_heads"];
- n_layer = config["num_hidden_layers"];
- n_ff = config["intermediate_size"];
- n_kv_head = config.get("num_key_value_heads")
+ n_vocab = config["vocab_size"]
+ n_embd = config["hidden_size"]
+ n_layer = config["num_hidden_layers"]
+ n_ff = config["intermediate_size"]
+ n_head = config["num_attention_heads"]
+ n_head_kv = config["num_key_value_heads"] if "num_key_value_heads" in config else n_head
+ f_norm_eps = config["rms_norm_eps"]
- n_mult = find_n_mult(n_ff, n_embd);
+ n_mult = Params.find_n_mult(n_ff, n_embd)
+
+ if "max_sequence_length" in config:
+ n_ctx = config["max_sequence_length"]
+ elif "max_position_embeddings" in config:
+ n_ctx = config["max_position_embeddings"]
+ else:
+ raise Exception("failed to guess 'n_ctx'. This model is unknown or unsupported.\n"
+ "Suggestion: provide 'config.json' of the model in the same directory containing model files.")
return Params(
- n_vocab = n_vocab,
- n_embd = n_embd,
- n_mult = n_mult,
- n_head = n_head,
- n_layer = n_layer,
- n_kv_head = n_kv_head,
+ n_vocab = n_vocab,
+ n_embd = n_embd,
+ n_mult = n_mult,
+ n_layer = n_layer,
+ n_ctx = n_ctx,
+ n_ff = n_ff,
+ n_head = n_head,
+ n_head_kv = n_head_kv,
+ f_norm_eps = f_norm_eps,
)
# LLaMA v2 70B params.json
@@ -204,22 +186,32 @@ class Params:
def loadOriginalParamsJson(model: 'LazyModel', config_path: 'Path') -> 'Params':
config = json.load(open(config_path))
- n_vocab = config["vocab_size"];
- n_embd = config["dim"];
- n_head = config["n_heads"];
- n_layer = config["n_layers"];
- n_mult = config["multiple_of"];
+ n_vocab = config["vocab_size"]
+ n_embd = config["dim"]
+ n_layer = config["n_layers"]
+ n_mult = config["multiple_of"]
+ n_ctx = 2048 if config["norm_eps"] == 1e-06 else 4096 # hack to determine LLaMA v1 vs v2
+ n_ff = -1
+ n_head = config["n_heads"]
+ n_head_kv = config["n_kv_heads"] if "n_kv_heads" in config else n_head
+ f_norm_eps = config["norm_eps"]
if n_vocab == -1:
n_vocab = model["tok_embeddings.weight"].shape[0]
+ if n_ff == -1:
+ n_ff = model["layers.0.feed_forward.w1.weight"].shape[0]
+
return Params(
- n_vocab = n_vocab,
- n_embd = n_embd,
- n_mult = n_mult,
- n_head = n_head,
- n_layer = n_layer,
- n_kv_head = None,
+ n_vocab = n_vocab,
+ n_embd = n_embd,
+ n_mult = n_mult,
+ n_layer = n_layer,
+ n_ctx = n_ctx,
+ n_ff = n_ff,
+ n_head = n_head,
+ n_head_kv = n_head_kv,
+ f_norm_eps = f_norm_eps,
)
@staticmethod
@@ -234,30 +226,73 @@ class Params:
else:
params = Params.guessed(model_plus.model)
- print(f'params: n_vocab:{params.n_vocab} n_embd:{params.n_embd} n_mult:{params.n_mult} n_head:{params.n_head} n_layer:{params.n_layer}')
return params
-class SentencePieceVocab:
- def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path], vocabtype: Optional[str]) -> None:
- self.vocabtype = vocabtype
- if self.vocabtype == "bpe":
- self.sentencepiece_tokenizer = json.loads(open(str(fname_tokenizer)).read())
- else:
- self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+#
+# vocab
+#
+
+class BpeVocab:
+ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
+ self.bpe_tokenizer = json.loads(open(str(fname_tokenizer), encoding="utf-8").read())
added_tokens: Dict[str, int]
if fname_added_tokens is not None:
- added_tokens = json.load(open(fname_added_tokens))
+ added_tokens = json.load(open(fname_added_tokens, encoding="utf-8"))
else:
added_tokens = {}
- if self.vocabtype == "bpe":
- vocab_size: int = len(self.sentencepiece_tokenizer)
+
+ vocab_size: int = len(self.bpe_tokenizer)
+ expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
+ actual_ids = sorted(added_tokens.values())
+ if expected_ids != actual_ids:
+ raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}")
+
+ items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
+ self.added_tokens_list = [text for (text, idx) in items]
+ self.vocab_size_base: int = vocab_size
+ self.vocab_size: int = self.vocab_size_base + len(self.added_tokens_list)
+ self.fname_tokenizer = fname_tokenizer
+ self.fname_added_tokens = fname_added_tokens
+
+ def bpe_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
+ tokenizer = self.bpe_tokenizer
+ from transformers.models.gpt2 import tokenization_gpt2
+ byte_encoder = tokenization_gpt2.bytes_to_unicode()
+ byte_decoder = {v: k for k, v in byte_encoder.items()}
+ for i, item in enumerate(tokenizer):
+ text: bytes = item.encode("utf-8")
+ score: float = -i
+ yield text, score, gguf.TokenType.USER_DEFINED
+
+ def added_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
+ for text in self.added_tokens_list:
+ score = -1000.0
+ yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED
+
+ def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
+ yield from self.bpe_tokens()
+ yield from self.added_tokens()
+
+ def __repr__(self) -> str:
+ return f"BpeVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
+
+
+class SentencePieceVocab:
+ def __init__(self, fname_tokenizer: Path, fname_added_tokens: Optional[Path]) -> None:
+ self.sentencepiece_tokenizer = SentencePieceProcessor(str(fname_tokenizer))
+ added_tokens: Dict[str, int]
+ if fname_added_tokens is not None:
+ added_tokens = json.load(open(fname_added_tokens, encoding="utf-8"))
else:
- vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
+ added_tokens = {}
+
+ vocab_size: int = self.sentencepiece_tokenizer.vocab_size()
expected_ids = list(range(vocab_size, vocab_size + len(added_tokens)))
- actual_ids = sorted(added_tokens.values())
+ actual_ids = sorted(added_tokens.values())
if expected_ids != actual_ids:
raise Exception(f"Expected added token IDs to be sequential and start at {len(added_tokens)}; got {actual_ids}")
+
items = sorted(added_tokens.items(), key=lambda text_idx: text_idx[1])
self.added_tokens_list = [text for (text, idx) in items]
self.vocab_size_base: int = vocab_size
@@ -265,117 +300,66 @@ class SentencePieceVocab:
self.fname_tokenizer = fname_tokenizer
self.fname_added_tokens = fname_added_tokens
- def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float]]:
+ def sentencepiece_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
tokenizer = self.sentencepiece_tokenizer
- if self.vocabtype == "bpe":
- from transformers.models.gpt2 import tokenization_gpt2
- byte_encoder = tokenization_gpt2.bytes_to_unicode()
- byte_decoder = {v: k for k, v in byte_encoder.items()}
- for i, item in enumerate(tokenizer):
- text: bytes
- text = b''.join([x.to_bytes(1, byteorder='big') for x in [byte_decoder[y] for y in item]])
- score: float = -i
- yield text, score
- else:
- for i in range(tokenizer.vocab_size()):
- text: bytes
- if tokenizer.is_unknown(i):
- text = " \u2047 ".encode("utf-8")
- elif tokenizer.is_control(i):
- text = b""
- elif tokenizer.is_byte(i):
- piece = tokenizer.id_to_piece(i)
- if len(piece) != 6:
- raise Exception(f"Invalid token: {piece}")
- byte_value = int(piece[3:-1], 16)
- text = struct.pack("B", byte_value)
- else:
- text = tokenizer.id_to_piece(i).replace("\u2581", " ").encode("utf-8")
- score: float = tokenizer.get_score(i)
- yield text, score
-
- def added_tokens(self) -> Iterable[Tuple[bytes, float]]:
+ for i in range(tokenizer.vocab_size()):
+ piece = tokenizer.id_to_piece(i)
+ text: bytes = piece.encode("utf-8")
+ score: float = tokenizer.get_score(i)
+
+ toktype = gguf.TokenType.NORMAL
+ if tokenizer.is_unknown(i):
+ toktype = gguf.TokenType.UNKNOWN
+ if tokenizer.is_control(i):
+ toktype = gguf.TokenType.CONTROL
+
+ # NOTE: I think added_tokens are user defined.
+ # ref: https://github.com/google/sentencepiece/blob/master/src/sentencepiece_model.proto
+ # if tokenizer.is_user_defined(i): toktype = gguf.TokenType.USER_DEFINED
+
+ if tokenizer.is_unused(i):
+ toktype = gguf.TokenType.UNUSED
+ if tokenizer.is_byte(i):
+ toktype = gguf.TokenType.BYTE
+
+ yield text, score, toktype
+
+ def added_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
for text in self.added_tokens_list:
score = -1000.0
- yield text.encode("utf-8"), score
+ yield text.encode("utf-8"), score, gguf.TokenType.USER_DEFINED
- def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
+ def all_tokens(self) -> Iterable[Tuple[bytes, float, gguf.TokenType]]:
yield from self.sentencepiece_tokens()
yield from self.added_tokens()
def __repr__(self) -> str:
return f"<SentencePieceVocab with {self.vocab_size_base} base tokens and {len(self.added_tokens_list)} added tokens>"
+Vocab = Union[BpeVocab, SentencePieceVocab]
-class GGMLVocab:
- def __init__(self, tokens: List[Tuple[bytes, float]]):
- self.tokens = tokens
- self.vocab_size = len(tokens)
-
- def all_tokens(self) -> Iterable[Tuple[bytes, float]]:
- return self.tokens
-
- def __repr__(self) -> str:
- return f"<GGMLVocab with {self.vocab_size} tokens>"
-
-
-Vocab = Union[SentencePieceVocab, GGMLVocab]
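For orientation, a minimal sketch of how the reworked vocab classes are consumed downstream (the path is hypothetical; the real call sites are load_vocab() and OutputFile.add_meta_vocab() further down):

# Sketch only; the tokenizer path is a placeholder.
vocab = SentencePieceVocab(Path("tokenizer.model"), None)
for text, score, toktype in vocab.all_tokens():
    # every entry now carries a gguf.TokenType alongside the token bytes and score
    pass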
+#
+# data loading
+# TODO: reuse (probably move to gguf.py?)
+#
-def permute(weights: NDArray, n_head: int, n_kv_head: Optional[int] = None) -> NDArray:
- if n_kv_head is not None and n_head != n_kv_head:
- n_head //= n_kv_head
+def permute(weights: NDArray, n_head: int, n_head_kv: int) -> NDArray:
+ #print( "permute debug " + str(weights.shape[0]) + " x " + str(weights.shape[1]) + " nhead " + str(n_head) + " nheadkv " + str(n_kv_head) )
+ if n_head_kv is not None and n_head != n_head_kv:
+ n_head //= n_head_kv
return (weights.reshape(n_head, 2, weights.shape[0] // n_head // 2, *weights.shape[1:])
.swapaxes(1, 2)
.reshape(weights.shape))
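In rough terms, this permutation undoes the row reordering that the HF conversion applies to the q/k projections, regrouping each head's rows so llama.cpp's RoPE sees them in the layout it expects. A toy numpy check, with shapes chosen only for illustration:

# Toy illustration of the row regrouping done by permute(); not part of the diff.
import numpy as np
w = np.arange(8)[:, None] * np.ones((1, 4))   # 8 rows, pretend n_head = 1
p = (w.reshape(1, 2, 8 // 1 // 2, 4)          # split each head's rows into two halves
       .swapaxes(1, 2)
       .reshape(w.shape))
# the rows come out in the order 0, 4, 1, 5, 2, 6, 3, 7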
-def dequantize_q4(qvalues_pack32: NDArray, scales: NDArray, addends: Optional[NDArray], g_idx: Optional[NDArray]) -> NDArray:
- # First reinterpret each row from a list of int32s containing 8 values each
- # to a list of uint8s containing 2 values each.
- qvalues_pack8 = qvalues_pack32.view(np.uint8)
-
- # Then split out the two values per int8 (which requires an actual
- # conversion because numpy doesn't natively support int4s).
- qvalues = np.zeros([qvalues_pack8.shape[0], qvalues_pack8.shape[1] * 2], dtype=np.uint8)
- qvalues[:, 0::2] = qvalues_pack8 & 0xf
- qvalues[:, 1::2] = qvalues_pack8 >> 4
-
- assert addends is None or addends.shape == scales.shape
- assert qvalues.shape[0] == scales.shape[0]
- assert qvalues.shape[1] % scales.shape[1] == 0
- if g_idx is None:
- repeat_count = qvalues.shape[1] // scales.shape[1]
- scales = scales[:, :, np.newaxis]
- if addends is not None:
- addends = addends[:, :, np.newaxis]
- # Reshape so that the below computation broadcasts over scales and addends:
- qvalues.shape = (qvalues.shape[0], scales.shape[1], int(repeat_count))
- else:
- # In this case the scale and addend is selected for each column by g_idx:
- assert addends is not None
- scales = scales[:, g_idx]
- addends = addends[:, g_idx]
- if addends is None:
- # Q4_0
- qvalues = qvalues.view(np.int8)
- qvalues -= 8
- # And do the actual 'value = scale * qvalue + addend' computation.
- values = scales * qvalues
- if addends is not None:
- values += addends
- if g_idx is None:
- values.shape = (values.shape[0], values.shape[1] * values.shape[2])
- return values
-
-
class Tensor(metaclass=ABCMeta):
data_type: DataType
@abstractmethod
def astype(self, data_type: DataType) -> 'Tensor': ...
@abstractmethod
- def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'Tensor': ...
+ def permute(self, n_head: int, n_head_kv: int) -> 'Tensor': ...
@abstractmethod
def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor': ...
@abstractmethod
@@ -413,8 +397,8 @@ class UnquantizedTensor(Tensor):
r = self.ndarray.shape[0] // 3
return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])
- def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'UnquantizedTensor':
- return UnquantizedTensor(permute(self.ndarray, n_head, n_kv_head))
+ def permute(self, n_head: int, n_head_kv: int) -> 'UnquantizedTensor':
+ return UnquantizedTensor(permute(self.ndarray, n_head, n_head_kv))
def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, convert: bool = False) -> NDArray:
@@ -433,183 +417,25 @@ def load_unquantized(lazy_tensor: 'LazyTensor', expected_dtype: Any = None, conv
return tensor.ndarray
-class GGMLQuantizedTensor(Tensor):
- data_type: QuantizedDataType
-
- def __init__(self, ndarray: NDArray, shape: List[int], data_type: DataType) -> None:
- rows, columns = shape
- assert data_type in (DT_Q4_1, DT_Q4_0) # for now
- assert isinstance(data_type, QuantizedDataType) # redundant, but mypy complains without this
- assert columns % data_type.groupsize == 0
- words_in_block = 6 if data_type == DT_Q4_1 else 5
- self.ndarray = ndarray.view(dtype=np.uint32).reshape((rows, columns // data_type.groupsize, words_in_block))
- self.shape = shape[:]
- self.data_type = data_type
-
- def astype(self, data_type: DataType) -> Tensor:
- if data_type == self.data_type:
- return self
- scales = self.ndarray[:, :, 0].view(np.float32)
- if self.data_type.have_addends:
- addends = self.ndarray[:, :, 1].view(np.float32)
- else:
- addends = None
- qweights = self.ndarray[:, :, -4:].reshape([self.shape[0], self.shape[1] // 8])
-
- dq = dequantize_q4(qweights, scales, addends, g_idx=None)
- return UnquantizedTensor(dq).astype(data_type)
-
- def to_ggml(self) -> 'GGMLQuantizedTensor':
- return self
-
- def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> 'GGMLQuantizedTensor':
- return GGMLQuantizedTensor(permute(self.ndarray, n_head, n_kv_head), self.shape, self.data_type)
-
- def permute_part(self, n_part: int, n_head: int) -> 'UnquantizedTensor':
- r = self.ndarray.shape[0] // 3
- return UnquantizedTensor(permute(self.ndarray[r * n_part : r * n_part + r, ...], n_head))
-
- def part(self, n_part: int) -> 'UnquantizedTensor':
- r = self.ndarray.shape[0] // 3
- return UnquantizedTensor(self.ndarray[r * n_part : r * n_part + r, ...])
-
-GGMLCompatibleTensor = Union[UnquantizedTensor, GGMLQuantizedTensor]
+GGMLCompatibleTensor = Union[UnquantizedTensor]
class DeferredPermutedTensor(Tensor):
- def __init__(self, base: Tensor, n_head: int, n_kv_head: Optional[int] = None) -> None:
+ def __init__(self, base: Tensor, n_head: int, n_head_kv: int) -> None:
self.base = base
self.n_head = n_head
- self.n_kv_head = n_kv_head
self.data_type = self.base.data_type
def astype(self, data_type: DataType) -> Tensor:
- return self.base.astype(data_type).permute(self.n_head, self.n_kv_head)
+ return self.base.astype(data_type).permute(self.n_head, self.n_head_kv)
def to_ggml(self) -> GGMLCompatibleTensor:
- return self.base.to_ggml().permute(self.n_head, self.n_kv_head)
+ return self.base.to_ggml().permute(self.n_head, self.n_head_kv)
- def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor:
+ def permute(self, n_head: int, n_head_kv: int) -> Tensor:
raise Exception("shouldn't permute twice")
-class GPTQForLLaMaQuantizedTensor(Tensor):
- def __init__(self, model: 'LazyModel', namebase: str) -> None:
- qweight = load_unquantized(model[f"{namebase}.qweight"], np.int32)
- scales = load_unquantized(model[f"{namebase}.scales"], np.float32, convert=True)
-
- bias = model.get(f"{namebase}.bias")
- if bias is not None:
- # Q4_1 does not support bias; good thing the bias is always all zeros.
- assert not np.any(load_unquantized(bias))
-
- if f"{namebase}.zeros" in model:
- zeros = load_unquantized(model[f"{namebase}.zeros"], np.float32)
- else:
- qzeros = load_unquantized(model[f"{namebase}.qzeros"], np.int32)
- assert qzeros.dtype == np.int32
- zeros = dequantize_q4(qzeros, scales, scales, g_idx=None)
- assert zeros.dtype == np.float32
-
- assert zeros.shape == scales.shape
-
- # Output is transposed compared to the input, and addends have their sign flipped.
- # Scales and zeros similarly must be transposed but only for newer
- # versions of GPTQ-for-LLaMa; the older versions can be identified by
- # having shape (n_embd, 1).
- qweight = qweight.T
- if scales.shape[1] != 1:
- scales = scales.T
- zeros = zeros.T
-
- # Output also has signs flipped for the addends.
- self.qweight = qweight
- self.scales = scales
- self.addends = -zeros
-
- self.g_idx: Optional[NDArray]
- if f"{namebase}.g_idx" in model:
- self.g_idx = load_unquantized(model[f"{namebase}.g_idx"], np.int32)
- assert self.g_idx.shape == (qweight.shape[1] * 8,)
- else:
- self.g_idx = None
-
- self.shape = [self.qweight.shape[0], self.qweight.shape[1] * 8]
- self.data_type = QuantizedDataType(groupsize=self.groupsize(), have_addends=True,
- have_g_idx=(self.g_idx is not None))
-
- def inspect(self, row: int, col: int) -> None:
- '''For debugging.'''
- qweight = (self.qweight[row, col // 8] >> (4 * (col & 7))) & 0xf
- if self.g_idx is not None:
- group = self.g_idx[col]
- else:
- group = int(col // self.groupsize())
- scale = self.scales[row, group]
- addend = self.addends[row, group]
- with np.printoptions(precision=None, suppress=True):
- print(f'scale:{scale} addend:{addend} qweight:{qweight}')
- print('possible values:', np.arange(16) * scale + addend)
- print('actual value:', qweight * scale + addend)
-
- def astype(self, data_type: DataType) -> Tensor:
- if isinstance(data_type, QuantizedDataType):
- assert self.g_idx is None and data_type.have_addends is True and data_type.have_g_idx is False
- return self.regroup(data_type.groupsize)
-
- dequantized = dequantize_q4(np.ascontiguousarray(self.qweight), self.scales, self.addends, self.g_idx)
- return UnquantizedTensor(dequantized).astype(data_type)
-
- def groupsize(self) -> int:
- assert self.addends.shape == self.scales.shape
- assert self.shape[1] % self.scales.shape[1] == 0
- return self.shape[1] // self.scales.shape[1]
-
- def regroup(self, new_groupsize: int = 32) -> 'GPTQForLLaMaQuantizedTensor':
- # Old versions of GPTQ-for-LLaMa shared scales and addends between all the
- # columns in a row. Newer versions share them between every set of N
- # columns in a row, where N is the `groupsize` parameter, usually 128. The
- # output format shares them between every set of 32 columns. To handle
- # this, duplicate scales and addends for every smaller group.
- # (In the above, 'row' and 'column' are in the sense of the output.)
- assert self.g_idx is None
- old_groupsize = self.groupsize()
- assert old_groupsize >= new_groupsize and old_groupsize % new_groupsize == 0, old_groupsize
- ret = copy.copy(self)
- ret.addends = self.addends.repeat(old_groupsize // new_groupsize, axis=1)
- ret.scales = self.scales.repeat(old_groupsize // new_groupsize, axis=1)
- ret.data_type = QuantizedDataType(groupsize=new_groupsize, have_addends=True, have_g_idx=False)
- return ret
-
- def permute(self, n_head: int, n_kv_head: Optional[int] = None) -> Tensor:
- return DeferredPermutedTensor(self, n_head, n_kv_head)
-
- def to_ggml(self) -> GGMLQuantizedTensor:
- # The output format looks like this:
- # For each row:
- # For each group of 32 columns:
- # - addend (float32, 4 bytes)
- # - scale (float32, 4 bytes)
- # - weights (int4 * 32, 16 bytes)
-
- if self.groupsize() != 32:
- raise Exception("should have been regrouped before converting to ggml")
-
- # Since the output format is mixed between integers and floats, we have
- # to hackily view the floats as int32s just so numpy will let us
- # concatenate them.
- addends_view = self.addends.view(dtype=np.int32)[:, :, np.newaxis]
- scales_view = self.scales.view(dtype=np.int32)[:, :, np.newaxis]
-
- # Split into groups of 4 columns (i.e. 32 columns of quantized data):
- grouped = self.qweight.reshape([self.qweight.shape[0], self.qweight.shape[1] // 4, 4])
-
- # And concatenate:
- grouped = np.concatenate([scales_view, addends_view, grouped], axis=2, casting='no')
-
- return GGMLQuantizedTensor(grouped, self.shape, DT_Q4_1)
-
-
@dataclass
class LazyTensor:
_load: Callable[[], Tensor]
@@ -632,17 +458,6 @@ class LazyTensor:
def validate_conversion_to(self, data_type: DataType) -> None:
if data_type == self.data_type:
return
- if isinstance(data_type, QuantizedDataType):
- if not isinstance(self.data_type, QuantizedDataType):
- raise Exception(f"Can't turn an unquantized tensor into a quantized type ({data_type})")
- if self.data_type.have_g_idx:
- sys.stderr.write(
- "Error: Input uses the newer GPTQ-for-LLaMa format (using g_idx), "
- "which is not yet natively supported by GGML. "
- "For now you can still convert this model by passing `--outtype f16` to dequantize, "
- "but that will result in a much larger output file for no quality benefit.\n")
- sys.exit(1)
- assert not data_type.have_g_idx and self.data_type.have_addends and data_type.have_addends
LazyModel = Dict[str, LazyTensor]
@@ -713,10 +528,10 @@ def merge_multifile_models(models_plus: List[ModelPlus]) -> ModelPlus:
return ModelPlus(model, paths, format, vocab)
-def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_kv_head: Optional[int] = None) -> LazyTensor:
+def permute_lazy(lazy_tensor: LazyTensor, n_head: int, n_head_kv: int) -> LazyTensor:
def load() -> Tensor:
- return lazy_tensor.load().permute(n_head, n_kv_head)
- return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_kv_head}) ' + lazy_tensor.description)
+ return lazy_tensor.load().permute(n_head, n_head_kv)
+ return LazyTensor(load, lazy_tensor.shape, lazy_tensor.data_type, f'permute({n_head}, {n_head_kv}) ' + lazy_tensor.description)
def permute_part_lazy(lazy_tensor: LazyTensor, n_part: int, n_head: int) -> LazyTensor:
def load() -> Tensor:
@@ -732,66 +547,6 @@ def part_lazy(lazy_tensor: LazyTensor, n_part: int) -> LazyTensor:
s[0] = s[0] // 3
return LazyTensor(load, s, lazy_tensor.data_type, 'part ' + lazy_tensor.description)
-def convert_transformers_to_orig(model: LazyModel, params: Params) -> LazyModel:
- out: LazyModel = {}
- out["tok_embeddings.weight"] = model["model.embed_tokens.weight"]
- out["norm.weight"] = model["model.norm.weight"]
- out["output.weight"] = model["lm_head.weight"]
-
- for i in itertools.count():
- if f"model.layers.{i}.self_attn.q_proj.weight" in model:
- out[f"layers.{i}.attention.wq.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head)
- out[f"layers.{i}.attention.wk.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_kv_head)
- out[f"layers.{i}.attention.wv.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
- elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
- out[f"layers.{i}.attention.wq.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head)
- out[f"layers.{i}.attention.wk.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head)
- out[f"layers.{i}.attention.wv.weight"] = part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
- else:
- break
-
- out[f"layers.{i}.attention.wo.weight"] = model[f"model.layers.{i}.self_attn.o_proj.weight"]
-
- out[f"layers.{i}.feed_forward.w1.weight"] = model[f"model.layers.{i}.mlp.gate_proj.weight"]
- out[f"layers.{i}.feed_forward.w2.weight"] = model[f"model.layers.{i}.mlp.down_proj.weight"]
- out[f"layers.{i}.feed_forward.w3.weight"] = model[f"model.layers.{i}.mlp.up_proj.weight"]
-
- out[f"layers.{i}.attention_norm.weight"] = model[f"model.layers.{i}.input_layernorm.weight"]
- out[f"layers.{i}.ffn_norm.weight"] = model[f"model.layers.{i}.post_attention_layernorm.weight"]
- return out
-
-
-def handle_quantization(model: LazyModel) -> LazyModel:
- '''Convert a model with entries for 'foo.qweight', 'foo.scales', etc.
- (which resolve to UnquantizedTensors with the raw data) to one with entries
- for 'foo.weight' (which resolve to QuantizedTensors).
- '''
- def convert(name: str) -> Tuple[str, LazyTensor]:
- if name.endswith(".qweight"):
- namebase = name.rsplit('.', 1)[0]
- orig_name = namebase + ".weight"
-
- lazy_tensor = model[name]
- assert len(lazy_tensor.shape) == 2
- real_shape = [lazy_tensor.shape[1], lazy_tensor.shape[0] * 8]
-
- # Calculate type. This replicates the logic in
- # GPTQForLLaMaQuantizedTensor (which is executed when the modelis
- # actually loaded).
- lazy_scales = model[f"{namebase}.scales"]
- scales_width = 1 if lazy_scales.shape[1] == 1 else lazy_scales.shape[0]
- assert real_shape[1] % scales_width == 0
- groupsize = real_shape[1] // scales_width
- have_g_idx = f"{namebase}.g_idx" in model
- data_type = QuantizedDataType(groupsize=groupsize, have_addends=True, have_g_idx=have_g_idx)
-
- def load() -> Tensor:
- return GPTQForLLaMaQuantizedTensor(model, namebase)
-
- return (orig_name, LazyTensor(load, real_shape, data_type, '[quantized]'))
- else:
- return (name, model[name])
- return dict(convert(name) for name in model)
# Functionality that simulates `torch.load` but where individual tensors are
# only loaded into memory on demand, not all at once.
@@ -885,14 +640,6 @@ def lazy_load_torch_file(outer_fp: IO[bytes], path: Path) -> ModelPlus:
return ModelPlus(model=as_dict, paths=[path], format='torch', vocab=None)
-SAFETENSORS_DATA_TYPES: Dict[str, DataType] = {
- 'BF16': DT_BF16,
- 'F16': DT_F16,
- 'F32': DT_F32,
- 'I32': DT_I32,
-}
-
-
def lazy_load_safetensors_file(fp: IO[bytes], path: Path) -> ModelPlus:
header_size, = struct.unpack('<Q', fp.read(8))
header: Dict[str, Dict[str, Any]] = json.loads(fp.read(header_size))
@@ -924,84 +671,6 @@ def must_read(fp: IO[bytes], length: int) -> bytes:
return ret
-def lazy_load_ggml_file(fp: io.BufferedReader, path: Path) -> ModelPlus:
- magic = must_read(fp, 4)[::-1]
- if magic in (b'ggmf', b'ggjt'):
- version, = struct.unpack("i", must_read(fp, 4))
- assert version == 1
- else:
- assert magic == b'ggml'
- version = None
- n_vocab, n_embd, n_mult, n_head, n_layer, rot, file_type = struct.unpack('<7i', must_read(fp, 28))
-
- tokens: List[Tuple[bytes, float]] = []
- for i in range(n_vocab):
- if i == 32000:
- # HACK: GPT4All messed with the format without changing the magic
- # number. Specifically, they changed the vocab section to contain
- # `n_vocab - 1` tokens instead of `n_vocab` (i.e. omitting the
- # extra pad token). Try to detect if we're reading a file like
- # this.
- orig_pos = fp.tell()
- fp.seek(20, io.SEEK_CUR)
- is_gpt4all = fp.read(21) == b'tok_embeddings.weight'
- fp.seek(orig_pos)
- if is_gpt4all:
- break
-
- length, = struct.unpack("i", must_read(fp, 4))
- text = must_read(fp, length)
- if magic != b'ggml':
- score, = struct.unpack("f", must_read(fp, 4))
- tokens.append((text, score))
- vocab = GGMLVocab(tokens) if magic != b'ggml' else None
-
- model: LazyModel = {}
- # Use mmap for the actual data to avoid race conditions with the file offset.
- off = fp.raw.tell()
- mapped = memoryview(mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ))
- fp.raw.seek(off) # needed on Windows
-
- def read_tensor() -> None: # this is a function so that variables captured in `load` don't change
- shape_len, name_len, ftype = struct.unpack("iii", must_read(fp, 12))
- assert 0 <= shape_len <= 3
- shape: List[int] = list(struct.unpack(f"{shape_len}i", must_read(fp, 4 * shape_len)))
- shape = shape[::-1]
- name = must_read(fp, name_len).decode('utf-8')
- data_type = FTYPE_TO_DATA_TYPE[ftype]
-
- if magic == b'ggjt':
- fp.seek((fp.tell() + 31) & -32)
-
- if data_type == DT_Q4_1:
- # See GPTQForLLaMaQuantizedTensor.ggml_ndarray()
- size = 24 * (shape[1] // 32) * shape[0]
- elif data_type == DT_Q4_0:
- size = 20 * (shape[1] // 32) * shape[0]
- else:
- numpy_dtype = DATA_TYPE_TO_NUMPY[data_type]
- elm_count = math.prod(shape)
- size = elm_count * numpy_dtype.itemsize
- offset = fp.tell()
- buf = mapped[offset:offset+size]
- fp.seek(size, io.SEEK_CUR)
-
- def load() -> Tensor:
- if isinstance(data_type, QuantizedDataType):
- ndarray = np.frombuffer(buf, dtype=np.uint32)
- return GGMLQuantizedTensor(ndarray, shape, data_type)
- else:
- return UnquantizedTensor(np.frombuffer(buf, dtype=numpy_dtype).reshape(shape))
- description = f'ggml offset={offset} type={data_type} path={path}'
- model[name] = LazyTensor(load, shape, data_type, description)
-
- while fp.read(1) != b'':
- fp.seek(-1, io.SEEK_CUR)
- read_tensor()
-
- return ModelPlus(model=model, paths=[path], format='ggml', vocab=vocab)
-
-
@functools.lru_cache(maxsize=None)
def lazy_load_file(path: Path) -> ModelPlus:
fp = open(path, 'rb')
@@ -1010,9 +679,6 @@ def lazy_load_file(path: Path) -> ModelPlus:
if first8[:2] == b'PK':
# A zip file, i.e. PyTorch format
return lazy_load_torch_file(fp, path)
- elif first8[2:4] == b'gg':
- # GGML format
- return lazy_load_ggml_file(fp, path)
elif struct.unpack('<Q', first8)[0] < 16 * 1024 * 1024:
# Probably safetensors
return lazy_load_safetensors_file(fp, path)
@@ -1023,7 +689,6 @@ def lazy_load_file(path: Path) -> ModelPlus:
In = TypeVar('In')
Out = TypeVar('Out')
-
def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], concurrency: int) -> Iterable[Out]:
'''Parallel map, but with backpressure. If the caller doesn't call `next`
fast enough, this will stop calling `func` at some point rather than
@@ -1043,8 +708,7 @@ def bounded_parallel_map(func: Callable[[In], Out], iterable: Iterable[In], conc
def check_vocab_size(params: Params, vocab: Vocab) -> None:
if params.n_vocab != vocab.vocab_size:
- # GGMLVocab comes from the same file as the model so shouldn't mismatch:
- assert isinstance(vocab, SentencePieceVocab)
+ assert isinstance(vocab, BpeVocab) or isinstance(vocab, SentencePieceVocab)
if params.n_vocab == vocab.vocab_size_base:
print("Ignoring added_tokens.json since model matches vocab size without it.")
vocab.added_tokens_list = []
@@ -1061,98 +725,154 @@ def check_vocab_size(params: Params, vocab: Vocab) -> None:
class OutputFile:
def __init__(self, fname_out: Path) -> None:
- self.fout = open(fname_out, "wb")
-
- def write_file_header(self, params: Params, file_type: GGMLFileType) -> None:
- self.fout.write(b"ggjt"[::-1]) # magic
- values = [
- 1, # file version
- params.n_vocab,
- params.n_embd,
- params.n_mult,
- params.n_head,
- params.n_layer,
- params.n_embd // params.n_head, # rot (obsolete)
- file_type.value,
- ]
- self.fout.write(struct.pack("i" * len(values), *values))
-
- def write_tensor_header(self, name: str, shape: Sequence[int], data_type: DataType) -> None:
- sname = name.encode('utf-8')
- self.fout.write(struct.pack("iii", len(shape), len(sname), DATA_TYPE_TO_FTYPE[data_type]))
- self.fout.write(struct.pack("i" * len(shape), *shape[::-1]))
- self.fout.write(sname)
- self.fout.seek((self.fout.tell() + 31) & -32)
-
- def write_vocab(self, vocab: Vocab) -> None:
- for text, score in vocab.all_tokens():
- self.fout.write(struct.pack("i", len(text)))
- self.fout.write(text)
- self.fout.write(struct.pack("f", score))
+ self.gguf = gguf.GGUFWriter(fname_out, gguf.MODEL_ARCH_NAMES[ARCH])
+
+ def add_meta_arch(self, params: Params) -> None:
+ self.gguf.add_name ("LLaMA")
+ self.gguf.add_context_length (params.n_ctx)
+ self.gguf.add_embedding_length (params.n_embd)
+ self.gguf.add_block_count (params.n_layer)
+ self.gguf.add_feed_forward_length (params.n_ff)
+ self.gguf.add_rope_dimension_count(params.n_embd // params.n_head)
+ self.gguf.add_head_count (params.n_head)
+ self.gguf.add_head_count_kv (params.n_head_kv)
+ self.gguf.add_layer_norm_rms_eps (params.f_norm_eps)
+
+ def add_meta_vocab(self, vocab: Vocab) -> None:
+ tokens = []
+ scores = []
+ toktypes = []
+ # NOTE: `all_tokens` returns the base vocabulary and added tokens
+ # TODO: add special tokens?
+ for text, score, toktype in vocab.all_tokens():
+ tokens.append(text)
+ scores.append(score)
+ toktypes.append(toktype)
+
+ self.gguf.add_tokenizer_model("llama")
+ self.gguf.add_token_list(tokens)
+ self.gguf.add_token_scores(scores)
+ self.gguf.add_token_types(toktypes)
+
+ def add_tensor_info(self, name: str, tensor: LazyTensor) -> None:
+ n_elements = 1
+ for dim in tensor.shape:
+ n_elements *= dim
+ data_type = DATA_TYPE_TO_NUMPY[tensor.data_type]
+ data_nbytes = n_elements * data_type.itemsize
+ self.gguf.add_tensor_info(name, tensor.shape, data_type, data_nbytes)
+
+ def write_meta(self) -> None:
+ self.gguf.write_header_to_file()
+ self.gguf.write_kv_data_to_file()
+
+ def write_tensor_info(self) -> None:
+ self.gguf.write_ti_data_to_file()
+
+ def close(self) -> None:
+ self.gguf.close()
@staticmethod
- def write_vocab_only(fname_out: Path, vocab: Vocab) -> None:
- of = OutputFile(fname_out)
- params = Params(n_vocab=vocab.vocab_size, n_embd=0, n_mult=0, n_head=1, n_layer=0)
+ def write_vocab_only(fname_out: Path, params: Params, vocab: Vocab) -> None:
+ check_vocab_size(params, vocab)
+
of = OutputFile(fname_out)
- of.write_file_header(params, file_type=GGMLFileType.AllF32)
- of.write_vocab(vocab)
- of.fout.close()
+
+ # meta data
+ of.add_meta_arch(params)
+ of.add_meta_vocab(vocab)
+ of.write_meta()
+
+ of.close()
@staticmethod
- def write_all(fname_out: Path, params: Params, file_type: GGMLFileType, model: LazyModel, vocab: Vocab) -> None:
+ def write_all(fname_out: Path, params: Params, model: LazyModel, vocab: Vocab) -> None:
check_vocab_size(params, vocab)
+
of = OutputFile(fname_out)
- of.write_file_header(params, file_type)
- print("Writing vocab...")
- of.write_vocab(vocab)
+
+ # meta data
+ of.add_meta_arch(params)
+ of.add_meta_vocab(vocab)
+
+ # tensor info
+ for name, lazy_tensor in model.items():
+ of.add_tensor_info(name, lazy_tensor)
+
+ of.write_meta()
+ of.write_tensor_info()
def do_item(item: Tuple[str, LazyTensor]) -> NDArray:
name, lazy_tensor = item
return lazy_tensor.load().to_ggml().ndarray
+ # tensor data
ndarrays = bounded_parallel_map(do_item, model.items(), concurrency=8)
for i, ((name, lazy_tensor), ndarray) in enumerate(zip(model.items(), ndarrays)):
size = ' x '.join(f"{dim:6d}" for dim in lazy_tensor.shape)
padi = len(str(len(model)))
print(f"[{i+1:{padi}d}/{len(model)}] Writing tensor {name:38s} | size {size:16} | type {lazy_tensor.data_type}")
- of.write_tensor_header(name, lazy_tensor.shape, lazy_tensor.data_type)
- ndarray.tofile(of.fout)
- of.fout.close()
+ of.gguf.write_tensor_data(ndarray)
+ of.close()
def pick_output_type(model: LazyModel, output_type_str: Optional[str]) -> GGMLFileType:
- wq_type = model["layers.0.attention.wq.weight"].data_type
- if output_type_str == "f32" or (output_type_str is None and wq_type in (DT_F32, DT_BF16)):
+ wq_type = model[NAMES[gguf.MODEL_TENSOR.ATTN_Q].format(bid=0)+".weight"].data_type
+
+ if output_type_str == "f32" or (output_type_str is None and wq_type == DT_F32):
return GGMLFileType.AllF32
- if output_type_str == "f16" or (output_type_str is None and wq_type == DT_F16):
+ if output_type_str == "f16" or (output_type_str is None and wq_type in (DT_F16, DT_BF16)):
return GGMLFileType.MostlyF16
- if output_type_str == "q4_1" or (output_type_str is None and isinstance(wq_type, QuantizedDataType) and
- wq_type.have_addends):
- if isinstance(model["output.weight"].data_type, QuantizedDataType):
- return GGMLFileType.MostlyQ4_1
- else:
- return GGMLFileType.PerLayerIsQ4_1
- if output_type_str == "q4_0" or (output_type_str is None and isinstance(wq_type, QuantizedDataType)):
- return GGMLFileType.MostlyQ4_0
+
name_to_type = {name: lazy_tensor.data_type for (name, lazy_tensor) in model.items()}
+
raise Exception(f"Unexpected combination of types: {name_to_type}")
+def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
+ return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
+ for (name, tensor) in model.items()}
-def do_necessary_conversions(model: LazyModel, params: Params) -> LazyModel:
- model = handle_quantization(model)
+def convert_model_names(model: LazyModel, params: Params) -> LazyModel:
+ tmap = gguf.get_tensor_name_map(ARCH, params.n_layer)
- if "lm_head.weight" in model:
- model = convert_transformers_to_orig(model, params)
- model = filter_and_sort_tensors(model)
+ tmp = model
- return model
+ # HF models permute or pack some of the tensors, so we need to undo that
+ for i in itertools.count():
+ if f"model.layers.{i}.self_attn.q_proj.weight" in model:
+ print(f"Permuting layer {i}")
+ tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.q_proj.weight"], params.n_head, params.n_head)
+ tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_lazy(model[f"model.layers.{i}.self_attn.k_proj.weight"], params.n_head, params.n_head_kv)
+ #tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = model[f"model.layers.{i}.self_attn.v_proj.weight"]
+ elif f"model.layers.{i}.self_attn.W_pack.weight" in model:
+ print(f"Unpacking and permuting layer {i}")
+ tmp[f"model.layers.{i}.self_attn.q_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 0, params.n_head, params.n_head)
+ tmp[f"model.layers.{i}.self_attn.k_proj.weight"] = permute_part_lazy(model[f"model.layers.{i}.self_attn.W_pack.weight"], 1, params.n_head, params.n_head_kv)
+ tmp[f"model.layers.{i}.self_attn.v_proj.weight"] = part_lazy (model[f"model.layers.{i}.self_attn.W_pack.weight"], 2)
+ else:
+ break
+ out: LazyModel = {}
+ for name, lazy_tensor in model.items():
+ name_new = name
+
+ if name in tmap:
+ name_new = tmap[name]
+ elif name.endswith(".weight") and name[:-7] in tmap:
+ name_new = tmap[name[:-7]] + ".weight"
+ elif name.endswith(".bias") and name[:-5] in tmap:
+ name_new = tmap[name[:-5]] + ".bias"
+ else:
+ raise Exception(f"Unexpected tensor name: {name}")
-def convert_to_output_type(model: LazyModel, output_type: GGMLFileType) -> LazyModel:
- return {name: tensor.astype(output_type.type_for_tensor(name, tensor))
- for (name, tensor) in model.items()}
+ if gguf.should_skip_tensor_TMP(ARCH, params.n_layer, name_new):
+ print(f"skipping tensor {name_new}")
+ continue
+ else:
+ print(f"{name:48s} -> {name_new:40s} | {lazy_tensor.data_type} | {lazy_tensor.shape}")
+ out[name_new] = lazy_tensor
+ return out
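For reference, this is the kind of renaming the loop produces (the exact strings come from gguf.get_tensor_name_map(); the pairs below are illustrative of the usual LLaMA mapping, not copied from this diff):

# Illustrative HF -> GGUF tensor-name pairs (assumed typical output of the name map):
#   "model.embed_tokens.weight"               -> "token_embd.weight"
#   "model.layers.0.self_attn.q_proj.weight"  -> "blk.0.attn_q.weight"
#   "model.layers.0.mlp.gate_proj.weight"     -> "blk.0.ffn_gate.weight"
#   "model.norm.weight"                       -> "output_norm.weight"
#   "lm_head.weight"                          -> "output.weight"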
def nth_multifile_path(path: Path, n: int) -> Optional[Path]:
'''Given any path belonging to a multi-file model (e.g. foo.bin.1), return
@@ -1204,11 +924,6 @@ def load_some_model(path: Path) -> ModelPlus:
globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
files = [file for glob in globs for file in path.glob(glob)]
if not files:
- # Try GGML too, but with lower priority, since if both a non-GGML
- # model and a GGML model exist in the same directory, we assume the
- # latter was converted from the former.
- files = list(path.glob("ggml-model*.bin*"))
- if not files:
raise Exception(f"Can't find model in directory {path}")
if len(files) > 1:
raise Exception(f"Found multiple models in {path}, not sure which to pick: {files}")
@@ -1224,19 +939,14 @@ def load_some_model(path: Path) -> ModelPlus:
return model_plus
-def filter_and_sort_tensors(model: LazyModel) -> LazyModel:
- return {name: model[name] for name in TENSORS_LIST if name in model}
-
-
-def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
- print(f"vocabtype: {vocabtype}")
+def load_vocab(path: Path, vocabtype: Optional[str]) -> Union[BpeVocab, SentencePieceVocab]:
# Be extra-friendly and accept either a file or a directory. Also, if it's
# a directory, it might be the model directory, and tokenizer.model might
# be in the parent of that.
if path.is_dir():
vocab_file = "tokenizer.model"
if vocabtype == 'bpe':
- vocab_file = "vocab.json"
+ vocab_file = "vocab.json"
path2 = path / vocab_file
# Use `.parent` instead of /.. to handle the symlink case better.
path3 = path.parent / vocab_file
@@ -1248,21 +958,24 @@ def load_vocab(path: Path, vocabtype: Optional[str]) -> SentencePieceVocab:
raise FileNotFoundError(
f"Could not find tokenizer.model in {path} or its parent; "
"if it's in another directory, pass the directory as --vocab-dir")
+
+ print(f"Loading vocab file '{path}', type '{vocabtype}'")
+
added_tokens_path = path.parent / "added_tokens.json"
- print(f"Loading vocab file {path}")
- return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None,
- vocabtype)
+ if vocabtype == "bpe":
+ return BpeVocab(path, added_tokens_path if added_tokens_path.exists() else None)
+ elif vocabtype == "spm":
+ return SentencePieceVocab(path, added_tokens_path if added_tokens_path.exists() else None)
+ else:
+ raise ValueError(f"Unsupported vocabulary type {vocabtype}")
def default_outfile(model_paths: List[Path], file_type: GGMLFileType) -> Path:
namestr = {
- GGMLFileType.AllF32: "f32",
+ GGMLFileType.AllF32: "f32",
GGMLFileType.MostlyF16: "f16",
- GGMLFileType.MostlyQ4_0: "q4_0",
- GGMLFileType.MostlyQ4_1: "q4_1",
- GGMLFileType.PerLayerIsQ4_1: "q4_1",
}[file_type]
- ret = model_paths[0].parent / f"ggml-model-{namestr}.bin"
+ ret = model_paths[0].parent / f"ggml-model-{namestr}.gguf"
if ret in model_paths:
sys.stderr.write(
f"Error: Default output path ({ret}) would overwrite the input. "
@@ -1281,44 +994,59 @@ def do_dump_model(model_plus: ModelPlus) -> None:
def main(args_in: Optional[List[str]] = None) -> None:
parser = argparse.ArgumentParser(description="Convert a LLaMa model to a GGML compatible file")
- parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
- parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
- parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
- parser.add_argument("--outtype", choices=["f32", "f16", "q4_1", "q4_0"], help="output format (default: based on input)")
- parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
- parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
- parser.add_argument("model", type=Path,
- help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
- parser.add_argument("--vocabtype", default='spm', choices=["spm", "bpe"], help="vocab format (default: spm)")
+ parser.add_argument("--dump", action="store_true", help="don't convert, just show what's in the model")
+ parser.add_argument("--dump-single", action="store_true", help="don't convert, just show what's in a single model file")
+ parser.add_argument("--vocab-only", action="store_true", help="extract only the vocab")
+ parser.add_argument("--outtype", choices=["f32", "f16"], help="output format (default: based on input)")
+ parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
+ parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
+ parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
+ parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format (default: spm)", default="spm")
+ parser.add_argument("--ctx", type=int, help="model training context (default: based on input)")
args = parser.parse_args(args_in)
- vocab: Vocab
if args.dump_single:
model_plus = lazy_load_file(args.model)
do_dump_model(model_plus)
- elif args.vocab_only:
+
+ model_plus = load_some_model(args.model)
+
+ params = Params.load(model_plus)
+ if params.n_ctx == -1:
+ if args.ctx is None:
+ raise Exception("The model doesn't have a context size, and you didn't specify one with --ctx\n"
+ "Please specify one with --ctx:\n"
+ " - LLaMA v1: --ctx 2048\n"
+ " - LLaMA v2: --ctx 4096\n")
+ params.n_ctx = args.ctx
+
+ print(f"params = {params}")
+
+ vocab: Vocab
+ if args.vocab_only:
vocab = load_vocab(args.vocab_dir or args.model, args.vocabtype)
assert args.outfile, "need --outfile if using --vocab-only"
outfile = args.outfile
- OutputFile.write_vocab_only(outfile, vocab)
+ OutputFile.write_vocab_only(outfile, params, vocab)
print(f"Wrote {outfile}")
else:
- model_plus = load_some_model(args.model)
if args.dump:
do_dump_model(model_plus)
return
+
if model_plus.vocab is not None and args.vocab_dir is None:
vocab = model_plus.vocab
else:
vocab_dir = args.vocab_dir if args.vocab_dir else model_plus.paths[0].parent
vocab = load_vocab(vocab_dir, args.vocabtype)
- params = Params.load(model_plus)
- model = model_plus.model
- model = do_necessary_conversions(model, params)
+
+ model = model_plus.model
+ model = convert_model_names(model, params)
output_type = pick_output_type(model, args.outtype)
- model = convert_to_output_type(model, output_type)
- outfile = args.outfile or default_outfile(model_plus.paths, output_type)
- OutputFile.write_all(outfile, params, output_type, model, vocab)
+ model = convert_to_output_type(model, output_type)
+ outfile = args.outfile or default_outfile(model_plus.paths, output_type)
+
+ OutputFile.write_all(outfile, params, model, vocab)
print(f"Wrote {outfile}")