Diffstat (limited to 'convert-hf-to-gguf.py')
-rwxr-xr-x  convert-hf-to-gguf.py | 136
1 file changed, 135 insertions(+), 1 deletion(-)
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 18337839..afa034a8 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1216,6 +1216,8 @@ class LlamaModel(Model):
tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
n_head = self.hparams.get("num_attention_heads")
n_kv_head = self.hparams.get("num_key_value_heads")
+ n_experts = self.hparams.get("num_local_experts")
+ experts = dict()
for name, data_torch in self.get_tensors():
# we don't need these
if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
@@ -1236,6 +1238,49 @@ class LlamaModel(Model):
data = data.squeeze()
+ # process the experts separately
+ if name.find("block_sparse_moe.experts") != -1:
+ experts[name] = data
+ if len(experts) >= n_experts:
+ # merge the experts into a single 3d tensor
+ for bid in range(block_count):
+ for wid in range(1, 4):
+ full = True
+ for xid in range(n_experts):
+ ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
+ if ename not in experts:
+ full = False
+ break
+ if not full:
+ continue
+
+ datas = []
+ for xid in range(n_experts):
+ ename = f"model.layers.{bid}.block_sparse_moe.experts.{xid}.w{wid}.weight"
+ datas.append(experts[ename])
+ del experts[ename]
+
+ data = np.stack(datas, axis=0)
+ data_dtype = data.dtype
+
+ if self.ftype == 0 and data_dtype == np.float16:
+ data = data.astype(np.float32)
+
+ if self.ftype == 1 and data_dtype == np.float32:
+ data = data.astype(np.float16)
+
+ merged_name = f"layers.{bid}.feed_forward.experts.w{wid}.weight"
+
+ new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
+ if new_name is None:
+ print(f"Can not map tensor {name!r}")
+ sys.exit()
+
+ print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+
+ self.gguf_writer.add_tensor(new_name, data)
+ continue
+
# map tensor names
new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
if new_name is None:
@@ -1249,7 +1294,7 @@ class LlamaModel(Model):
if self.ftype == 0 and data_dtype == np.float16:
data = data.astype(np.float32)
- # TODO: Why cant we use these float16 as-is? There should be not reason to store float16 as float32
+ # 1d tensors need to be converted to float32
if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
data = data.astype(np.float32)
@@ -1261,6 +1306,9 @@ class LlamaModel(Model):
self.gguf_writer.add_tensor(new_name, data)
+ if len(experts) > 0:
+ raise ValueError(f"Unprocessed experts: {experts.keys()}")
+
@Model.register("GrokForCausalLM")
class GrokModel(Model):
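For reference, a minimal standalone sketch of the expert-merging step added in the hunk above: the per-expert 2D weight matrices are collected by name and, once all of a layer's experts are present, stacked into a single 3D tensor so the GGUF file stores one tensor per projection instead of one per expert. The expert count, shapes, and values below are illustrative dummies; only the tensor naming follows the Mixtral pattern used in the diff.

```python
import numpy as np

# Dummy stand-ins for the tensors collected in `experts` (illustrative shapes only).
n_experts = 3
experts = {
    f"model.layers.0.block_sparse_moe.experts.{xid}.w1.weight": np.full((8, 4), xid, dtype=np.float32)
    for xid in range(n_experts)
}

# Gather the per-expert matrices in expert order and stack them along a new
# leading axis, exactly as the converter does before writing the merged tensor.
datas = [
    experts[f"model.layers.0.block_sparse_moe.experts.{xid}.w1.weight"]
    for xid in range(n_experts)
]
merged = np.stack(datas, axis=0)

print(merged.shape)  # (3, 8, 4): (n_experts, rows, cols) for the stacked w1 weights
```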
@@ -1276,6 +1324,92 @@ class GrokModel(Model):
super().set_gguf_parameters()
self.gguf_writer.add_name("Grok")
+ def write_tensors(self):
+ block_count = self.hparams.get("n_layers", self.hparams.get("num_hidden_layers", self.hparams.get("n_layer")))
+ tensor_map = gguf.get_tensor_name_map(self.model_arch, block_count)
+ n_experts = self.hparams.get("num_local_experts")
+ experts = dict()
+ for name, data_torch in self.get_tensors():
+ # we don't need these
+ if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+ continue
+
+ old_dtype = data_torch.dtype
+
+ # convert any unsupported data types to float32
+ if data_torch.dtype not in (torch.float16, torch.float32):
+ data_torch = data_torch.to(torch.float32)
+
+ data = data_torch.squeeze().numpy()
+
+ # process the experts separately
+ if name.find(".moe.") != -1:
+ experts[name] = data
+ if len(experts) >= n_experts:
+ # merge the experts into a single 3d tensor
+ for bid in range(block_count):
+ for wid in ["linear", "linear_1", "linear_v"]:
+ full = True
+ for xid in range(n_experts):
+ ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
+ if ename not in experts:
+ full = False
+ break
+ if not full:
+ continue
+
+ datas = []
+ for xid in range(n_experts):
+ ename = f"transformer.decoder_layer.{bid}.moe.{xid}.{wid}.weight"
+ datas.append(experts[ename])
+ del experts[ename]
+
+ data = np.stack(datas, axis=0)
+ data_dtype = data.dtype
+
+ if self.ftype == 0 and data_dtype == np.float16:
+ data = data.astype(np.float32)
+
+ if self.ftype == 1 and data_dtype == np.float32:
+ data = data.astype(np.float16)
+
+ merged_name = f"transformer.decoder_layer.{bid}.moe.{wid}.weight"
+
+ new_name = tensor_map.get_name(merged_name, try_suffixes=(".weight", ".bias"))
+ if new_name is None:
+ print(f"Can not map tensor {name!r}")
+ sys.exit()
+
+ print(f"{new_name}, n_dims = {len(data.shape)}, shape = {data.shape} --> {data.dtype}")
+
+ self.gguf_writer.add_tensor(new_name, data)
+ continue
+
+ # map tensor names
+ new_name = tensor_map.get_name(name, try_suffixes=(".weight", ".bias"))
+ if new_name is None:
+ print(f"Can not map tensor {name!r}")
+ sys.exit()
+
+ n_dims = len(data.shape)
+ data_dtype = data.dtype
+
+ # if f32 desired, convert any float16 to float32
+ if self.ftype == 0 and data_dtype == np.float16:
+ data = data.astype(np.float32)
+
+ # TODO: Why can't we use these float16 as-is? There should be no reason to store float16 as float32
+ if self.ftype == 1 and data_dtype == np.float16 and n_dims == 1:
+ data = data.astype(np.float32)
+
+ # if f16 desired, convert any float32 2-dim weight tensors to float16
+ if self.ftype == 1 and data_dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
+ data = data.astype(np.float16)
+
+ print(f"{new_name}, n_dims = {n_dims}, {old_dtype} --> {data.dtype}")
+
+ self.gguf_writer.add_tensor(new_name, data)
+
@Model.register("MiniCPMForCausalLM")
class MiniCPMModel(Model):
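The dtype rules applied inline in both write_tensors bodies above can be summarized as: with ftype 0 the output is f32, so f16 inputs are upcast; with ftype 1, 2D weight matrices are stored as f16 while 1D tensors (norms, biases) stay f32. The merged 3D expert tensors take a slightly different path and are converted to f16 whenever ftype is 1. Below is a hypothetical helper expressing those rules for the non-expert path; it is a sketch for clarity, not a function that exists in the script.

```python
import numpy as np

def convert_dtype(data: np.ndarray, name: str, ftype: int) -> np.ndarray:
    # Hypothetical helper mirroring the inline dtype checks in write_tensors.
    n_dims = len(data.shape)
    # ftype == 0: the output file is f32, so any f16 input is upcast
    if ftype == 0 and data.dtype == np.float16:
        return data.astype(np.float32)
    # 1D tensors are kept in f32 even when an f16 output file is requested
    if ftype == 1 and data.dtype == np.float16 and n_dims == 1:
        return data.astype(np.float32)
    # ftype == 1: 2D weight matrices are stored as f16
    if ftype == 1 and data.dtype == np.float32 and name.endswith(".weight") and n_dims == 2:
        return data.astype(np.float16)
    return data

# Example (illustrative tensor name): a 2D f32 weight becomes f16 when ftype == 1.
print(convert_dtype(np.zeros((4, 4), dtype=np.float32), "model.layers.0.w1.weight", 1).dtype)  # float16
```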