path: root/gguf-py/gguf/tensor_mapping.py
author: fairydreaming <166155368+fairydreaming@users.noreply.github.com> 2024-05-24 14:31:13 +0200
committer: GitHub <noreply@github.com> 2024-05-24 14:31:13 +0200
commit: fbca2f27fc7fa9aa4a8ad0357478fdb908472908 (patch)
tree: 9226fa114f6e0f6578c6946f5a23c7ab76ef0854 /gguf-py/gguf/tensor_mapping.py
parent: 0df0aa8e43c3378975269a51f9b876c8692e70da (diff)
Add support for ArcticForCausalLM (#7020)
* common : increase max number of experts to 128
* common : add tensor LLM_TENSOR_FFN_NORM_EXPS for normalization before MoE that runs in parallel to attention + ffn
* gguf-py : add architecture-specific block mappings that override selected general block mappings
* convert-hf : add model conversion support for ArcticForCausalLM
* convert-hf : use added_tokens_decoder from tokenizer_config.json to redefine tokens from SentencePiece model (only for ArcticForCausalLM)
* llama : add inference support for LLM_ARCH_ARCTIC
---------
Co-authored-by: Stanisław Szymczyk <sszymczy@gmail.com>
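The tokenizer-side change lives in the convert-hf script rather than in this file's diff. As a rough, hypothetical sketch of the idea only (the helper name and the tokens list are illustrative, not the actual convert-hf code), added_tokens_decoder in tokenizer_config.json maps a token id to its metadata:

# Hypothetical sketch, not the convert-hf code: tokenizer_config.json carries an
# "added_tokens_decoder" mapping of token id -> metadata ("content", "special", ...),
# which can be used to redefine tokens read from the SentencePiece model.
import json
from pathlib import Path

def apply_added_tokens(tokens: list[str], config_path: Path) -> list[str]:
    config = json.loads(config_path.read_text(encoding="utf-8"))
    for token_id, meta in config.get("added_tokens_decoder", {}).items():
        idx = int(token_id)
        if idx < len(tokens):
            tokens[idx] = meta["content"]  # replace the token text for this id
    return tokens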
Diffstat (limited to 'gguf-py/gguf/tensor_mapping.py')
-rw-r--r--  gguf-py/gguf/tensor_mapping.py  19
1 file changed, 18 insertions(+), 1 deletion(-)
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index 8e1cac91..8b1b21d7 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -244,6 +244,7 @@ class TensorNameMap:
"encoder.layers.{bid}.mlp.fc11", # nomic-bert
"model.layers.{bid}.mlp.c_fc", # starcoder2
"encoder.layer.{bid}.mlp.gated_layers_v", # jina-bert-v2
+ "model.layers.{bid}.residual_mlp.w3", # arctic
),
MODEL_TENSOR.FFN_UP_EXP: (
@@ -272,6 +273,7 @@ class TensorNameMap:
"encoder.layers.{bid}.mlp.fc12", # nomic-bert
"encoder.layer.{bid}.mlp.gated_layers_w", # jina-bert-v2
"transformer.h.{bid}.mlp.linear_1", # refact
+ "model.layers.{bid}.residual_mlp.w1", # arctic
),
MODEL_TENSOR.FFN_GATE_EXP: (
@@ -306,6 +308,7 @@ class TensorNameMap:
"encoder.layers.{bid}.mlp.fc2", # nomic-bert
"model.layers.{bid}.mlp.c_proj", # starcoder2
"encoder.layer.{bid}.mlp.wo", # jina-bert-v2
+ "model.layers.{bid}.residual_mlp.w2", # arctic
),
MODEL_TENSOR.FFN_DOWN_EXP: (
@@ -382,6 +385,18 @@ class TensorNameMap:
),
}
+ # architecture-specific block mappings
+ arch_block_mappings_cfg: dict[MODEL_ARCH, dict[MODEL_TENSOR, tuple[str, ...]]] = {
+ MODEL_ARCH.ARCTIC: {
+ MODEL_TENSOR.FFN_NORM: (
+ "model.layers.{bid}.residual_layernorm",
+ ),
+ MODEL_TENSOR.FFN_NORM_EXP: (
+ "model.layers.{bid}.post_attention_layernorm",
+ ),
+ },
+ }
+
mapping: dict[str, tuple[MODEL_TENSOR, str]]
def __init__(self, arch: MODEL_ARCH, n_blocks: int):
@@ -393,12 +408,14 @@ class TensorNameMap:
self.mapping[tensor_name] = (tensor, tensor_name)
for key in keys:
self.mapping[key] = (tensor, tensor_name)
+ if arch in self.arch_block_mappings_cfg:
+ self.block_mappings_cfg.update(self.arch_block_mappings_cfg[arch])
for bid in range(n_blocks):
for tensor, keys in self.block_mappings_cfg.items():
if tensor not in MODEL_TENSORS[arch]:
continue
# TODO: make this configurable
- n_experts = 60
+ n_experts = 128
for xid in range(n_experts):
tensor_name = TENSOR_NAMES[tensor].format(bid = bid, xid = xid)
self.mapping[tensor_name] = (tensor, tensor_name)
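For readers wiring this up, a minimal usage sketch of the new override follows. It is not part of the patch and assumes the companion constants.py changes from this PR (MODEL_ARCH.ARCTIC, MODEL_TENSOR.FFN_NORM_EXP, and the matching MODEL_TENSORS entries) are present.

# Minimal sketch: with the Arctic-specific overrides merged into block_mappings_cfg,
# residual_layernorm resolves to the general FFN_NORM slot, while
# post_attention_layernorm resolves to the new FFN_NORM_EXP tensor
# (the normalization before the MoE block).
from gguf.constants import MODEL_ARCH, MODEL_TENSOR
from gguf.tensor_mapping import TensorNameMap

tmap = TensorNameMap(MODEL_ARCH.ARCTIC, n_blocks=4)  # block count chosen only for illustration

tensor, gguf_name = tmap.mapping["model.layers.0.residual_layernorm"]
assert tensor == MODEL_TENSOR.FFN_NORM

tensor, gguf_name = tmap.mapping["model.layers.0.post_attention_layernorm"]
assert tensor == MODEL_TENSOR.FFN_NORM_EXP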