Diffstat (limited to 'gguf-py')
-rw-r--r--  gguf-py/gguf/constants.py       | 17
-rw-r--r--  gguf-py/gguf/tensor_mapping.py  | 37
2 files changed, 40 insertions, 14 deletions
diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 390dca04..4cd87cdd 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -96,6 +96,7 @@ class MODEL_ARCH(IntEnum):
STABLELM = auto()
QWEN = auto()
PHI2 = auto()
+ PLAMO = auto()
class MODEL_TENSOR(IntEnum):
@@ -142,6 +143,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
MODEL_ARCH.STABLELM: "stablelm",
MODEL_ARCH.QWEN: "qwen",
MODEL_ARCH.PHI2: "phi2",
+ MODEL_ARCH.PLAMO: "plamo",
}
TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
@@ -349,6 +351,21 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
MODEL_TENSOR.FFN_DOWN,
MODEL_TENSOR.FFN_UP,
],
+ MODEL_ARCH.PLAMO: [
+ MODEL_TENSOR.TOKEN_EMBD,
+ MODEL_TENSOR.OUTPUT_NORM,
+ MODEL_TENSOR.OUTPUT,
+ MODEL_TENSOR.ROPE_FREQS,
+ MODEL_TENSOR.ATTN_NORM,
+ MODEL_TENSOR.ATTN_Q,
+ MODEL_TENSOR.ATTN_K,
+ MODEL_TENSOR.ATTN_V,
+ MODEL_TENSOR.ATTN_OUT,
+ MODEL_TENSOR.ATTN_ROT_EMBD,
+ MODEL_TENSOR.FFN_GATE,
+ MODEL_TENSOR.FFN_DOWN,
+ MODEL_TENSOR.FFN_UP,
+ ],
MODEL_ARCH.GPT2: [
# TODO
],
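The block below is a minimal usage sketch, not part of the commit: it shows how the MODEL_TENSORS entry added above for MODEL_ARCH.PLAMO combines with TENSOR_NAMES to enumerate the GGUF tensor names a PLaMo model is expected to carry. The helper name plamo_gguf_tensor_names and the layer count are illustrative assumptions; only the two dicts come from constants.py.

    # Sketch: list the GGUF tensor names implied by the new PLAMO entry.
    # Assumes gguf-py is importable; the helper itself is hypothetical.
    from gguf.constants import MODEL_ARCH, MODEL_TENSORS, TENSOR_NAMES

    def plamo_gguf_tensor_names(n_blocks: int) -> list[str]:
        names: list[str] = []
        for tensor in MODEL_TENSORS[MODEL_ARCH.PLAMO]:
            template = TENSOR_NAMES[tensor]        # e.g. "blk.{bid}.attn_q"
            if "{bid}" in template:
                # Per-block tensors are expanded once per layer.
                names += [template.format(bid=bid) for bid in range(n_blocks)]
            else:
                names.append(template)             # e.g. "token_embd", "output"
        return names

    print(plamo_gguf_tensor_names(2))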
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index 6fcbdbc1..446c6b68 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -79,6 +79,7 @@ class TensorNameMap:
"language_model.encoder.layers.{bid}.input_layernorm", # persimmon
"model.layers.{bid}.ln1", # yi
"transformer.h.{bid}.ln", # phi2
+ "model.layers.layers.{bid}.norm", # plamo
),
# Attention norm 2
@@ -99,26 +100,29 @@ class TensorNameMap:
# Attention query
MODEL_TENSOR.ATTN_Q: (
- "model.layers.{bid}.self_attn.q_proj", # llama-hf
- "layers.{bid}.attention.wq", # llama-pth
- "encoder.layer.{bid}.attention.self.query", # bert
- "transformer.h.{bid}.attn.q_proj", # gpt-j
+ "model.layers.{bid}.self_attn.q_proj", # llama-hf
+ "layers.{bid}.attention.wq", # llama-pth
+ "encoder.layer.{bid}.attention.self.query", # bert
+ "transformer.h.{bid}.attn.q_proj", # gpt-j
+ "model.layers.layers.{bid}.self_attn.q_proj", # plamo
),
# Attention key
MODEL_TENSOR.ATTN_K: (
- "model.layers.{bid}.self_attn.k_proj", # llama-hf
- "layers.{bid}.attention.wk", # llama-pth
- "encoder.layer.{bid}.attention.self.key", # bert
- "transformer.h.{bid}.attn.k_proj", # gpt-j
+ "model.layers.{bid}.self_attn.k_proj", # llama-hf
+ "layers.{bid}.attention.wk", # llama-pth
+ "encoder.layer.{bid}.attention.self.key", # bert
+ "transformer.h.{bid}.attn.k_proj", # gpt-j
+ "model.layers.layers.{bid}.self_attn.k_proj", # plamo
),
# Attention value
MODEL_TENSOR.ATTN_V: (
- "model.layers.{bid}.self_attn.v_proj", # llama-hf
- "layers.{bid}.attention.wv", # llama-pth
- "encoder.layer.{bid}.attention.self.value", # bert
- "transformer.h.{bid}.attn.v_proj", # gpt-j
+ "model.layers.{bid}.self_attn.v_proj", # llama-hf
+ "layers.{bid}.attention.wv", # llama-pth
+ "encoder.layer.{bid}.attention.self.value", # bert
+ "transformer.h.{bid}.attn.v_proj", # gpt-j
+ "model.layers.layers.{bid}.self_attn.v_proj", # plamo
),
# Attention output
@@ -134,12 +138,14 @@ class TensorNameMap:
"transformer.h.{bid}.attn.out_proj", # gpt-j
"language_model.encoder.layers.{bid}.self_attention.dense", # persimmon
"transformer.h.{bid}.mixer.out_proj", # phi2
+ "model.layers.layers.{bid}.self_attn.o_proj", # plamo
),
# Rotary embeddings
MODEL_TENSOR.ATTN_ROT_EMBD: (
- "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf
- "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth
+ "model.layers.{bid}.self_attn.rotary_emb.inv_freq", # llama-hf
+ "layers.{bid}.attention.inner_attention.rope.freqs", # llama-pth
+ "model.layers.layers.{bid}.self_attn.rotary_emb.inv_freq", # plamo
),
# Feed-forward norm
@@ -174,6 +180,7 @@ class TensorNameMap:
"language_model.encoder.layers.{bid}.mlp.dense_h_to_4h", # persimmon
"transformer.h.{bid}.mlp.w1", # qwen
"transformer.h.{bid}.mlp.fc1", # phi2
+ "model.layers.layers.{bid}.mlp.up_proj", # plamo
),
MODEL_TENSOR.FFN_UP_EXP: (
@@ -186,6 +193,7 @@ class TensorNameMap:
"model.layers.{bid}.mlp.gate_proj", # llama-hf refact
"layers.{bid}.feed_forward.w1", # llama-pth
"transformer.h.{bid}.mlp.w2", # qwen
+ "model.layers.layers.{bid}.mlp.gate_proj", # plamo
),
MODEL_TENSOR.FFN_GATE_EXP: (
@@ -206,6 +214,7 @@ class TensorNameMap:
"transformer.h.{bid}.mlp.fc_out", # gpt-j
"language_model.encoder.layers.{bid}.mlp.dense_4h_to_h", # persimmon
"transformer.h.{bid}.mlp.fc2", # phi2
+ "model.layers.layers.{bid}.mlp.down_proj", # plamo
),
MODEL_TENSOR.FFN_DOWN_EXP: (
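To close the loop, here is a hedged sketch of how the plamo patterns added to tensor_mapping.py get used: conversion code builds a TensorNameMap for the architecture and resolves checkpoint tensor names through it, so PLaMo's doubled "model.layers.layers.{bid}" prefix lands on the canonical "blk.{bid}" names. The get_tensor_name_map helper and the get_name(..., try_suffixes=...) call mirror how llama.cpp's convert scripts use this module; the block count is an illustrative assumption.

    # Sketch: resolve a PLaMo checkpoint tensor name to its GGUF name.
    import gguf
    from gguf.constants import MODEL_ARCH

    block_count = 32  # hypothetical layer count, for illustration only
    tensor_map = gguf.get_tensor_name_map(MODEL_ARCH.PLAMO, block_count)

    # The "model.layers.layers.{bid}.self_attn.q_proj" pattern added in this
    # diff is what makes the lookup succeed for PLaMo checkpoints.
    new_name = tensor_map.get_name("model.layers.layers.0.self_attn.q_proj.weight",
                                   try_suffixes=(".weight", ".bias"))
    print(new_name)  # expected: "blk.0.attn_q.weight"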