author | Guoteng <32697156+SolenoidWGT@users.noreply.github.com> | 2024-02-01 17:19:51 +0800
committer | GitHub <noreply@github.com> | 2024-02-01 11:19:51 +0200
commit | ce32060198b7e2d6a13a9b8e1e1369e3c295ae2a (patch)
tree | 546b5a7a327d3f3f1370549331915c4d6373d51d /gguf-py/gguf/tensor_mapping.py
parent | 1cfb5372cf5707c8ec6dde7c874f4a44a6c4c915 (diff)
llama : support InternLM2 (#5184)
* support InternLM2 inference
* add add_space_prefix KV pair
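The second bullet refers to a GGUF metadata key that records whether the tokenizer should prepend a space to the input text before encoding. As a minimal sketch of how a conversion script might record such a flag with gguf-py (the key string, output path, and value are assumptions for illustration; this page only shows the `tensor_mapping.py` part of the change):

```python
# Illustrative sketch only: record the add_space_prefix flag while converting
# an InternLM2 checkpoint to GGUF. The key name "tokenizer.ggml.add_space_prefix",
# the output path, and the value are assumptions, not taken from this diff.
from gguf import GGUFWriter

writer = GGUFWriter("internlm2.gguf", "internlm2")
writer.add_bool("tokenizer.ggml.add_space_prefix", False)  # mirror the HF tokenizer setting
```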
Diffstat (limited to 'gguf-py/gguf/tensor_mapping.py')
-rw-r--r-- | gguf-py/gguf/tensor_mapping.py | 14 |
1 file changed, 12 insertions, 2 deletions
diff --git a/gguf-py/gguf/tensor_mapping.py b/gguf-py/gguf/tensor_mapping.py
index de177af1..4f16d850 100644
--- a/gguf-py/gguf/tensor_mapping.py
+++ b/gguf-py/gguf/tensor_mapping.py
@@ -19,6 +19,7 @@ class TensorNameMap:
             "language_model.embedding.word_embeddings", # persimmon
             "wte",                                      # gpt2
             "transformer.embd.wte",                     # phi2
+            "model.tok_embeddings",                     # internlm2
         ),

         # Token type embeddings
@@ -42,7 +43,7 @@ class TensorNameMap:
         MODEL_TENSOR.OUTPUT: (
             "embed_out",                 # gptneox
             "lm_head",                   # gpt2 mpt falcon llama-hf baichuan qwen
-            "output",                    # llama-pth bloom
+            "output",                    # llama-pth bloom internlm2
             "word_embeddings_for_head",  # persimmon
             "lm_head.linear",            # phi2
         ),
@@ -51,7 +52,7 @@ class TensorNameMap:
         MODEL_TENSOR.OUTPUT_NORM: (
             "gpt_neox.final_layer_norm", # gptneox
             "transformer.ln_f",          # gpt2 gpt-j falcon
-            "model.norm",                # llama-hf baichuan
+            "model.norm",                # llama-hf baichuan internlm2
             "norm",                      # llama-pth
             "embeddings.LayerNorm",      # bert
             "transformer.norm_f",        # mpt
@@ -84,6 +85,7 @@ class TensorNameMap:
             "h.{bid}.ln_1",                      # gpt2
             "transformer.h.{bid}.ln",            # phi2
             "model.layers.layers.{bid}.norm",    # plamo
+            "model.layers.{bid}.attention_norm", # internlm2
         ),

         # Attention norm 2
@@ -111,6 +113,7 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.self.query",   # bert
             "transformer.h.{bid}.attn.q_proj",            # gpt-j
             "model.layers.layers.{bid}.self_attn.q_proj", # plamo
+            "model.layers.{bid}.attention.wq"             # internlm2
         ),

         # Attention key
@@ -120,6 +123,7 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.self.key",     # bert
             "transformer.h.{bid}.attn.k_proj",            # gpt-j
             "model.layers.layers.{bid}.self_attn.k_proj", # plamo
+            "model.layers.{bid}.attention.wk"             # internlm2
         ),

         # Attention value
@@ -129,6 +133,7 @@ class TensorNameMap:
             "encoder.layer.{bid}.attention.self.value",   # bert
             "transformer.h.{bid}.attn.v_proj",            # gpt-j
             "model.layers.layers.{bid}.self_attn.v_proj", # plamo
+            "model.layers.{bid}.attention.wv"             # internlm2
         ),

         # Attention output
@@ -147,6 +152,7 @@ class TensorNameMap:
             "h.{bid}.attn.c_proj",                        # gpt2
             "transformer.h.{bid}.mixer.out_proj",         # phi2
             "model.layers.layers.{bid}.self_attn.o_proj", # plamo
+            "model.layers.{bid}.attention.wo",            # internlm2
         ),

         # Rotary embeddings
@@ -169,6 +175,7 @@ class TensorNameMap:
             "language_model.encoder.layers.{bid}.post_attention_layernorm", # persimmon
             "model.layers.{bid}.ln2",                                       # yi
             "h.{bid}.ln_2",                                                 # gpt2
+            "model.layers.{bid}.ffn_norm",                                  # internlm2
         ),

         MODEL_TENSOR.FFN_GATE_INP: (
@@ -194,6 +201,7 @@ class TensorNameMap:
             "transformer.h.{bid}.mlp.fc1",           # phi2
             "model.layers.{bid}.mlp.fc1",            # phi2
             "model.layers.layers.{bid}.mlp.up_proj", # plamo
+            "model.layers.{bid}.feed_forward.w3",    # internlm2
         ),

         MODEL_TENSOR.FFN_UP_EXP: (
@@ -212,6 +220,7 @@ class TensorNameMap:
             "layers.{bid}.feed_forward.w1",            # llama-pth
             "transformer.h.{bid}.mlp.w2",              # qwen
             "model.layers.layers.{bid}.mlp.gate_proj", # plamo
+            "model.layers.{bid}.feed_forward.w1",      # internlm2
         ),

         MODEL_TENSOR.FFN_GATE_EXP: (
@@ -236,6 +245,7 @@ class TensorNameMap:
             "transformer.h.{bid}.mlp.fc2",             # phi2
             "model.layers.{bid}.mlp.fc2",              # phi2
             "model.layers.layers.{bid}.mlp.down_proj", # plamo
+            "model.layers.{bid}.feed_forward.w2",      # internlm2
         ),

         MODEL_TENSOR.FFN_DOWN_EXP: (
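For orientation, each string in these tuples is a template: `{bid}` is substituted with the block (layer) index when a checkpoint tensor name is looked up, and the matching entry decides which standard GGUF tensor the weight is written out as. The snippet below is a self-contained illustration of that lookup for the InternLM2 entries added in this commit; the `resolve()` helper and the target names on the right-hand side are written out for the example and are not the actual `TensorNameMap` API.

```python
# Self-contained illustration of the {bid} template lookup performed by
# TensorNameMap, restricted to the InternLM2 source names added in this commit.
# The dict values and the resolve() helper are illustrative, not gguf-py code.
from typing import Optional

INTERNLM2_MAP = {
    "model.tok_embeddings":               "token_embd",
    "model.layers.{bid}.attention_norm":  "blk.{bid}.attn_norm",
    "model.layers.{bid}.attention.wq":    "blk.{bid}.attn_q",
    "model.layers.{bid}.attention.wk":    "blk.{bid}.attn_k",
    "model.layers.{bid}.attention.wv":    "blk.{bid}.attn_v",
    "model.layers.{bid}.attention.wo":    "blk.{bid}.attn_output",
    "model.layers.{bid}.ffn_norm":        "blk.{bid}.ffn_norm",
    "model.layers.{bid}.feed_forward.w1": "blk.{bid}.ffn_gate",
    "model.layers.{bid}.feed_forward.w3": "blk.{bid}.ffn_up",
    "model.layers.{bid}.feed_forward.w2": "blk.{bid}.ffn_down",
    "model.norm":                         "output_norm",
    "output":                             "output",
}

def resolve(tensor_name: str, n_blocks: int = 32) -> Optional[str]:
    """Map a checkpoint tensor name (suffix already stripped) to its GGUF base name."""
    for src, dst in INTERNLM2_MAP.items():
        if "{bid}" not in src:
            if tensor_name == src:
                return dst
            continue
        for bid in range(n_blocks):
            if tensor_name == src.format(bid=bid):
                return dst.format(bid=bid)
    return None

print(resolve("model.layers.0.attention.wq"))       # -> blk.0.attn_q
print(resolve("model.layers.5.feed_forward.w2"))    # -> blk.5.ffn_down
```

In gguf-py the real lookup also tries common suffixes such as `.weight` and `.bias`, which is why the mapping entries in the diff above omit them.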