From 580111d42b3b6ad0a390bfb267d6e3077506eb31 Mon Sep 17 00:00:00 2001
From: postmasters
Date: Wed, 21 Feb 2024 05:08:22 -0800
Subject: llama : add `gemma` model (#5631)

There are a couple of notable things in this architecture:

1. Shared input and output embedding parameters.
2. Key length and value length are not derived from `n_embd`.

More information about the models can be found at
https://ai.google.dev/gemma. GGUFs can be downloaded from
https://huggingface.co/google.
---
 gguf-py/gguf/constants.py | 15 +++++++++++++++
 1 file changed, 15 insertions(+)

(limited to 'gguf-py')

diff --git a/gguf-py/gguf/constants.py b/gguf-py/gguf/constants.py
index 114a9a97..8f9139d1 100644
--- a/gguf-py/gguf/constants.py
+++ b/gguf-py/gguf/constants.py
@@ -111,6 +111,7 @@ class MODEL_ARCH(IntEnum):
     ORION     = auto()
     INTERNLM2 = auto()
     MINICPM   = auto()
+    GEMMA     = auto()
 
 
 class MODEL_TENSOR(IntEnum):
@@ -167,6 +168,7 @@ MODEL_ARCH_NAMES: dict[MODEL_ARCH, str] = {
     MODEL_ARCH.ORION:      "orion",
     MODEL_ARCH.INTERNLM2:  "internlm2",
     MODEL_ARCH.MINICPM:    "minicpm",
+    MODEL_ARCH.GEMMA:      "gemma",
 }
 
 TENSOR_NAMES: dict[MODEL_TENSOR, str] = {
@@ -511,6 +513,19 @@ MODEL_TENSORS: dict[MODEL_ARCH, list[MODEL_TENSOR]] = {
         MODEL_TENSOR.FFN_DOWN_EXP,
         MODEL_TENSOR.FFN_UP_EXP,
     ],
+    MODEL_ARCH.GEMMA: [
+        MODEL_TENSOR.TOKEN_EMBD,
+        MODEL_TENSOR.OUTPUT_NORM,
+        MODEL_TENSOR.ATTN_NORM,
+        MODEL_TENSOR.ATTN_Q,
+        MODEL_TENSOR.ATTN_K,
+        MODEL_TENSOR.ATTN_V,
+        MODEL_TENSOR.ATTN_OUT,
+        MODEL_TENSOR.FFN_GATE,
+        MODEL_TENSOR.FFN_DOWN,
+        MODEL_TENSOR.FFN_UP,
+        MODEL_TENSOR.FFN_NORM,
+    ],
     # TODO
 }
-- 
cgit v1.2.3
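
For context (outside the patch itself), here is a minimal sketch of how the new constants could be consumed, assuming the `gguf` Python package from this repo is importable. It enumerates the concrete tensor names implied by `MODEL_TENSORS[MODEL_ARCH.GEMMA]`. Note that the list has no separate output-projection entry: per point 1 of the commit message, the output head reuses the `token_embd` weights. `n_layers` below is an arbitrary example value, not something read from a real model.

    # Hedged sketch (not part of the patch): list the tensor names a
    # Gemma GGUF would be expected to contain, given the new constants.
    from gguf.constants import MODEL_ARCH, MODEL_TENSORS, TENSOR_NAMES

    n_layers = 2  # hypothetical; real loaders take the block count from GGUF metadata

    for tensor in MODEL_TENSORS[MODEL_ARCH.GEMMA]:
        fmt = TENSOR_NAMES[tensor]
        if "{bid}" in fmt:
            # Per-block tensors (attn_*, ffn_*) repeat once per layer.
            for bid in range(n_layers):
                print(fmt.format(bid=bid) + ".weight")
        else:
            # Model-level tensors (token_embd, output_norm) appear once.
            print(fmt + ".weight")

Similarly, because key and value lengths are not tied to `n_embd` (point 2), a loader should take them from the per-architecture attention metadata in the GGUF file rather than computing them as `n_embd / n_head`.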