author     Shijie <821898965@qq.com>       2024-01-19 19:53:13 +0800
committer  GitHub <noreply@github.com>     2024-01-19 13:53:13 +0200
commit     9b75cb2b3ccbed3df2e14c1202168db3e5145095
tree       1ce9e2b388805fefac29ff6feebb326b78d708c8 /convert-hf-to-gguf.py
parent     de9a147df14e62f54f879d2d15e6c4793107f4fc
llama : support upcoming Qwen2 (#5037)
Diffstat (limited to 'convert-hf-to-gguf.py')
-rwxr-xr-x  convert-hf-to-gguf.py | 4 ++++
1 file changed, 4 insertions, 0 deletions
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index d2d6948d..5cb3e63f 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -189,6 +189,8 @@ class Model:
             return StableLMModel
         if model_architecture == "QWenLMHeadModel":
             return QwenModel
+        if model_architecture == "Qwen2ForCausalLM":
+            return Model
         if model_architecture == "MixtralForCausalLM":
             return MixtralModel
         if model_architecture == "GPT2LMHeadModel":
@@ -236,6 +238,8 @@ class Model:
             return gguf.MODEL_ARCH.STABLELM
         if arch == "QWenLMHeadModel":
             return gguf.MODEL_ARCH.QWEN
+        if arch == "Qwen2ForCausalLM":
+            return gguf.MODEL_ARCH.QWEN2
         if arch == "MixtralForCausalLM":
             return gguf.MODEL_ARCH.LLAMA
         if arch == "GPT2LMHeadModel":
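
For context, the patch wires the new HuggingFace architecture string "Qwen2ForCausalLM" into the converter's two dispatch points: the Python model class used for conversion (Qwen2 reuses the generic Model base class) and the GGUF architecture enum written to the output file (gguf.MODEL_ARCH.QWEN2). Below is a minimal, self-contained sketch of that string-to-target dispatch pattern; the function name resolve_conversion_targets, the dictionaries, and the use of plain string tags in place of the real gguf.MODEL_ARCH enum are illustrative assumptions, not the converter's actual API.

    # Illustrative sketch only: convert-hf-to-gguf.py implements this dispatch
    # inside class Model; the names and string tags below are assumptions made
    # for the sake of a runnable, standalone example.

    MODEL_CLASS_BY_HF_ARCH = {
        "QWenLMHeadModel": "QwenModel",     # first-generation Qwen has its own class
        "Qwen2ForCausalLM": "Model",        # this commit: Qwen2 uses the generic base class
        "MixtralForCausalLM": "MixtralModel",
    }

    GGUF_ARCH_BY_HF_ARCH = {
        "QWenLMHeadModel": "qwen",
        "Qwen2ForCausalLM": "qwen2",        # this commit: new GGUF architecture tag
        "MixtralForCausalLM": "llama",      # Mixtral is written out as a llama-style graph
    }

    def resolve_conversion_targets(hf_architecture: str) -> tuple[str, str]:
        """Return (model class name, GGUF architecture tag) for an HF architecture string."""
        try:
            return (MODEL_CLASS_BY_HF_ARCH[hf_architecture],
                    GGUF_ARCH_BY_HF_ARCH[hf_architecture])
        except KeyError:
            raise NotImplementedError(f"Architecture {hf_architecture!r} is not supported")

    if __name__ == "__main__":
        print(resolve_conversion_targets("Qwen2ForCausalLM"))  # ('Model', 'qwen2')

Keeping both lookups keyed on the same architecture string is what lets a commit like this one add support for a new model family with only a few lines, as long as the tensor layout matches an existing conversion path.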