diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2024-01-12 13:01:56 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-01-12 13:01:56 +0200 |
commit | f445c0e68cf8e1faca0b2aa8dfb9d48231cec301 (patch) | |
tree | a230e263424ba71153764e21fef7b92f46bd9fd9 /convert-hf-to-gguf.py | |
parent | 326b418b59b6d48d854c4461a2303e8ac0a311e6 (diff) |
llama : fix llm_build_k_shift to use correct n_rot (#4889)
* llama : fix llm_build_k_shift to use correct n_rot
ggml-ci
* llama : always use hparams.n_rot for ggml_rope_custom
ggml-ci
* convert : fix persimmon conversion to write correct n_rot
Diffstat (limited to 'convert-hf-to-gguf.py')
-rwxr-xr-x | convert-hf-to-gguf.py | 9 |
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 203eaf64..813aeeed 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -817,10 +817,17 @@ class PersimmonModel(Model):
         hidden_size = self.hparams["hidden_size"]
 
         self.gguf_writer.add_name('persimmon-8b-chat')
+        self.gguf_writer.add_context_length(self.hparams["max_position_embeddings"])
         self.gguf_writer.add_embedding_length(hidden_size)
         self.gguf_writer.add_block_count(block_count)
         self.gguf_writer.add_feed_forward_length(self.hparams["intermediate_size"])
-        self.gguf_writer.add_rope_dimension_count(hidden_size // head_count)
+
+        # NOTE: not sure about this change - why does the model not have a rope dimension count when it is smaller
+        # than the head size?
+        # ref: https://github.com/ggerganov/llama.cpp/pull/4889
+        #self.gguf_writer.add_rope_dimension_count(hidden_size // head_count)
+        self.gguf_writer.add_rope_dimension_count(hidden_size // head_count // 2)
+
         self.gguf_writer.add_head_count(head_count)
         self.gguf_writer.add_head_count_kv(head_count_kv)
         self.gguf_writer.add_rope_freq_base(self.hparams["rope_theta"])