diff options
author | amd-lalithnc <lalithnc@amd.com> | 2024-05-17 12:31:58 +0530 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-05-17 10:01:58 +0300 |
commit | e18bc6aaf3b547890609ed254ee5248e720e5840 (patch) | |
tree | 63ce5f59f44e9d7a671c647c59aa289b584ffa95 | |
parent | ee94172d33399d2e814ca05c8a3ff8c523ebb093 (diff) |
convert : fix Qwen/Qwen-7b conversion (#7308)
-rwxr-xr-x | convert-hf-to-gguf.py | 2 |
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index cd875fa4..2810e1e4 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -526,7 +526,7 @@ class Model:
             # for this kind of tokenizer, added_vocab is not a subset of vocab, so they need to be combined
             added_vocab = tokenizer.special_tokens
 
-            reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in (vocab | added_vocab).items()}
+            reverse_vocab = {id_ : encoded_tok for encoded_tok, id_ in {**vocab, **added_vocab}.items()}
 
         for i in range(vocab_size):
             if i not in reverse_vocab: