author    Bartowski <ckealty1182@gmail.com>  2024-05-02 19:49:09 -0400
committer GitHub <noreply@github.com>        2024-05-03 01:49:09 +0200
commit    60325fa56f61c228464c9f065db3aa6a61f2156e
tree      0698b84c48d02403434f79bbc4765c0040298402
parent    6ecf3189e00a1e8e737a78b6d10e1d7006e050a2
Remove .attention from skipped tensors to match more accurately (#7051)
Diffstat (limited to 'convert-hf-to-gguf.py')
-rwxr-xr-x  convert-hf-to-gguf.py  2
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index 2f146d73..612aea17 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1427,7 +1427,7 @@ class LlamaModel(Model):
experts = dict()
for name, data_torch in self.get_tensors():
# we don't need these
- if name.endswith((".attention.masked_bias", ".attention.bias", ".attention.rotary_emb.inv_freq")):
+ if name.endswith((".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")):
continue
old_dtype = data_torch.dtype
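
The one-line change widens the skip filter: the old suffix ".attention.rotary_emb.inv_freq" only matched rotary-frequency buffers nested under a submodule literally named "attention", while the shorter ".rotary_emb.inv_freq" also catches layouts such as "model.layers.0.self_attn.rotary_emb.inv_freq". Below is a minimal sketch of the endswith behavior; the tensor names are illustrative examples, not taken from any specific checkpoint.

#!/usr/bin/env python3
# Sketch only: str.endswith accepts a tuple and returns True when the
# string ends with any of the listed suffixes.
SKIPPED = (".attention.masked_bias", ".attention.bias", ".rotary_emb.inv_freq")

names = [
    "transformer.h.0.attention.rotary_emb.inv_freq",  # skipped before and after the change
    "model.layers.0.self_attn.rotary_emb.inv_freq",   # skipped only after the change
    "model.layers.0.self_attn.q_proj.weight",         # never skipped
]

for name in names:
    print(name, "->", "skip" if name.endswith(SKIPPED) else "keep")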