path: root/convert-hf-to-gguf.py
author    Douglas Hanley <thesecretaryofwar@gmail.com>    2024-02-13 06:06:58 -0600
committer GitHub <noreply@github.com>                     2024-02-13 14:06:58 +0200
commit    03bf161eb6dea6400ee49c6dc6b69bdcfa9fd3fc (patch)
tree      49320ac8aca35d2ba8162c2a280924bacbd7e06b /convert-hf-to-gguf.py
parent    ad014bba97ef6ef6c3e2f78b2fc463e91ae94579 (diff)
llama : support batched embeddings (#5466)
* batched embedding: pool outputs by sequence id, updated embedding example
* bring back non-causal attention
* embd : minor improvements
* llama : minor

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'convert-hf-to-gguf.py')
-rwxr-xr-x  convert-hf-to-gguf.py | 1 +
1 file changed, 1 insertion, 0 deletions
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index cae1551a..5adfdc14 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1648,6 +1648,7 @@ class BertModel(Model):
         self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
         self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
         self.gguf_writer.add_causal_attention(False)
+        self.gguf_writer.add_pooling_layer(True)
         self.gguf_writer.add_file_type(self.ftype)

     def set_vocab(self):
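
The added `add_pooling_layer(True)` call writes a boolean flag into the BERT model's GGUF metadata so that consumers know the model's token outputs should be pooled into one embedding per sequence. As a rough illustration of the "pool outputs by sequence id" idea from the commit message, here is a minimal NumPy sketch; it is not llama.cpp's implementation, and the choice of mean pooling plus L2 normalization is an assumption made for the example:

```python
# Illustrative sketch only: pool per-token embeddings into one vector per
# sequence id, as when several sequences share one flat batch of tokens.
import numpy as np

def pool_by_seq_id(token_embd: np.ndarray, seq_ids: np.ndarray) -> dict:
    """token_embd: (n_tokens, n_embd); seq_ids: (n_tokens,) sequence id per token."""
    pooled = {}
    for sid in np.unique(seq_ids):
        rows = token_embd[seq_ids == sid]          # tokens belonging to this sequence
        v = rows.mean(axis=0)                      # mean pooling (assumed strategy)
        pooled[int(sid)] = v / np.linalg.norm(v)   # L2-normalize (assumed, as embedding examples often do)
    return pooled

# example: 5 tokens belonging to two sequences in one batch
embd = np.random.rand(5, 8).astype(np.float32)
print({k: v.shape for k, v in pool_by_seq_id(embd, np.array([0, 0, 0, 1, 1])).items()})
```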