From 03bf161eb6dea6400ee49c6dc6b69bdcfa9fd3fc Mon Sep 17 00:00:00 2001
From: Douglas Hanley
Date: Tue, 13 Feb 2024 06:06:58 -0600
Subject: llama : support batched embeddings (#5466)

* batched embedding: pool outputs by sequence id. updated embedding example

* bring back non-causal attention

* embd : minor improvements

* llama : minor

---------

Co-authored-by: Georgi Gerganov
---
 convert-hf-to-gguf.py | 1 +
 1 file changed, 1 insertion(+)

(limited to 'convert-hf-to-gguf.py')

diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index cae1551a..5adfdc14 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -1648,6 +1648,7 @@ class BertModel(Model):
         self.gguf_writer.add_head_count(self.hparams["num_attention_heads"])
         self.gguf_writer.add_layer_norm_eps(self.hparams["layer_norm_eps"])
         self.gguf_writer.add_causal_attention(False)
+        self.gguf_writer.add_pooling_layer(True)
         self.gguf_writer.add_file_type(self.ftype)
 
     def set_vocab(self):
-- 
cgit v1.2.3
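
The one-line change above writes a pooling-layer flag into the BERT GGUF metadata, matching the commit's description of pooling batched embedding outputs by sequence id. The sketch below is only an illustration of that pooling idea, not llama.cpp's implementation: the function name `pool_by_seq_id` and its arguments are hypothetical, and mean pooling is assumed as the pooling operation.

```python
# Illustrative sketch (not llama.cpp code): mean-pool per-token embeddings
# into one vector per sequence id, as the commit message describes for
# batched embeddings. pool_by_seq_id, token_embd, and seq_ids are
# hypothetical names used only for this example.
import numpy as np


def pool_by_seq_id(token_embd: np.ndarray, seq_ids: np.ndarray) -> dict[int, np.ndarray]:
    """Mean-pool token embeddings grouped by sequence id.

    token_embd: (n_tokens, n_embd) per-token outputs for the whole batch.
    seq_ids:    (n_tokens,) sequence id of each token.
    Returns one pooled embedding per sequence id.
    """
    pooled = {}
    for sid in np.unique(seq_ids):
        pooled[int(sid)] = token_embd[seq_ids == sid].mean(axis=0)
    return pooled


# Example: a batch of 5 tokens belonging to two sequences (ids 0 and 1).
embd = np.random.rand(5, 8).astype(np.float32)
ids = np.array([0, 0, 0, 1, 1])
per_seq = pool_by_seq_id(embd, ids)
print({k: v.shape for k, v in per_seq.items()})  # {0: (8,), 1: (8,)}
```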