author    Jared Van Bortel <jared@nomic.ai>    2024-04-09 13:44:08 -0400
committer GitHub <noreply@github.com>          2024-04-09 13:44:08 -0400
commit    1b67731e184e27a465b8c5476061294a4af668ea (patch)
tree      15a2d877029fb509a34e462c227475bc7d6dc31e /examples/llava
parent    c4a3a4ff47d62d2503ddf9bd91b58c21f04fe3c3 (diff)
BERT tokenizer fixes (#6498)
Key changes:

* BERT conversion: fix abuse of LlamaHfVocab, do not set BOS or EOS
* Nomic Embed conversion: pad vocab instead of slicing embedding tensor
* llama_tokenize: handle added special tokens like HF does
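For context on the third item, a minimal sketch of what "handle added special tokens like HF does" means from a caller's point of view, using the common-library llama_tokenize wrapper and llama_token_to_piece helper. The helper name show_special_token_handling, the flag names, and the [SEP] example token are illustrative assumptions, not taken from this commit; check your tree's common.h for the exact wrapper signature, which has changed over time.

    // Sketch only: with special-token parsing enabled, a literal added token such
    // as "[SEP]" in the text is matched against the vocab's added tokens and
    // emitted as a single token id instead of being split into word pieces,
    // mirroring Hugging Face tokenizer behavior.
    #include "common.h"
    #include <cstdio>
    #include <vector>

    static void show_special_token_handling(llama_context * ctx) {
        std::vector<llama_token> toks = llama_tokenize(ctx, "hello [SEP] world",
                                                       /*add_special=*/true,
                                                       /*parse_special=*/true);
        for (llama_token t : toks) {
            printf("%6d -> '%s'\n", t, llama_token_to_piece(ctx, t).c_str());
        }
    }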
Diffstat (limited to 'examples/llava')
-rw-r--r--   examples/llava/llava-cli.cpp   3
1 file changed, 1 insertion, 2 deletions
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index e29da6cb..75948806 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -146,7 +146,6 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
     int n_past = 0;
 
     const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
-    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx_llava->ctx_llama));
 
     std::string system_prompt, user_prompt;
     size_t image_pos = prompt.find("<image>");
@@ -180,7 +179,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         }
     }
 
-    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, add_bos);
+    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
     llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
     eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
 