author    Jared Van Bortel <jared@nomic.ai>    2024-04-09 13:44:08 -0400
committer GitHub <noreply@github.com>          2024-04-09 13:44:08 -0400
commit    1b67731e184e27a465b8c5476061294a4af668ea (patch)
tree      15a2d877029fb509a34e462c227475bc7d6dc31e /examples/tokenize/tokenize.cpp
parent    c4a3a4ff47d62d2503ddf9bd91b58c21f04fe3c3 (diff)
BERT tokenizer fixes (#6498)
Key changes:
* BERT conversion: fix abuse of LlamaHfVocab, do not set BOS or EOS
* Nomic Embed conversion: pad vocab instead of slicing embedding tensor
* llama_tokenize: handle added special tokens like HF does
Diffstat (limited to 'examples/tokenize/tokenize.cpp')
-rw-r--r--   examples/tokenize/tokenize.cpp | 4
1 file changed, 1 insertion(+), 3 deletions(-)
diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp
index d95a9247..8b1baea8 100644
--- a/examples/tokenize/tokenize.cpp
+++ b/examples/tokenize/tokenize.cpp
@@ -26,11 +26,9 @@ int main(int argc, char ** argv) {
     llama_context_params ctx_params = llama_context_default_params();
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);
 
-    const bool add_bos = llama_should_add_bos_token(model);
-
     std::vector<llama_token> tokens;
 
-    tokens = ::llama_tokenize(model, prompt, add_bos, true);
+    tokens = ::llama_tokenize(model, prompt, true, true);
 
     for (int i = 0; i < (int) tokens.size(); i++) {
         if (printing_ids) {
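
For illustration, a minimal sketch of how a caller inside the example's main() would use the common ::llama_tokenize helper after this change. The boolean argument names below (add_special, parse_special) and the printing loop with llama_token_to_piece are illustrative assumptions, not part of this diff; the actual call site simply passes true, true.

    // Minimal sketch (not part of this commit), assumed to run inside main()
    // after the model and context have been created as in the diff above.
    std::vector<llama_token> tokens = ::llama_tokenize(
        model, prompt,
        /*add_special=*/true,    // let the model's vocab decide whether to add BOS/EOS
        /*parse_special=*/true); // parse added special tokens in the prompt, like HF does

    for (int i = 0; i < (int) tokens.size(); i++) {
        // llama_token_to_piece converts a token id back to its text piece
        printf("%6d -> '%s'\n", tokens[i], llama_token_to_piece(ctx, tokens[i]).c_str());
    }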