| author | Jared Van Bortel <jared@nomic.ai> | 2024-04-09 13:44:08 -0400 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-04-09 13:44:08 -0400 |
| commit | 1b67731e184e27a465b8c5476061294a4af668ea (patch) | |
| tree | 15a2d877029fb509a34e462c227475bc7d6dc31e /examples/lookahead | |
| parent | c4a3a4ff47d62d2503ddf9bd91b58c21f04fe3c3 (diff) | |
BERT tokenizer fixes (#6498)
Key changes:
* BERT conversion: fix abuse of LlamaHfVocab, do not set BOS or EOS
* Nomic Embed conversion: pad vocab instead of slicing embedding tensor
* llama_tokenize: handle added special tokens like HF does
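To illustrate the last point, here is a minimal sketch (not part of this commit) of how a caller would invoke the common `llama_tokenize` helper so that added special tokens written literally in the prompt are mapped to their single token ids, the way Hugging Face tokenizers handle them. The helper signature and parameter names are assumed from the call site touched in the diff below and may differ between llama.cpp versions.

```cpp
// Sketch, assuming the common.h helper seen in the diff below:
//   std::vector<llama_token> llama_tokenize(const llama_context * ctx,
//                                           const std::string   & text,
//                                           bool add_special,
//                                           bool parse_special);
#include "common.h"
#include "llama.h"

#include <string>
#include <vector>

static std::vector<llama_token> tokenize_prompt(llama_context * ctx, const std::string & text) {
    // add_special = true: the model's vocab decides which leading/trailing
    // special tokens are inserted (e.g. BOS for LLaMA-style vocabs,
    // [CLS]/[SEP] for BERT-style vocabs); nothing is forced by the caller.
    // parse_special = true: added special tokens that appear literally in the
    // text (e.g. "[SEP]") are emitted as their single token id instead of
    // being split into word pieces, matching HF behavior.
    return ::llama_tokenize(ctx, text, /*add_special=*/true, /*parse_special=*/true);
}
```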
Diffstat (limited to 'examples/lookahead')
| -rw-r--r-- | examples/lookahead/lookahead.cpp | 5 |
1 file changed, 1 insertion, 4 deletions
diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp
index e2551e7a..5af6a8ab 100644
--- a/examples/lookahead/lookahead.cpp
+++ b/examples/lookahead/lookahead.cpp
@@ -64,13 +64,10 @@ int main(int argc, char ** argv) {
     std::tie(model, ctx) = llama_init_from_gpt_params(params);

     // Tokenize the prompt
-    const bool add_bos = llama_should_add_bos_token(model);
-    LOG("add_bos tgt: %d\n", add_bos);
-
    std::vector<llama_token> inp;
    std::vector<llama_token> all;

-    inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+    inp = ::llama_tokenize(ctx, params.prompt, true, true);
    all = inp;

    const int max_context_size = llama_n_ctx(ctx);
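A note on the call-site change: with `add_special` hard-coded to `true`, the explicit `llama_should_add_bos_token` query is no longer needed here, since whether a leading BOS is actually inserted is decided from the model's vocab metadata inside the tokenizer. A hypothetical check of that behavior (not part of the commit) could look like this:

```cpp
// Hypothetical sanity check, not from the commit: confirm that a leading
// BOS only appears when the model's metadata requests one.
std::vector<llama_token> toks = ::llama_tokenize(ctx, params.prompt, true, true);
if (!toks.empty()) {
    const llama_model * model = llama_get_model(ctx);
    LOG("first token: %d (bos id: %d)\n", toks[0], llama_token_bos(model));
}
```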