author     Jared Van Bortel <jared@nomic.ai>          2024-04-09 13:44:08 -0400
committer  GitHub <noreply@github.com>                2024-04-09 13:44:08 -0400
commit     1b67731e184e27a465b8c5476061294a4af668ea (patch)
tree       15a2d877029fb509a34e462c227475bc7d6dc31e /examples
parent     c4a3a4ff47d62d2503ddf9bd91b58c21f04fe3c3 (diff)
BERT tokenizer fixes (#6498)
Key changes:
* BERT conversion: fix abuse of LlamaHfVocab, do not set BOS or EOS
* Nomic Embed conversion: pad vocab instead of slicing embedding tensor
* llama_tokenize: handle added special tokens like HF does (see the sketch below)
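The tokenization change repeats one pattern across the examples in this diff. Here is a minimal sketch of that pattern, not code from this commit: it assumes the common.h wrapper ::llama_tokenize(ctx, text, add_special, parse_special) that the hunks below call, and the helper name tokenize_prompt is made up for illustration.

    // Sketch only -- mirrors the pattern applied across the examples in this commit,
    // not code taken from it. Assumes the common.h helper
    // ::llama_tokenize(ctx, text, add_special, parse_special).
    #include "common.h"
    #include "llama.h"

    #include <string>
    #include <vector>

    static std::vector<llama_token> tokenize_prompt(llama_context * ctx, const std::string & prompt) {
        const llama_model * model = llama_get_model(ctx);

        // These example code paths cannot cope with a vocab that auto-appends EOS
        // (llama_add_eos_token(model) == 1), so fail fast instead of mis-tokenizing.
        GGML_ASSERT(llama_add_eos_token(model) != 1);

        // add_special = true: let the tokenizer add BOS (or CLS for BERT-style vocabs)
        // based on the model metadata, instead of threading a hand-rolled add_bos flag.
        // parse_special = true: keep added special tokens intact, like HF tokenizers do.
        return ::llama_tokenize(ctx, prompt, /*add_special*/ true, /*parse_special*/ true);
    }

With this pattern, BOS/EOS decisions come from the GGUF metadata written at conversion time, which is why the conversion-side fixes listed above matter.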
Diffstat (limited to 'examples')
-rw-r--r-- | examples/embedding/embedding.cpp     |  6
-rw-r--r-- | examples/imatrix/imatrix.cpp         |  3
-rw-r--r-- | examples/infill/infill.cpp           |  5
-rw-r--r-- | examples/llava/llava-cli.cpp         |  3
-rw-r--r-- | examples/lookahead/lookahead.cpp     |  5
-rw-r--r-- | examples/lookup/lookup-create.cpp    |  4
-rw-r--r-- | examples/lookup/lookup-stats.cpp     |  5
-rw-r--r-- | examples/lookup/lookup.cpp           |  5
-rw-r--r-- | examples/main/main.cpp               | 13
-rw-r--r-- | examples/perplexity/perplexity.cpp   | 38
-rw-r--r-- | examples/server/server.cpp           | 11
-rw-r--r-- | examples/speculative/speculative.cpp | 36
-rw-r--r-- | examples/tokenize/tokenize.cpp       |  4
13 files changed, 66 insertions, 72 deletions
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 53665752..6a93147d 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -123,10 +123,10 @@ int main(int argc, char ** argv) {
         inputs.push_back(inp);
     }

-    // add eos if not present
+    // add SEP if not present
     for (auto & inp : inputs) {
-        if (inp.empty() || inp.back() != llama_token_eos(model)) {
-            inp.push_back(llama_token_eos(model));
+        if (inp.empty() || inp.back() != llama_token_sep(model)) {
+            inp.push_back(llama_token_sep(model));
         }
     }

diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp
index d8cb0a64..1bf55f90 100644
--- a/examples/imatrix/imatrix.cpp
+++ b/examples/imatrix/imatrix.cpp
@@ -349,12 +349,13 @@ static void process_logits(
 static bool compute_imatrix(llama_context * ctx, const gpt_params & params, bool compute_ppl, int from_chunk) {

     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
     const int n_ctx = llama_n_ctx(ctx);

     auto tim1 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenizing the input ..\n", __func__);

-    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);

     auto tim2 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp
index 91c39c5a..c69dcd06 100644
--- a/examples/infill/infill.cpp
+++ b/examples/infill/infill.cpp
@@ -239,6 +239,7 @@ int main(int argc, char ** argv) {
         LOG_TEE("%s\n", get_system_info(params).c_str());
     }
     const bool add_bos = llama_should_add_bos_token(model);
+    GGML_ASSERT(llama_add_eos_token(model) != 1);
     LOG("add_bos: %d\n", add_bos);

     bool suff_rm_leading_spc = params.escape;
@@ -279,10 +280,10 @@ int main(int argc, char ** argv) {
     if (ctx_guidance) {
         LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));

-        guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, add_bos);
+        guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, true);
         LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());

-        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, add_bos);
+        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true);
         LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());

         original_prompt_len = original_inp.size();
diff --git a/examples/llava/llava-cli.cpp b/examples/llava/llava-cli.cpp
index e29da6cb..75948806 100644
--- a/examples/llava/llava-cli.cpp
+++ b/examples/llava/llava-cli.cpp
@@ -146,7 +146,6 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
     int n_past = 0;

     const int max_tgt_len = params->n_predict < 0 ? 256 : params->n_predict;
-    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx_llava->ctx_llama));

     std::string system_prompt, user_prompt;
     size_t image_pos = prompt.find("<image>");
@@ -180,7 +179,7 @@ static void process_prompt(struct llava_context * ctx_llava, struct llava_image_
         }
     }

-    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, add_bos);
+    eval_string(ctx_llava->ctx_llama, system_prompt.c_str(), params->n_batch, &n_past, true);
     llava_eval_image_embed(ctx_llava->ctx_llama, image_embed, params->n_batch, &n_past);
     eval_string(ctx_llava->ctx_llama, user_prompt.c_str(), params->n_batch, &n_past, false);
diff --git a/examples/lookahead/lookahead.cpp b/examples/lookahead/lookahead.cpp
index e2551e7a..5af6a8ab 100644
--- a/examples/lookahead/lookahead.cpp
+++ b/examples/lookahead/lookahead.cpp
@@ -64,13 +64,10 @@ int main(int argc, char ** argv) {
     std::tie(model, ctx) = llama_init_from_gpt_params(params);

     // Tokenize the prompt
-    const bool add_bos = llama_should_add_bos_token(model);
-    LOG("add_bos tgt: %d\n", add_bos);
-
     std::vector<llama_token> inp;
     std::vector<llama_token> all;

-    inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+    inp = ::llama_tokenize(ctx, params.prompt, true, true);
     all = inp;

     const int max_context_size = llama_n_ctx(ctx);
diff --git a/examples/lookup/lookup-create.cpp b/examples/lookup/lookup-create.cpp
index 46a6bed0..1c230c96 100644
--- a/examples/lookup/lookup-create.cpp
+++ b/examples/lookup/lookup-create.cpp
@@ -28,10 +28,8 @@ int main(int argc, char ** argv){
     GGML_ASSERT(model != nullptr);

     // tokenize the prompt
-    const bool add_bos = llama_should_add_bos_token(model);
-
     std::vector<llama_token> inp;
-    inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+    inp = ::llama_tokenize(ctx, params.prompt, true, true);
     fprintf(stderr, "%s: tokenization done\n", __func__);
diff --git a/examples/lookup/lookup-stats.cpp b/examples/lookup/lookup-stats.cpp
index 31f22777..41b62c2f 100644
--- a/examples/lookup/lookup-stats.cpp
+++ b/examples/lookup/lookup-stats.cpp
@@ -34,11 +34,8 @@ int main(int argc, char ** argv){
     GGML_ASSERT(llama_n_vocab(model) < (1 << 16));

     // tokenize the prompt
-    const bool add_bos = llama_should_add_bos_token(model);
-    LOG("add_bos tgt: %d\n", add_bos);
-
     std::vector<llama_token> inp;
-    inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+    inp = ::llama_tokenize(ctx, params.prompt, true, true);

     llama_ngram_cache ngram_cache_context;
     llama_ngram_cache ngram_cache_dynamic;
diff --git a/examples/lookup/lookup.cpp b/examples/lookup/lookup.cpp
index 2e8c35de..65ed408a 100644
--- a/examples/lookup/lookup.cpp
+++ b/examples/lookup/lookup.cpp
@@ -42,11 +42,8 @@ int main(int argc, char ** argv){
     GGML_ASSERT(llama_n_vocab(model) < (1 << 16));

     // tokenize the prompt
-    const bool add_bos = llama_should_add_bos_token(model);
-    LOG("add_bos tgt: %d\n", add_bos);
-
     std::vector<llama_token> inp;
-    inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+    inp = ::llama_tokenize(ctx, params.prompt, true, true);

     llama_ngram_cache ngram_cache_context;
     llama_ngram_cache ngram_cache_dynamic;
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 711f162d..249fc2bb 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -246,6 +246,7 @@ int main(int argc, char ** argv) {
     }

     const bool add_bos = llama_should_add_bos_token(model);
+    GGML_ASSERT(llama_add_eos_token(model) != 1);
     LOG("add_bos: %d\n", add_bos);

     std::vector<llama_token> embd_inp;
@@ -255,7 +256,7 @@ int main(int argc, char ** argv) {
         if (params.chatml) {
             params.prompt = "<|im_start|>system\n" + params.prompt + "<|im_end|>";
         }
-        embd_inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+        embd_inp = ::llama_tokenize(ctx, params.prompt, true, true);
     } else {
         LOG("use session tokens\n");
         embd_inp = session_tokens;
@@ -277,10 +278,10 @@ int main(int argc, char ** argv) {
     if (ctx_guidance) {
         LOG("cfg_negative_prompt: \"%s\"\n", log_tostr(sparams.cfg_negative_prompt));

-        guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, add_bos, true);
+        guidance_inp = ::llama_tokenize(ctx_guidance, sparams.cfg_negative_prompt, true, true);
         LOG("guidance_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_guidance, guidance_inp).c_str());

-        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, add_bos, true);
+        std::vector<llama_token> original_inp = ::llama_tokenize(ctx, params.prompt, true, true);
         LOG("original_inp tokenized: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, original_inp).c_str());

         original_prompt_len = original_inp.size();
@@ -339,14 +340,14 @@ int main(int argc, char ** argv) {
     }

     // prefix & suffix for instruct mode
-    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", add_bos, true);
-    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false, true);
+    const auto inp_pfx = ::llama_tokenize(ctx, "\n\n### Instruction:\n\n", true, true);
+    const auto inp_sfx = ::llama_tokenize(ctx, "\n\n### Response:\n\n", false, true);

     LOG("inp_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_pfx).c_str());
     LOG("inp_sfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, inp_sfx).c_str());

     // chatml prefix & suffix
-    const auto cml_pfx = ::llama_tokenize(ctx, "\n<|im_start|>user\n", add_bos, true);
+    const auto cml_pfx = ::llama_tokenize(ctx, "\n<|im_start|>user\n", true, true);
     const auto cml_sfx = ::llama_tokenize(ctx, "<|im_end|>\n<|im_start|>assistant\n", false, true);

     LOG("cml_pfx: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, cml_pfx).c_str());
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index c70385c6..bab79aae 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -315,10 +315,11 @@ static results_perplexity perplexity_v2(llama_context * ctx, const gpt_params &
     // BOS tokens will be added for each chunk before eval

     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);

     fprintf(stderr, "%s: tokenizing the input ..\n", __func__);

-    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);

     const int n_ctx = llama_n_ctx(ctx);

@@ -454,6 +455,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     // BOS tokens will be added for each chunk before eval

     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);

     std::ofstream logits_stream;
     if (!params.logits_file.empty()) {
@@ -470,7 +472,7 @@ static results_perplexity perplexity(llama_context * ctx, const gpt_params & par
     auto tim1 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenizing the input ..\n", __func__);

-    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);

     auto tim2 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
@@ -771,9 +773,6 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
     const bool is_spm = llama_vocab_type(llama_get_model(ctx)) == LLAMA_VOCAB_TYPE_SPM;
     fprintf(stderr, "================================= is_spm = %d\n", is_spm);

-    // This is needed as usual for LLaMA models
-    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
-
     // The tasks should be randomized so the score stabilizes quickly.
     bool randomize_tasks = true;

@@ -818,7 +817,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
         hs_cur.gold_ending_idx = std::stoi( prompt_lines[idx*6+1] );
         for (size_t j = 0; j < 4; j++) {
             hs_cur.ending[j] = prompt_lines[idx*6+2+j];
-            hs_cur.seq_tokens[j] = ::llama_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], add_bos);
+            hs_cur.seq_tokens[j] = ::llama_tokenize(ctx, hs_cur.context + " " + hs_cur.ending[j], true);
         }

         // determine the common prefix of the endings
@@ -837,7 +836,7 @@ static void hellaswag_score(llama_context * ctx, const gpt_params & params) {
             hs_cur.seq_tokens[2].size() - hs_cur.common_prefix +
             hs_cur.seq_tokens[3].size() - hs_cur.common_prefix;

-        //GGML_ASSERT(hs_cur.common_prefix >= ::llama_tokenize(ctx, hs_cur.context, add_bos).size());
+        //GGML_ASSERT(hs_cur.common_prefix >= ::llama_tokenize(ctx, hs_cur.context, true).size());

         // Delete the selected random example from the prompt
         if (randomize_tasks) {
@@ -1110,12 +1109,9 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {

     fprintf(stderr, "%s : tokenizing selected tasks\n", __func__);

-    // This is needed as usual for LLaMA models
-    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
-
     for (auto & task : data) {
-        task.seq_tokens[0] = ::llama_tokenize(ctx, task.first + task.choices[0] + task.second, add_bos);
-        task.seq_tokens[1] = ::llama_tokenize(ctx, task.first + task.choices[1] + task.second, add_bos);
+        task.seq_tokens[0] = ::llama_tokenize(ctx, task.first + task.choices[0] + task.second, true);
+        task.seq_tokens[1] = ::llama_tokenize(ctx, task.first + task.choices[1] + task.second, true);

         task.common_prefix = 0;
         for (size_t k = 0; k < task.seq_tokens[0].size(); k++) {
@@ -1130,8 +1126,8 @@ static void winogrande_score(llama_context * ctx, const gpt_params & params) {
             task.seq_tokens[0].size() - task.common_prefix +
             task.seq_tokens[1].size() - task.common_prefix;

-        task.n_base1 = ::llama_tokenize(ctx, task.first + task.choices[0], add_bos).size();
-        task.n_base2 = ::llama_tokenize(ctx, task.first + task.choices[1], add_bos).size();
+        task.n_base1 = ::llama_tokenize(ctx, task.first + task.choices[0], true).size();
+        task.n_base2 = ::llama_tokenize(ctx, task.first + task.choices[1], true).size();
     }

     fprintf(stderr, "%s : calculating winogrande score over selected tasks.\n", __func__);
@@ -1322,7 +1318,7 @@ struct multiple_choice_task {
     std::vector<float> log_probs;
 };

-static bool multiple_choice_prepare_one_task(llama_context * ctx, bool add_bos, multiple_choice_task& task, bool log_error) {
+static bool multiple_choice_prepare_one_task(llama_context * ctx, multiple_choice_task& task, bool log_error) {
     if (task.question.empty() || task.mc1.answers.empty()) {
         if (log_error) {
             printf("%s: found bad task with empty question and/or answers\n", __func__);
@@ -1337,7 +1333,7 @@ static bool multiple_choice_prepare_one_task(llama_context * ctx, bool add_bos,
             }
             return false;
         }
-        task.seq_tokens.emplace_back(::llama_tokenize(ctx, task.question + " " + answer, add_bos));
+        task.seq_tokens.emplace_back(::llama_tokenize(ctx, task.question + " " + answer, true));
     }
     auto min_len = task.seq_tokens.front().size();
     for (auto& seq : task.seq_tokens) {
@@ -1436,9 +1432,6 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
         n_task = params.multiple_choice_tasks;
     }

-    // This is needed as usual for LLaMA models
-    const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
-
     printf("%s: preparing task data", __func__);
     fflush(stdout);
     if (n_task > 500) {
@@ -1446,7 +1439,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
         fflush(stdout);
         std::atomic<int> counter(0);
         std::atomic<int> n_bad(0);
-        auto prepare = [&counter, &n_bad, &tasks, ctx, add_bos] () {
+        auto prepare = [&counter, &n_bad, &tasks, ctx] () {
             int num_tasks = tasks.size();
             int n_bad_local = 0;
             while (true) {
@@ -1457,7 +1450,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
                 }
                 int last = std::min(first + K_TOKEN_CHUNK, num_tasks);
                 for (int i = first; i < last; ++i) {
-                    if (!multiple_choice_prepare_one_task(ctx, add_bos, tasks[i], false)) ++n_bad_local;
+                    if (!multiple_choice_prepare_one_task(ctx, tasks[i], false)) ++n_bad_local;
                 }
             }
         };
@@ -1479,7 +1472,7 @@ static void multiple_choice_score(llama_context * ctx, const gpt_params & params
         int i_task = 0;
         for (auto& task : tasks) {
             ++i_task;
-            if (!multiple_choice_prepare_one_task(ctx, add_bos, task, true)) {
+            if (!multiple_choice_prepare_one_task(ctx, task, true)) {
                 return;
             }
             if (i_task%n_dot == 0) {
@@ -1715,6 +1708,7 @@ static void kl_divergence(llama_context * ctx, const gpt_params & params) {
     const int num_batches = (n_ctx + n_batch - 1)/n_batch;
     const int nv = 2*((n_vocab + 1)/2) + 4;
     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);

     std::vector<uint16_t> log_probs_uint16(size_t(n_ctx - 1 - n_ctx/2) * nv);
     std::vector<float> kld_values(size_t(n_ctx - 1 - n_ctx/2)*n_chunk);
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 6c64fe3e..2e791190 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -689,6 +689,7 @@ struct server_context {
        n_ctx = llama_n_ctx(ctx);

        add_bos_token = llama_should_add_bos_token(model);
+       GGML_ASSERT(llama_add_eos_token(model) != 1);

        return true;
    }
@@ -758,7 +759,7 @@ struct server_context {
        metrics.init();
    }

-    std::vector<llama_token> tokenize(const json & json_prompt, bool add_bos) const {
+    std::vector<llama_token> tokenize(const json & json_prompt, bool add_special) const {
        // TODO: currently, we tokenize using special tokens by default
        //       this is not always correct (see https://github.com/ggerganov/llama.cpp/pull/4160#issuecomment-1824826216)
        //       but it's better compared to completely ignoring ChatML and other chat templates
@@ -776,7 +777,7 @@ struct server_context {

                    std::vector<llama_token> p;
                    if (first) {
-                        p = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
+                        p = ::llama_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
                        first = false;
                    } else {
                        p = ::llama_tokenize(ctx, s, false, TMP_FORCE_SPECIAL);
@@ -793,7 +794,7 @@ struct server_context {
            }
        } else {
            auto s = json_prompt.template get<std::string>();
-            prompt_tokens = ::llama_tokenize(ctx, s, add_bos, TMP_FORCE_SPECIAL);
+            prompt_tokens = ::llama_tokenize(ctx, s, add_special, TMP_FORCE_SPECIAL);
        }

        return prompt_tokens;
@@ -1058,7 +1059,7 @@ struct server_context {
        system_tokens.clear();

        if (!system_prompt.empty()) {
-            system_tokens = ::llama_tokenize(ctx, system_prompt, add_bos_token);
+            system_tokens = ::llama_tokenize(ctx, system_prompt, true);

            llama_batch_clear(batch);
@@ -1914,7 +1915,7 @@ struct server_context {
                    prefix_tokens.push_back(llama_token_middle(model));
                    prompt_tokens = prefix_tokens;
                } else {
-                    prompt_tokens = tokenize(slot.prompt, system_prompt.empty() && add_bos_token); // add BOS if there isn't system prompt
+                    prompt_tokens = tokenize(slot.prompt, system_prompt.empty()); // add BOS if there isn't system prompt
                }

                slot.n_past = 0;
diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp
index 6e0815b3..6a7367b0 100644
--- a/examples/speculative/speculative.cpp
+++ b/examples/speculative/speculative.cpp
@@ -76,6 +76,28 @@ int main(int argc, char ** argv) {
     params.n_threads_batch = params.n_threads_batch_draft;
     std::tie(model_dft, ctx_dft) = llama_init_from_gpt_params(params);

+    const bool vocab_type_tgt = llama_vocab_type(model_tgt);
+    LOG("vocab_type tgt: %d\n", vocab_type_tgt);
+
+    const bool vocab_type_dft = llama_vocab_type(model_dft);
+    LOG("vocab_type dft: %d\n", vocab_type_dft);
+
+    if (vocab_type_tgt != vocab_type_dft) {
+        fprintf(stderr, "%s: error: draft model vocab type must match target model to use speculation but ", __func__);
+        fprintf(stderr, "vocab_type_dft = %d while vocab_type_tgt = %d\n", vocab_type_dft, vocab_type_tgt);
+        return 1;
+    }
+
+    if (
+        llama_add_bos_token(model_tgt) != llama_add_bos_token(model_dft) ||
+        llama_add_eos_token(model_tgt) != llama_add_eos_token(model_dft) ||
+        llama_token_bos(model_tgt) != llama_token_bos(model_dft) ||
+        llama_token_eos(model_tgt) != llama_token_eos(model_dft)
+    ) {
+        fprintf(stderr, "%s: error: draft model special tokens must match target model to use speculation\n", __func__);
+        return 1;
+    }
+
     {
         const int n_vocab_tgt = llama_n_vocab(model_tgt);
         const int n_vocab_dft = llama_n_vocab(model_dft);
@@ -105,20 +127,8 @@ int main(int argc, char ** argv) {

     // Tokenize the prompt
-    const bool add_bos_tgt = llama_should_add_bos_token(model_tgt);
-    LOG("add_bos tgt: %d\n", add_bos_tgt);
-
-    const bool add_bos_dft = llama_should_add_bos_token(model_dft);
-    LOG("add_bos dft: %d\n", add_bos_dft);
-
-    if (add_bos_tgt != add_bos_dft) {
-        fprintf(stderr, "%s: error: draft model add_bos must match target model to use speculation but ", __func__);
-        fprintf(stderr, "add_bos_dft = %d while add_bos_tgt = %d\n", add_bos_dft, add_bos_tgt);
-        return 1;
-    }
-
     std::vector<llama_token> inp;
-    inp = ::llama_tokenize(ctx_tgt, params.prompt, add_bos_tgt, true);
+    inp = ::llama_tokenize(ctx_tgt, params.prompt, true, true);

     const int max_context_size = llama_n_ctx(ctx_tgt);
     const int max_tokens_list_size = max_context_size - 4;
diff --git a/examples/tokenize/tokenize.cpp b/examples/tokenize/tokenize.cpp
index d95a9247..8b1baea8 100644
--- a/examples/tokenize/tokenize.cpp
+++ b/examples/tokenize/tokenize.cpp
@@ -26,11 +26,9 @@ int main(int argc, char ** argv) {
     llama_context_params ctx_params = llama_context_default_params();
     llama_context * ctx = llama_new_context_with_model(model, ctx_params);

-    const bool add_bos = llama_should_add_bos_token(model);
-
     std::vector<llama_token> tokens;

-    tokens = ::llama_tokenize(model, prompt, add_bos, true);
+    tokens = ::llama_tokenize(model, prompt, true, true);

     for (int i = 0; i < (int) tokens.size(); i++) {
         if (printing_ids) {