author     Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>  2023-11-16 19:14:37 -0700
committer  GitHub <noreply@github.com>  2023-11-16 19:14:37 -0700
commit     91f6499393d2d999331fbfdba47a7f8b9f913f0d (patch)
tree       27caf3ad0b9cec979bb5ed3317b5334bdcd9470c /examples/infill/infill.cpp
parent     8da46278e1a57107591653275f8e03a281de94f0 (diff)
Respect tokenizer.ggml.add_bos_token value when tokenizing (#4040)
* gguf-py: gguf-dump: Respect --no-tensor flag in JSON mode.
* Respect add_bos_token GGUF metadata value
* gguf-py: Try to fix SpecialVocab giving up too easily for the Nth time
Diffstat (limited to 'examples/infill/infill.cpp')
-rw-r--r--  examples/infill/infill.cpp | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp
index 62f5ce3c..11f7410e 100644
--- a/examples/infill/infill.cpp
+++ b/examples/infill/infill.cpp
@@ -230,7 +230,7 @@ int main(int argc, char ** argv) {
LOG_TEE("\n");
LOG_TEE("%s\n", get_system_info(params).c_str());
}
- const bool add_bos = llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM;
+ const bool add_bos = llama_should_add_bos_token(model);
LOG("add_bos: %d\n", add_bos);
bool suff_rm_leading_spc = params.escape;
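
For context, a minimal sketch of how a caller can pick up this flag when tokenizing a prompt. llama_should_add_bos_token() is the helper this commit switches to; the llama_tokenize() prototype shown is assumed from the llama.h API of the same period, so treat the exact signature as an assumption rather than part of this change.

// Sketch only: llama_should_add_bos_token() honours the tokenizer.ggml.add_bos_token
// GGUF metadata value (falling back to the vocab type when the key is absent), so the
// caller no longer hard-codes "add BOS only for SPM vocabularies".
#include <string>
#include <vector>
#include "llama.h"

static std::vector<llama_token> tokenize_prompt(const llama_model * model, const std::string & text) {
    const bool add_bos = llama_should_add_bos_token(model);  // was: llama_vocab_type(model) == LLAMA_VOCAB_TYPE_SPM

    // Upper bound: one token per byte, plus the optional BOS token.
    std::vector<llama_token> tokens(text.size() + (add_bos ? 1 : 0));
    const int n = llama_tokenize(model, text.c_str(), (int) text.size(),
                                 tokens.data(), (int) tokens.size(),
                                 add_bos, /*special =*/ false);
    tokens.resize(n > 0 ? n : 0);
    return tokens;
}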