summary refs log tree commit diff
path: root/common
diff options
context:
space:
mode:
Diffstat (limited to 'common')
-rw-r--r-- common/common.cpp | 16
-rw-r--r-- common/common.h   |  8
2 files changed, 12 insertions, 12 deletions
diff --git a/common/common.cpp b/common/common.cpp
index 7d983a45..98fc8388 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2212,23 +2212,23 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
std::vector<llama_token> llama_tokenize(
const struct llama_context * ctx,
const std::string & text,
- bool add_bos,
- bool special) {
- return llama_tokenize(llama_get_model(ctx), text, add_bos, special);
+ bool add_special,
+ bool parse_special) {
+ return llama_tokenize(llama_get_model(ctx), text, add_special, parse_special);
}
std::vector<llama_token> llama_tokenize(
const struct llama_model * model,
const std::string & text,
- bool add_bos,
- bool special) {
+ bool add_special,
+ bool parse_special) {
// upper limit for the number of tokens
- int n_tokens = text.length() + add_bos;
+ int n_tokens = text.length() + 2 * add_special;
std::vector<llama_token> result(n_tokens);
- n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
+ n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
if (n_tokens < 0) {
result.resize(-n_tokens);
- int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
+ int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
GGML_ASSERT(check == -n_tokens);
} else {
result.resize(n_tokens);
diff --git a/common/common.h b/common/common.h
index 4635e05d..a7f476c1 100644
--- a/common/common.h
+++ b/common/common.h
@@ -223,14 +223,14 @@ void llama_batch_add(
std::vector<llama_token> llama_tokenize(
const struct llama_context * ctx,
const std::string & text,
- bool add_bos,
- bool special = false);
+ bool add_special,
+ bool parse_special = false);
std::vector<llama_token> llama_tokenize(
const struct llama_model * model,
const std::string & text,
- bool add_bos,
- bool special = false);
+ bool add_special,
+ bool parse_special = false);
// tokenizes a token into a piece
// should work similar to Python's `tokenizer.id_to_piece`