summary refs log tree commit diff
path: root/common
diff options
context:
space:
mode:
Diffstat (limited to 'common')
-rw-r--r--  common/common.cpp  12
-rw-r--r--  common/common.h     6
-rw-r--r--  common/train.cpp    8
3 files changed, 15 insertions(+), 11 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 9c4f7df2..3e4b8a8c 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -879,21 +879,23 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
std::vector<llama_token> llama_tokenize(
const struct llama_context * ctx,
const std::string & text,
- bool add_bos) {
- return llama_tokenize(llama_get_model(ctx), text, add_bos);
+ bool add_bos,
+ bool special) {
+ return llama_tokenize(llama_get_model(ctx), text, add_bos, special);
}
std::vector<llama_token> llama_tokenize(
const struct llama_model * model,
const std::string & text,
- bool add_bos) {
+ bool add_bos,
+ bool special) {
// upper limit for the number of tokens
int n_tokens = text.length() + add_bos;
std::vector<llama_token> result(n_tokens);
- n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos);
+ n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
if (n_tokens < 0) {
result.resize(-n_tokens);
- int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos);
+ int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
GGML_ASSERT(check == -n_tokens);
} else {
result.resize(n_tokens);
diff --git a/common/common.h b/common/common.h
index 36fd4416..08c60323 100644
--- a/common/common.h
+++ b/common/common.h
@@ -137,12 +137,14 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
std::vector<llama_token> llama_tokenize(
const struct llama_context * ctx,
const std::string & text,
- bool add_bos);
+ bool add_bos,
+ bool special = false);
std::vector<llama_token> llama_tokenize(
const struct llama_model * model,
const std::string & text,
- bool add_bos);
+ bool add_bos,
+ bool special = false);
// tokenizes a token into a piece
// should work similar to Python's `tokenizer.id_to_piece`
diff --git a/common/train.cpp b/common/train.cpp
index 35a4cf9e..972eaefe 100644
--- a/common/train.cpp
+++ b/common/train.cpp
@@ -863,7 +863,7 @@ size_t tokenize_file(
(int) buf.size(),
out_tokens.data(),
(int) out_tokens.size(),
- false);
+ false, false);
if (n_tokens < 0) {
out_tokens.resize(-n_tokens);
n_tokens = llama_tokenize(
@@ -872,7 +872,7 @@ size_t tokenize_file(
(int) buf.size(),
out_tokens.data(),
(int) out_tokens.size(),
- false);
+ false, false);
}
if (n_tokens >= 0) {
out_tokens.resize(n_tokens);
@@ -966,7 +966,7 @@ size_t tokenize_file(
(int) buf_sample.size(),
tok_sample.data(),
(int) tok_sample.size(),
- false);
+ false, false);
if (n_tokens < 0) {
tok_sample.resize(-n_tokens);
n_tokens = llama_tokenize(llama_get_model(lctx),
@@ -974,7 +974,7 @@ size_t tokenize_file(
(int) buf_sample.size(),
tok_sample.data(),
(int) tok_sample.size(),
- false);
+ false, false);
GGML_ASSERT(n_tokens >= 0);
}
GGML_ASSERT(n_tokens <= (int) tok_sample.size());