author    Jared Van Bortel <jared@nomic.ai>    2024-04-09 13:44:08 -0400
committer GitHub <noreply@github.com>          2024-04-09 13:44:08 -0400
commit    1b67731e184e27a465b8c5476061294a4af668ea (patch)
tree      15a2d877029fb509a34e462c227475bc7d6dc31e /common/common.cpp
parent    c4a3a4ff47d62d2503ddf9bd91b58c21f04fe3c3 (diff)
BERT tokenizer fixes (#6498)
Key changes:
* BERT conversion: fix abuse of LlamaHfVocab, do not set BOS or EOS
* Nomic Embed conversion: pad vocab instead of slicing embedding tensor
* llama_tokenize: handle added special tokens like HF does
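As context for the diff below, a minimal usage sketch of the renamed wrapper (not part of the commit; the helper name and prompt are illustrative): add_special asks the tokenizer to add the vocab's own special tokens (e.g. BOS/EOS), roughly like Hugging Face's add_special_tokens, while parse_special controls whether special-token text embedded in the input is matched as tokens.

    // Hypothetical caller of the renamed common/common.cpp wrapper.
    #include <string>
    #include <vector>
    #include "common.h"   // declares the llama_tokenize overloads changed below

    std::vector<llama_token> encode_prompt(const llama_context * ctx, const std::string & prompt) {
        // add_special = true:    add BOS/EOS per the model's tokenizer config
        // parse_special = false: leave text like "<s>" in the prompt as plain text
        return llama_tokenize(ctx, prompt, /*add_special=*/true, /*parse_special=*/false);
    }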
Diffstat (limited to 'common/common.cpp')
-rw-r--r--    common/common.cpp    16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 7d983a45..98fc8388 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2212,23 +2212,23 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
 std::vector<llama_token> llama_tokenize(
     const struct llama_context * ctx,
     const std::string & text,
-    bool add_bos,
-    bool special) {
-    return llama_tokenize(llama_get_model(ctx), text, add_bos, special);
+    bool add_special,
+    bool parse_special) {
+    return llama_tokenize(llama_get_model(ctx), text, add_special, parse_special);
 }
 
 std::vector<llama_token> llama_tokenize(
     const struct llama_model * model,
     const std::string & text,
-    bool add_bos,
-    bool special) {
+    bool add_special,
+    bool parse_special) {
     // upper limit for the number of tokens
-    int n_tokens = text.length() + add_bos;
+    int n_tokens = text.length() + 2 * add_special;
     std::vector<llama_token> result(n_tokens);
-    n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
+    n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
+        int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
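Since explanatory comments cannot be added inside the diff itself, here is the post-commit wrapper restated as a standalone annotated sketch (the function name is made up; includes and the C-level llama_tokenize signature are assumed from llama.h as used in the diff above):

    #include <string>
    #include <vector>
    #include "ggml.h"    // GGML_ASSERT
    #include "llama.h"   // llama_model, llama_token, C-level llama_tokenize

    static std::vector<llama_token> tokenize_two_pass(const llama_model * model,
                                                      const std::string & text,
                                                      bool add_special,
                                                      bool parse_special) {
        // Upper bound on the token count: at most one token per input byte,
        // plus up to two extra special tokens (BOS and EOS) when add_special
        // is set -- this is why the commit changes "+ add_bos" to "+ 2 * add_special".
        int n_tokens = text.length() + 2 * add_special;
        std::vector<llama_token> result(n_tokens);
        n_tokens = llama_tokenize(model, text.data(), text.length(),
                                  result.data(), result.size(),
                                  add_special, parse_special);
        if (n_tokens < 0) {
            // A negative return means the buffer was too small; -n_tokens is
            // the exact count required, so resize and tokenize again.
            result.resize(-n_tokens);
            int check = llama_tokenize(model, text.data(), text.length(),
                                       result.data(), result.size(),
                                       add_special, parse_special);
            GGML_ASSERT(check == -n_tokens);
        } else {
            result.resize(n_tokens);
        }
        return result;
    }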