commit     1b67731e184e27a465b8c5476061294a4af668ea
author     Jared Van Bortel <jared@nomic.ai>  2024-04-09 13:44:08 -0400
committer  GitHub <noreply@github.com>        2024-04-09 13:44:08 -0400
tree       15a2d877029fb509a34e462c227475bc7d6dc31e /common
parent     c4a3a4ff47d62d2503ddf9bd91b58c21f04fe3c3
BERT tokenizer fixes (#6498)
Key changes:
* BERT conversion: fix abuse of LlamaHfVocab, do not set BOS or EOS
* Nomic Embed conversion: pad vocab instead of slicing embedding tensor
* llama_tokenize: handle added special tokens like HF does (see the sketch below)
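
The third change is the user-visible one: with `parse_special` enabled, a substring that spells out one of the model's added special tokens is emitted as that single token id instead of being split as ordinary text, matching Hugging Face tokenizers. A minimal sketch against the renamed `common` wrappers from this commit; the loaded `model` pointer and the `<|endoftext|>` marker are placeholders for whatever model and added tokens you actually have:

```cpp
#include "common.h"  // declares the llama_tokenize() wrappers changed below

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical helper: `model` must point to an already-loaded model.
void tokenize_demo(const struct llama_model * model) {
    const std::string text = "<|endoftext|>hello";

    // parse_special = true: "<|endoftext|>" is matched against the model's
    // added special tokens and becomes a single token id, as HF does.
    std::vector<llama_token> parsed =
        llama_tokenize(model, text, /*add_special=*/false, /*parse_special=*/true);

    // parse_special = false: the same string is tokenized as plain text,
    // so the marker is split into several ordinary tokens.
    std::vector<llama_token> plain =
        llama_tokenize(model, text, /*add_special=*/false, /*parse_special=*/false);

    printf("parsed: %zu tokens, plain: %zu tokens\n", parsed.size(), plain.size());
}
```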
Diffstat (limited to 'common')
common/common.cpp | 16 ++++++++--------
common/common.h   |  8 ++++----
2 files changed, 12 insertions(+), 12 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 7d983a45..98fc8388 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2212,23 +2212,23 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
 std::vector<llama_token> llama_tokenize(
   const struct llama_context * ctx,
            const std::string & text,
-                        bool   add_bos,
-                        bool   special) {
-    return llama_tokenize(llama_get_model(ctx), text, add_bos, special);
+                        bool   add_special,
+                        bool   parse_special) {
+    return llama_tokenize(llama_get_model(ctx), text, add_special, parse_special);
 }
 
 std::vector<llama_token> llama_tokenize(
     const struct llama_model * model,
            const std::string & text,
-                        bool   add_bos,
-                        bool   special) {
+                        bool   add_special,
+                        bool   parse_special) {
     // upper limit for the number of tokens
-    int n_tokens = text.length() + add_bos;
+    int n_tokens = text.length() + 2 * add_special;
     std::vector<llama_token> result(n_tokens);
-    n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
+    n_tokens = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
     if (n_tokens < 0) {
         result.resize(-n_tokens);
-        int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_bos, special);
+        int check = llama_tokenize(model, text.data(), text.length(), result.data(), result.size(), add_special, parse_special);
         GGML_ASSERT(check == -n_tokens);
     } else {
         result.resize(n_tokens);
diff --git a/common/common.h b/common/common.h
index 4635e05d..a7f476c1 100644
--- a/common/common.h
+++ b/common/common.h
@@ -223,14 +223,14 @@ void llama_batch_add(
 std::vector<llama_token> llama_tokenize(
   const struct llama_context * ctx,
            const std::string & text,
-                        bool   add_bos,
-                        bool   special = false);
+                        bool   add_special,
+                        bool   parse_special = false);
 
 std::vector<llama_token> llama_tokenize(
     const struct llama_model * model,
            const std::string & text,
-                        bool   add_bos,
-                        bool   special = false);
+                        bool   add_special,
+                        bool   parse_special = false);
 
 // tokenizes a token into a piece
 // should work similar to Python's `tokenizer.id_to_piece`
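
Net effect: the `common` wrappers now use the same parameter names as the C-API `llama_tokenize`, and `add_special` may insert a token at both ends of the text (BOS and EOS, or [CLS]/[SEP] for the BERT models this commit targets), which is why the size estimate grows by `2 * add_special` instead of `add_bos`. A minimal caller sketch, assuming the llama.cpp C API of this era; the GGUF path is a placeholder:

```cpp
#include "common.h"
#include "llama.h"

#include <cstdio>
#include <string>
#include <vector>

int main() {
    llama_backend_init();

    // Placeholder path: any GGUF model, e.g. a converted BERT/Nomic Embed model.
    llama_model_params mparams = llama_model_default_params();
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    // add_special = true: let the vocab decide whether to wrap the text in
    // BOS/EOS (or [CLS] ... [SEP] for WordPiece models).
    // parse_special = false: treat the prompt as untrusted plain text.
    std::vector<llama_token> toks =
        llama_tokenize(model, "hello world", /*add_special=*/true, /*parse_special=*/false);

    printf("%zu tokens\n", toks.size());

    llama_free_model(model);
    llama_backend_free();
    return 0;
}
```

The two-pass pattern inside the wrapper (visible in the diff above) relies on the C API returning the negated required token count when the output buffer is too small, so the initial `text.length() + 2 * add_special` guess only needs to be an upper bound in the common case.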