author     Jared Van Bortel <jared@nomic.ai>    2024-04-09 13:44:08 -0400
committer  GitHub <noreply@github.com>          2024-04-09 13:44:08 -0400
commit     1b67731e184e27a465b8c5476061294a4af668ea (patch)
tree       15a2d877029fb509a34e462c227475bc7d6dc31e /examples/imatrix
parent     c4a3a4ff47d62d2503ddf9bd91b58c21f04fe3c3 (diff)
BERT tokenizer fixes (#6498)
Key changes:
* BERT conversion: fix abuse of LlamaHfVocab, do not set BOS or EOS
* Nomic Embed conversion: pad vocab instead of slicing embedding tensor
* llama_tokenize: handle added special tokens like HF does
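The tokenizer change is easiest to see from the caller's side. The sketch below is not part of the patch; it assumes llama.cpp's common helpers (common.h) and the hypothetical wrapper name tokenize_prompt, and shows how an example program would tokenize a prompt after this change: passing true lets the helper add whatever special tokens the model's metadata requests, the way HF tokenizers do.

// Illustrative sketch only (not from the patch): tokenize a prompt and let
// the model's metadata decide which special tokens (e.g. BOS) to prepend.
#include "common.h"   // assumed: llama.cpp common helpers (::llama_tokenize)
#include "llama.h"

#include <string>
#include <vector>

static std::vector<llama_token> tokenize_prompt(llama_context * ctx, const std::string & prompt) {
    // true = "add special tokens": the helper consults the model instead of
    // the caller hard-coding an add_bos flag.
    return ::llama_tokenize(ctx, prompt, true);
}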
Diffstat (limited to 'examples/imatrix')
-rw-r--r--  examples/imatrix/imatrix.cpp  3
1 file changed, 2 insertions, 1 deletion
diff --git a/examples/imatrix/imatrix.cpp b/examples/imatrix/imatrix.cpp
index d8cb0a64..1bf55f90 100644
--- a/examples/imatrix/imatrix.cpp
+++ b/examples/imatrix/imatrix.cpp
@@ -349,12 +349,13 @@ static void process_logits(
 static bool compute_imatrix(llama_context * ctx, const gpt_params & params, bool compute_ppl, int from_chunk) {
     const bool add_bos = llama_should_add_bos_token(llama_get_model(ctx));
+    GGML_ASSERT(llama_add_eos_token(llama_get_model(ctx)) != 1);
     const int n_ctx = llama_n_ctx(ctx);
     auto tim1 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenizing the input ..\n", __func__);
-    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, add_bos);
+    std::vector<llama_token> tokens = ::llama_tokenize(ctx, params.prompt, true);
     auto tim2 = std::chrono::high_resolution_clock::now();
     fprintf(stderr, "%s: tokenization took %g ms\n",__func__,1e-3*std::chrono::duration_cast<std::chrono::microseconds>(tim2-tim1).count());
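For context on the new assertion: llama_add_eos_token() reports whether the model's metadata asks for an EOS token to be appended automatically. Because compute_imatrix now tokenizes with special tokens enabled, the assert rejects models that would get an EOS appended, presumably because the chunked evaluation is not written with a trailing EOS in mind. Below is a minimal standalone sketch of that guard; the helper name check_no_auto_eos is hypothetical, and the only API calls used are the ones already visible in the diff above.

// Hypothetical standalone version of the guard added in this patch.
#include "ggml.h"    // GGML_ASSERT
#include "llama.h"

static void check_no_auto_eos(llama_context * ctx) {
    const llama_model * model = llama_get_model(ctx);
    // A return value of 1 means the model metadata requests an automatic EOS
    // token, which compute_imatrix does not handle; any other value passes.
    GGML_ASSERT(llama_add_eos_token(model) != 1);
}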