From b97bc3966e852adb626c90be64fd48282800f504 Mon Sep 17 00:00:00 2001
From: Pedro Cuenca
Date: Sun, 21 Apr 2024 13:50:41 +0200
Subject: llama : support Llama 3 HF conversion (#6745)

* Support Llama 3 conversion

The tokenizer is BPE.

* style

* Accept suggestion

Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>

* llama : add llama_token_is_eog()

ggml-ci

* llama : auto-detect more EOT tokens when missing in KV data

* convert : replacing EOS token is a hack

* llama : fix codegemma EOT token + add TODOs

* llama : fix model type string for 8B model

---------

Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>
Co-authored-by: Georgi Gerganov
---
 examples/parallel/parallel.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/examples/parallel/parallel.cpp b/examples/parallel/parallel.cpp
index f66c9101..7c5595d6 100644
--- a/examples/parallel/parallel.cpp
+++ b/examples/parallel/parallel.cpp
@@ -359,7 +359,7 @@ int main(int argc, char ** argv) {
                 //            client.id, client.seq_id, id, client.n_decoded, client.i_batch, token_str.c_str());
 
                 if (client.n_decoded > 2 &&
-                        (id == llama_token_eos(model) ||
+                        (llama_token_is_eog(model, id) ||
                          (params.n_predict > 0 && client.n_decoded + client.n_prompt >= params.n_predict) ||
                          client.response.find("User:") != std::string::npos ||
                          client.response.find('\n') != std::string::npos)) {
-- 
cgit v1.2.3
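
Note: the hunk above generalizes the stop condition from a single EOS comparison to the new llama_token_is_eog() check, which is what Llama 3 needs since it can end a turn with <|eot_id|> rather than the classic EOS token. Below is a minimal sketch of how a caller might use the new API outside parallel.cpp; the helper name should_stop and its parameters are illustrative and not part of this patch.

    // Illustrative sketch (not from this patch): stop generation on any
    // end-of-generation token instead of comparing against EOS only.
    #include "llama.h"

    static bool should_stop(const llama_model * model, llama_token id,
                            int n_decoded, int n_prompt, int n_predict) {
        // llama_token_is_eog() returns true for EOS as well as other
        // end-of-generation tokens (e.g. EOT) known to the model
        if (llama_token_is_eog(model, id)) {
            return true;
        }
        // same n_predict cap as the check in parallel.cpp
        return n_predict > 0 && n_decoded + n_prompt >= n_predict;
    }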