author    Pedro Cuenca <pedro@huggingface.co>       2024-04-21 13:50:41 +0200
committer GitHub <noreply@github.com>               2024-04-21 14:50:41 +0300
commit    b97bc3966e852adb626c90be64fd48282800f504 (patch)
tree      178656d15821205889fa03ec603c7327facbb265 /examples/batched/batched.cpp
parent    b8109bc0139f15a5b321909f47510b89dca47ffc (diff)
llama : support Llama 3 HF conversion (#6745)
* Support Llama 3 conversion

The tokenizer is BPE.

* style

* Accept suggestion

Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>

* llama : add llama_token_is_eog()

ggml-ci

* llama : auto-detect more EOT tokens when missing in KV data

* convert : replacing EOS token is a hack

* llama : fix codegemma EOT token + add TODOs

* llama : fix model type string for 8B model

---------

Co-authored-by: Sourab Mangrulkar <13534540+pacman100@users.noreply.github.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
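For context, a minimal sketch (not part of the commit) of how a caller migrates from the old EOS-only comparison to the new end-of-generation helper named in the message above; the function stream_finished and its parameters are illustrative, and only llama.h as of this revision is assumed:

    // Sketch only: the old check compared against the single EOS token id,
    // while llama_token_is_eog() also recognizes EOT-style tokens.
    #include "llama.h"

    static bool stream_finished(const llama_model * model, llama_token new_token_id, int n_cur, int n_len) {
        // before this commit:
        // return new_token_id == llama_token_eos(model) || n_cur == n_len;

        // after this commit: stop on any end-of-generation token,
        // or when the target length n_len has been reached
        return llama_token_is_eog(model, new_token_id) || n_cur == n_len;
    }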
Diffstat (limited to 'examples/batched/batched.cpp')
-rw-r--r--  examples/batched/batched.cpp  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 7aaf63ce..be30d20b 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -191,8 +191,8 @@ int main(int argc, char ** argv) {
            //const llama_token new_token_id = llama_sample_token_greedy(ctx, &candidates_p);
-            // is it an end of stream? -> mark the stream as finished
-            if (new_token_id == llama_token_eos(model) || n_cur == n_len) {
+            // is it an end of generation? -> mark the stream as finished
+            if (llama_token_is_eog(model, new_token_id) || n_cur == n_len) {
                i_batch[i] = -1;
                LOG_TEE("\n");
                if (n_parallel > 1) {
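The change matters for Llama 3 instruct models, whose replies typically end with an end-of-turn token rather than the plain EOS token; with llama_token_is_eog() each stream in this example stops at either, instead of generating past the intended end of the response.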