summary refs log tree commit diff
path: root/examples/server
diff options
context:
space:
mode:
Diffstat (limited to 'examples/server')
-rw-r--r-- examples/server/server.cpp | 2
-rw-r--r-- examples/server/utils.hpp  | 4
2 files changed, 1 insertion, 5 deletions
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 634e653a..25bc2963 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1201,7 +1201,7 @@ struct server_context {
});
}
- if (result.tok == llama_token_eos(model)) {
+ if (llama_token_is_eog(model, result.tok)) {
slot.stopped_eos = true;
slot.has_next_token = false;
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index a8d43ac6..1a221250 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -381,10 +381,6 @@ static json oaicompat_completion_params_parse(
} else {
llama_params["stop"] = json_value(body, "stop", json::array());
}
- // Some chat templates don't use EOS token to stop generation
- // We must add their end sequences to list of stop words
- llama_params["stop"].push_back("<|im_end|>"); // chatml
- llama_params["stop"].push_back("<end_of_turn>"); // gemma
// Handle "response_format" field
if (body.contains("response_format")) {