From 75cd4c77292034ecec587ecb401366f57338f7c0 Mon Sep 17 00:00:00 2001
From: Pierrick Hymbert
Date: Sat, 6 Apr 2024 05:40:47 +0200
Subject: ci: bench: support sse and fix prompt processing time / server: add tokens usage in stream OAI response (#6495)

* ci: bench: support sse and fix prompt processing time
  server: add tokens usage in stream mode

* ci: bench: README.md EOL

* ci: bench: remove total pp and tg as it is not accurate

* ci: bench: fix case when there is no token generated

* ci: bench: change to the 95 percentile for pp and tg as it is closer to what the server exports in metrics

* ci: bench: fix finish reason rate
---
 examples/server/utils.hpp | 9 +++++++++
 1 file changed, 9 insertions(+)

(limited to 'examples/server/utils.hpp')

diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index 47cc53c2..a8d43ac6 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -567,6 +567,15 @@ static std::vector<json> format_partial_response_oaicompat(json result, const st
         {"model", modelname},
         {"object", "chat.completion.chunk"}
     };
+    if (!finish_reason.empty()) {
+        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
+        int num_prompt_tokens = json_value(result, "tokens_evaluated", 0);
+        ret.push_back({"usage", json {
+            {"completion_tokens", num_tokens_predicted},
+            {"prompt_tokens", num_prompt_tokens},
+            {"total_tokens", num_tokens_predicted + num_prompt_tokens}
+        }});
+    }
 
     return std::vector<json>({ret});
 }
-- 
cgit v1.2.3
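
As a quick illustration of what this change produces, below is a minimal standalone sketch (not part of the patch) that builds a chunk carrying the same "usage" object. It assumes nlohmann/json (the json type used by the server) is available as <nlohmann/json.hpp>; the model name and token counts are illustrative placeholders, not values from the server.

    // Minimal sketch: mirrors the "usage" object added in the hunk above.
    // Assumes nlohmann/json; token counts and model name are illustrative.
    #include <iostream>
    #include <nlohmann/json.hpp>

    using json = nlohmann::json;

    int main() {
        // In the server these come from the task result fields
        // "tokens_predicted" and "tokens_evaluated".
        int num_tokens_predicted = 12;
        int num_prompt_tokens    = 34;

        json ret = json {
            {"model",  "placeholder-model"},
            {"object", "chat.completion.chunk"}
        };

        // Appended only to the final chunk, i.e. when finish_reason is set.
        ret.push_back({"usage", json {
            {"completion_tokens", num_tokens_predicted},
            {"prompt_tokens",     num_prompt_tokens},
            {"total_tokens",      num_tokens_predicted + num_prompt_tokens}
        }});

        std::cout << ret.dump(2) << std::endl;
        return 0;
    }

With this in place, a client consuming the SSE stream can read prompt, completion, and total token counts from the last chunk instead of counting tokens itself, which is what the bench script relies on for its prompt processing and token generation metrics.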