author | Pierrick Hymbert <pierrick.hymbert@gmail.com> | 2024-04-06 05:40:47 +0200
committer | GitHub <noreply@github.com> | 2024-04-06 05:40:47 +0200
commit | 75cd4c77292034ecec587ecb401366f57338f7c0 (patch)
tree | de137718780505410bc75ce219f4bc164961c4fd /examples/server/utils.hpp
parent | a8bd14d55717754a1f48313a846a2b16fa998ad2 (diff)
ci: bench: support sse and fix prompt processing time / server: add tokens usage in stream OAI response (#6495)
* ci: bench: support sse and fix prompt processing time
server: add tokens usage in stream mode
* ci: bench: README.md EOL
* ci: bench: remove total pp and tg as they are not accurate
* ci: bench: fix case when there is no token generated
* ci: bench: change to the 95th percentile for pp and tg as it is closer to what the server exports in metrics (see the percentile sketch after this list)
* ci: bench: fix finish reason rate
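
Aside, not part of the commit: the "95th percentile" summary mentioned above is the usual nearest-rank percentile taken over per-request measurements for prompt processing (pp) and token generation (tg). A minimal sketch follows; the helper name and the nearest-rank interpolation rule are assumptions, the actual bench tooling may compute it differently.

// Hypothetical helper: nearest-rank percentile over a sample of per-request
// values (e.g. pp or tg measurements). Illustrative only.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

static double percentile(std::vector<double> samples, double p) {
    if (samples.empty()) {
        return 0.0;
    }
    std::sort(samples.begin(), samples.end());
    // nearest-rank index, clamped to at least the first element
    const std::size_t rank = std::max<std::size_t>(
        1, (std::size_t) std::ceil(p / 100.0 * samples.size()));
    return samples[rank - 1];
}

// e.g. percentile(pp_values, 95.0) and percentile(tg_values, 95.0)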
Diffstat (limited to 'examples/server/utils.hpp')
-rw-r--r-- | examples/server/utils.hpp | 9 |
1 file changed, 9 insertions, 0 deletions
diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index 47cc53c2..a8d43ac6 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -567,6 +567,15 @@ static std::vector<json> format_partial_response_oaicompat(json result, const st
         {"model", modelname},
         {"object", "chat.completion.chunk"}
     };
+    if (!finish_reason.empty()) {
+        int num_tokens_predicted = json_value(result, "tokens_predicted", 0);
+        int num_prompt_tokens = json_value(result, "tokens_evaluated", 0);
+        ret.push_back({"usage", json {
+            {"completion_tokens", num_tokens_predicted},
+            {"prompt_tokens", num_prompt_tokens},
+            {"total_tokens", num_tokens_predicted + num_prompt_tokens}
+        }});
+    }
 
     return std::vector<json>({ret});
 }
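
Illustration, not part of the commit: with this change, the streamed chunk that carries a non-empty finish_reason also carries the usage object built above. A minimal client-side sketch, assuming nlohmann::json and a hand-written example payload (the model name and token counts are made up), could read it back like this:

// Sketch only: parse one final stream chunk and print its usage block.
// The JSON payload is an illustrative example of the shape produced by
// format_partial_response_oaicompat after this change, not captured output.
#include <nlohmann/json.hpp>
#include <iostream>
#include <string>

int main() {
    const std::string data = R"({
        "choices": [{"finish_reason": "stop", "index": 0, "delta": {}}],
        "created": 1712373647,
        "id": "chatcmpl-example",
        "model": "example-model",
        "object": "chat.completion.chunk",
        "usage": {"completion_tokens": 42, "prompt_tokens": 12, "total_tokens": 54}
    })";

    const nlohmann::json chunk = nlohmann::json::parse(data);
    if (chunk.contains("usage")) {
        const auto & usage = chunk["usage"];
        std::cout << "prompt_tokens: "     << usage.value("prompt_tokens", 0)     << "\n"
                  << "completion_tokens: " << usage.value("completion_tokens", 0) << "\n"
                  << "total_tokens: "      << usage.value("total_tokens", 0)      << "\n";
    }
    return 0;
}

Over SSE, each chunk arrives as a `data: {...}` line; per the diff, the usage field only appears on the chunk whose finish_reason is set.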