Diffstat (limited to 'examples/server/server.cpp')
-rw-r--r-- | examples/server/server.cpp | 21 |
1 file changed, 21 insertions, 0 deletions
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 3300553f..a4b4d641 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -1104,6 +1104,12 @@ static json format_tokenizer_response(const std::vector<llama_token> &tokens)
         {"tokens", tokens}};
 }
 
+static json format_detokenized_response(std::string content)
+{
+    return json{
+        {"content", content}};
+}
+
 template <typename T>
 static T json_value(const json &body, const std::string &key, const T &default_value)
 {
@@ -1501,6 +1507,21 @@ int main(int argc, char **argv)
             const json data = format_tokenizer_response(tokens);
             return res.set_content(data.dump(), "application/json"); });
 
+    svr.Post("/detokenize", [&llama](const Request &req, Response &res)
+            {
+                auto lock = llama.lock();
+
+                const json body = json::parse(req.body);
+                std::string content;
+                if (body.count("tokens") != 0)
+                {
+                    const std::vector<llama_token> tokens = body["tokens"];
+                    content = tokens_to_str(llama.ctx, tokens.cbegin(), tokens.cend());
+                }
+
+                const json data = format_detokenized_response(content);
+                return res.set_content(data.dump(), "application/json"); });
+
     svr.Post("/embedding", [&llama](const Request &req, Response &res)
             {
                 auto lock = llama.lock();
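For reference, below is a minimal client-side sketch (not part of the patch) of how the new /detokenize route could be exercised, using cpp-httplib and nlohmann::json, the same libraries the server example already depends on. The host, port, and the token IDs in the request body are assumptions chosen for illustration.

// detokenize_client.cpp -- hedged sketch of a client for the /detokenize route.
// Assumes the server example is listening on 127.0.0.1:8080 (its usual default);
// adjust host/port as needed. The token IDs below are placeholder values.
#include <iostream>
#include <string>
#include <vector>

#include "httplib.h"   // cpp-httplib single-header HTTP client/server
#include "json.hpp"    // nlohmann::json single header

using json = nlohmann::json;

int main()
{
    httplib::Client cli("127.0.0.1", 8080);

    // The handler added by this patch reads an optional "tokens" array
    // from the JSON request body.
    const json request_body = {{"tokens", std::vector<int>{1, 15043, 2787}}};

    auto res = cli.Post("/detokenize", request_body.dump(), "application/json");
    if (res && res->status == 200)
    {
        // format_detokenized_response() wraps the detokenized text
        // in a "content" field.
        const json response = json::parse(res->body);
        std::cout << response["content"].get<std::string>() << std::endl;
    }
    else
    {
        std::cerr << "request failed" << std::endl;
        return 1;
    }
    return 0;
}

The new route mirrors the existing /tokenize endpoint: the handler reads the "tokens" array if present and returns the reconstructed text under a "content" key, so a request without "tokens" yields an empty string rather than an error.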