author     Isaac McFadyen <isaac@imcf.me>           2024-01-11 09:33:26 -0500
committer  GitHub <noreply@github.com>              2024-01-11 16:33:26 +0200
commit     2f043328e3116724d15b915b5c6078e2df860a69 (patch)
tree       17879c9d90c597d7d39e2a9cf889816a86f1786a /examples/server
parent     2a7c94db5fb67b2f8882d2d16a11bf5d8d12d397 (diff)
server : fix typo in model name (#4876)
Diffstat (limited to 'examples/server')
-rw-r--r--  examples/server/server.cpp  2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 4a071499..860e4e9a 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2515,7 +2515,7 @@ json oaicompat_completion_params_parse(
     //
     // https://platform.openai.com/docs/api-reference/chat/create
     llama_sampling_params default_sparams;
-    llama_params["model"] = json_value(body, "model", std::string("uknown"));
+    llama_params["model"] = json_value(body, "model", std::string("unknown"));
     llama_params["prompt"] = format_chatml(body["messages"]); // OpenAI 'messages' to llama.cpp 'prompt'
     llama_params["cache_prompt"] = json_value(body, "cache_prompt", false);
     llama_params["temperature"] = json_value(body, "temperature", 0.0);
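For context: `json_value` reads a key from the JSON request body and falls back to a caller-supplied default when the key is missing, which is why the misspelled default string "uknown" surfaced whenever a client omitted the optional "model" field. Below is a minimal sketch of that fallback behavior using nlohmann::json; the helper body here is an illustration of the pattern, not the exact definition from the repository.

```cpp
#include <iostream>
#include <string>
#include <nlohmann/json.hpp>

using json = nlohmann::json;

// Sketch of a json_value-style helper: return body[key] when the key is
// present and non-null, otherwise the caller-supplied default.
// (Illustrative only; the server's real helper lives in the llama.cpp sources.)
template <typename T>
static T json_value(const json & body, const std::string & key, const T & default_value) {
    if (body.contains(key) && !body.at(key).is_null()) {
        return body.at(key).get<T>();
    }
    return default_value;
}

int main() {
    // An OpenAI-style request that omits the optional "model" field.
    json body = json::parse(R"({"messages": []})");

    // After this commit the fallback reads "unknown" instead of "uknown".
    std::string model = json_value(body, "model", std::string("unknown"));
    std::cout << model << '\n';  // prints: unknown
}
```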