From aab606a11fc0a9740a7f297521c3eef851dfb351 Mon Sep 17 00:00:00 2001
From: Xuan Son Nguyen
Date: Fri, 15 Mar 2024 09:44:57 +0100
Subject: llama : add Orion chat template (#6066)

---
 llama.cpp | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/llama.cpp b/llama.cpp
index 2c384197..b8a8d272 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -14242,6 +14242,26 @@ static int32_t llama_chat_apply_template_internal(
         if (add_ass) {
             ss << "<start_of_turn>model\n";
         }
+    } else if (tmpl == "orion" || tmpl.find("'\\n\\nAssistant: ' + eos_token") != std::string::npos) {
+        // OrionStarAI/Orion-14B-Chat
+        std::string system_prompt = "";
+        for (auto message : chat) {
+            std::string role(message->role);
+            if (role == "system") {
+                // there is no system message support, we will merge it with user prompt
+                system_prompt = message->content;
+                continue;
+            } else if (role == "user") {
+                ss << "Human: ";
+                if (!system_prompt.empty()) {
+                    ss << system_prompt << "\n\n";
+                    system_prompt = "";
+                }
+                ss << message->content << "\n\nAssistant: </s>";
+            } else {
+                ss << message->content << "</s>";
+            }
+        }
     } else {
         // template not supported
         return -1;
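
For reference, below is a minimal standalone sketch of the formatting this branch produces, assuming a simple chat_msg struct in place of llama_chat_message; the struct and the format_orion function are illustrative stand-ins, not part of the llama.cpp API:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    struct chat_msg {        // hypothetical stand-in for llama_chat_message
        std::string role;
        std::string content;
    };

    // Render a conversation the way the Orion-14B-Chat template does:
    // the system message is folded into the next user turn, each user
    // turn is closed with "\n\nAssistant: </s>", and assistant replies
    // are terminated with "</s>".
    static std::string format_orion(const std::vector<chat_msg> & chat) {
        std::stringstream ss;
        std::string system_prompt;
        for (const auto & message : chat) {
            if (message.role == "system") {
                system_prompt = message.content;  // merged into next user prompt
            } else if (message.role == "user") {
                ss << "Human: ";
                if (!system_prompt.empty()) {
                    ss << system_prompt << "\n\n";
                    system_prompt.clear();
                }
                ss << message.content << "\n\nAssistant: </s>";
            } else {
                ss << message.content << "</s>";
            }
        }
        return ss.str();
    }

    int main() {
        std::vector<chat_msg> chat = {
            {"system",    "You are a helpful assistant."},
            {"user",      "Hello"},
            {"assistant", "Hi there"},
            {"user",      "Who are you?"},
        };
        std::cout << format_orion(chat) << std::endl;
        return 0;
    }

For this sample conversation the sketch prints:

    Human: You are a helpful assistant.

    Hello

    Assistant: </s>Hi there</s>Human: Who are you?

    Assistant: </s>

Note that the eos token </s> is emitted immediately after "Assistant: ", which mirrors the "'\n\nAssistant: ' + eos_token" fragment the template is detected by.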