summaryrefslogtreecommitdiff
path: root/llama.cpp
diff options
context:
space:
mode:
authorXuan Son Nguyen <thichthat@gmail.com>2024-03-15 09:44:57 +0100
committerGitHub <noreply@github.com>2024-03-15 10:44:57 +0200
commitaab606a11fc0a9740a7f297521c3eef851dfb351 (patch)
tree58f6b901343c6ddbad604019097b3151e39e774f /llama.cpp
parentb0bc9f4a9da7c19f4779106ea83b23feca747566 (diff)
llama : add Orion chat template (#6066)
Diffstat (limited to 'llama.cpp')
-rw-r--r--llama.cpp20
1 file changed, 20 insertions, 0 deletions
diff --git a/llama.cpp b/llama.cpp
index 2c384197..b8a8d272 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -14242,6 +14242,26 @@ static int32_t llama_chat_apply_template_internal(
if (add_ass) {
ss << "<start_of_turn>model\n";
}
+ } else if (tmpl == "orion" || tmpl.find("'\\n\\nAssistant: ' + eos_token") != std::string::npos) {
+ // OrionStarAI/Orion-14B-Chat
+ std::string system_prompt = "";
+ for (auto message : chat) {
+ std::string role(message->role);
+ if (role == "system") {
+ // there is no system message support, we will merge it with user prompt
+ system_prompt = message->content;
+ continue;
+ } else if (role == "user") {
+ ss << "Human: ";
+ if (!system_prompt.empty()) {
+ ss << system_prompt << "\n\n";
+ system_prompt = "";
+ }
+ ss << message->content << "\n\nAssistant: </s>";
+ } else {
+ ss << message->content << "</s>";
+ }
+ }
} else {
// template not supported
return -1;