From e937066420b79a757bf80e9836eb12b88420a218 Mon Sep 17 00:00:00 2001
From: slaren
Date: Sun, 19 Nov 2023 11:10:52 +0100
Subject: gguf-py : export chat templates (#4125)

* gguf-py : export chat templates

* llama.cpp : escape new lines in gguf kv info prints

* gguf-py : bump version

* gguf-py : check chat_template type

* gguf-py : initialize chat_template
---
 llama.cpp | 1 +
 1 file changed, 1 insertion(+)

(limited to 'llama.cpp')

diff --git a/llama.cpp b/llama.cpp
index 56d8e765..062c9757 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1871,6 +1871,7 @@ struct llama_model_loader {
             if (value.size() > MAX_VALUE_LEN) {
                 value = format("%s...", value.substr(0, MAX_VALUE_LEN - 3).c_str());
             }
+            replace_all(value, "\n", "\\n");
             LLAMA_LOG_INFO("%s: - kv %3d: %42s %-16s = %s\n", __func__, i, name, type_name.c_str(), value.c_str());
         }
--
cgit v1.2.3
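
Note: the added replace_all call keeps each GGUF KV entry on a single log line by escaping embedded newlines, which matters for values like tokenizer.chat_template that usually span many lines. Below is a minimal, self-contained C++ sketch of a replace_all-style helper with the same assumed behavior; it is an illustration, not the exact llama.cpp implementation.

    // Sketch of a replace_all-style helper (assumed behavior, not the exact
    // llama.cpp code): replaces every occurrence of `search` in `s` with
    // `replace`, so "\n" can be escaped to "\\n" before logging.
    #include <cstdio>
    #include <string>

    static void replace_all(std::string & s, const std::string & search, const std::string & replace) {
        if (search.empty()) {
            return;
        }
        size_t pos = 0;
        while ((pos = s.find(search, pos)) != std::string::npos) {
            s.replace(pos, search.length(), replace);
            pos += replace.length(); // skip past the replacement to avoid re-matching it
        }
    }

    int main() {
        // A chat template value typically spans multiple lines.
        std::string value = "{{ bos_token }}\n{% for message in messages %}\n...";
        replace_all(value, "\n", "\\n");
        // The value now prints as one log line, with literal "\n" markers
        // where the newlines used to be.
        printf("tokenizer.chat_template = %s\n", value.c_str());
        return 0;
    }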