From 1442677f92e45a475be7b4d056e3633d1d6f813b Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Tue, 4 Jun 2024 21:23:39 +0300
Subject: common : refactor cli arg parsing (#7675)

* common : gpt_params_parse do not print usage

* common : rework usage print (wip)

* common : valign

* common : rework print_usage

* infill : remove cfg support

* common : reorder args

* server : deduplicate parameters

ggml-ci

* common : add missing header

ggml-ci

* common : remove --random-prompt usages

ggml-ci

* examples : migrate to gpt_params

ggml-ci

* batched-bench : migrate to gpt_params

* retrieval : migrate to gpt_params

* common : change defaults for escape and n_ctx

* common : remove chatml and instruct params

ggml-ci

* common : passkey use gpt_params
---
 examples/server/utils.hpp | 7 -------
 1 file changed, 7 deletions(-)

(limited to 'examples/server/utils.hpp')

diff --git a/examples/server/utils.hpp b/examples/server/utils.hpp
index d8a2286e..b7bfb41d 100644
--- a/examples/server/utils.hpp
+++ b/examples/server/utils.hpp
@@ -116,13 +116,6 @@ static inline void server_log(const char * level, const char * function, int lin
 // chat template utils
 //
 
-// Check if the template supplied via "--chat-template" is supported or not. Returns true if it's valid
-inline bool verify_custom_template(const std::string & tmpl) {
-    llama_chat_message chat[] = {{"user", "test"}};
-    int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
-    return res >= 0;
-}
-
 // Format given chat. If tmpl is empty, we take the template from model metadata
 inline std::string format_chat(const struct llama_model * model, const std::string & tmpl, const std::vector<json> & messages) {
     size_t alloc_size = 0;
-- 
cgit v1.2.3
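
Note: callers that still want to validate a user-supplied --chat-template up
front can reproduce the removed verify_custom_template() helper directly with
the public llama.h API. Below is a minimal sketch under that assumption; the
function name chat_template_is_supported is hypothetical, but the call pattern
is exactly the one the deleted helper used.

    #include <string>

    #include "llama.h"

    // Hypothetical standalone replacement for the removed
    // verify_custom_template(). Applying the template to a single dummy
    // message with a null model and a zero-length buffer acts as a dry run:
    // llama_chat_apply_template() returns the required buffer size on
    // success, or a negative value if the template is not supported.
    static bool chat_template_is_supported(const std::string & tmpl) {
        llama_chat_message chat[] = {{"user", "test"}};
        const int res = llama_chat_apply_template(nullptr, tmpl.c_str(), chat, 1, true, nullptr, 0);
        return res >= 0;
    }

A caller would typically run this once at startup, e.g.
if (!chat_template_is_supported(chat_template)) { /* reject the template */ },
rather than re-validating on every request.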