From 00d62adb79bf914a95fb9a2e8f42f3029e76d62c Mon Sep 17 00:00:00 2001
From: Cebtenzzre
Date: Thu, 7 Sep 2023 13:22:29 -0400
Subject: fix some warnings from gcc and clang-tidy (#3038)

Co-authored-by: xaedes
---
 examples/main/main.cpp | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)

(limited to 'examples/main')

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 9201b53b..c9ca7719 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -48,8 +48,9 @@ static bool is_interacting = false;
 
 void write_logfile(
     const llama_context * ctx, const gpt_params & params, const llama_model * model,
-    const std::vector<llama_token> input_tokens, const std::string output, const std::vector<llama_token> output_tokens) {
-
+    const std::vector<llama_token> & input_tokens, const std::string & output,
+    const std::vector<llama_token> & output_tokens
+) {
     if (params.logdir.empty()) {
         return;
     }
@@ -109,7 +110,7 @@ int main(int argc, char ** argv) {
     gpt_params params;
     g_params = &params;
 
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
 
@@ -303,7 +304,7 @@ int main(int argc, char ** argv) {
 
     // debug message about similarity of saved session, if applicable
     size_t n_matching_session_tokens = 0;
-    if (session_tokens.size() > 0) {
+    if (!session_tokens.empty()) {
         for (llama_token id : session_tokens) {
             if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                 break;
@@ -401,7 +402,7 @@ int main(int argc, char ** argv) {
 
         LOG_TEE("%s: interactive mode on.\n", __func__);
 
-        if (params.antiprompt.size()) {
+        if (!params.antiprompt.empty()) {
             for (const auto & antiprompt : params.antiprompt) {
                 LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
             }
@@ -499,7 +500,7 @@ int main(int argc, char ** argv) {
 
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
-        if (embd.size() > 0) {
+        if (!embd.empty()) {
            // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;
@@ -624,7 +625,7 @@ int main(int argc, char ** argv) {
                 LOG("n_past = %d\n", n_past);
             }
 
-            if (embd.size() > 0 && !path_session.empty()) {
+            if (!embd.empty() && !path_session.empty()) {
                 session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                 n_session_consumed = session_tokens.size();
             }
@@ -695,7 +696,7 @@ int main(int argc, char ** argv) {
         // if not currently processing queued inputs;
         if ((int) embd_inp.size() <= n_consumed) {
             // check for reverse prompt
-            if (params.antiprompt.size()) {
+            if (!params.antiprompt.empty()) {
                 std::string last_output;
                 for (auto id : last_tokens) {
                     last_output += llama_token_to_piece(ctx, id);
@@ -732,7 +733,7 @@ int main(int argc, char ** argv) {
             LOG("found EOS token\n");
 
             if (params.interactive) {
-                if (params.antiprompt.size() != 0) {
+                if (!params.antiprompt.empty()) {
                     // tokenize and inject first reverse prompt
                     const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                     embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
-- 
cgit v1.2.3
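The cleanups above follow three recurring patterns: clang-tidy's readability-container-size-empty check prefers !c.empty() over c.size() > 0 or c.size() != 0, plain negation is preferred over comparing a bool against false, and heavy parameters such as std::vector and std::string are passed by const reference to avoid copies. A minimal standalone sketch of the before/after idioms, with illustrative stand-ins (parse_args, log_tokens) rather than the real llama.cpp functions:

#include <cstdio>
#include <string>
#include <vector>

// Hypothetical stand-in for gpt_params_parse(), used only for illustration.
static bool parse_args(int argc, char ** argv) {
    return argc > 0 && argv != nullptr;
}

// Pass containers by const reference (as write_logfile() now does) so the
// vector and string are not copied on every call.
static void log_tokens(const std::vector<int> & tokens, const std::string & label) {
    // Before: if (tokens.size() > 0) { ... }
    // After: readability-container-size-empty prefers empty().
    if (!tokens.empty()) {
        std::printf("%s: %zu tokens\n", label.c_str(), tokens.size());
    }
}

int main(int argc, char ** argv) {
    // Before: if (parse_args(argc, argv) == false) { ... }
    // After: plain negation, as at the gpt_params_parse() call site.
    if (!parse_args(argc, argv)) {
        return 1;
    }
    log_tokens({1, 2, 3}, "input");
    return 0;
}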