author    Cebtenzzre <cebtenzzre@gmail.com>    2023-09-07 13:22:29 -0400
committer GitHub <noreply@github.com>          2023-09-07 13:22:29 -0400
commit    00d62adb79bf914a95fb9a2e8f42f3029e76d62c (patch)
tree      36d294e4df3ded0cd0f3c96ab7bd64dd800002ec /examples/main/main.cpp
parent    4fa2cc1750b861880de42515cb19c13b2d776ee2 (diff)

fix some warnings from gcc and clang-tidy (#3038)

Co-authored-by: xaedes <xaedes@gmail.com>
Diffstat (limited to 'examples/main/main.cpp')
-rw-r--r--  examples/main/main.cpp  19
1 files changed, 10 insertions, 9 deletions
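
The hunks below apply two recurring idioms: heavy parameters (std::vector, std::string) are taken by const reference instead of by value, and emptiness/boolean tests are written as !x or x.empty() rather than x.size() > 0 or x == false. The following is a minimal sketch of the same idioms with hypothetical names not taken from main.cpp; the clang-tidy check names in the comments are an assumption, since the commit does not list which checks fired.

#include <string>
#include <vector>

// Hypothetical helper, illustrative only: read-only containers are passed by
// const reference so no copy is made on each call (the same change is applied
// to write_logfile below); this likely corresponds to clang-tidy's
// performance-unnecessary-value-param check.
static size_t count_tokens(const std::vector<int> & tokens, const std::string & label) {
    // Prefer .empty() over .size() > 0, and !flag over flag == false
    // (readability-container-size-empty / readability-simplify-boolean-expr).
    if (tokens.empty() || label.empty()) {
        return 0;
    }
    return tokens.size();
}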
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index 9201b53b..c9ca7719 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -48,8 +48,9 @@ static bool is_interacting = false;
 void write_logfile(
     const llama_context * ctx, const gpt_params & params, const llama_model * model,
-    const std::vector<llama_token> input_tokens, const std::string output, const std::vector<llama_token> output_tokens) {
-
+    const std::vector<llama_token> & input_tokens, const std::string & output,
+    const std::vector<llama_token> & output_tokens
+) {
     if (params.logdir.empty()) {
         return;
     }
@@ -109,7 +110,7 @@ int main(int argc, char ** argv) {
     gpt_params params;
     g_params = &params;
-    if (gpt_params_parse(argc, argv, params) == false) {
+    if (!gpt_params_parse(argc, argv, params)) {
         return 1;
     }
@@ -303,7 +304,7 @@ int main(int argc, char ** argv) {
     // debug message about similarity of saved session, if applicable
     size_t n_matching_session_tokens = 0;
-    if (session_tokens.size() > 0) {
+    if (!session_tokens.empty()) {
         for (llama_token id : session_tokens) {
             if (n_matching_session_tokens >= embd_inp.size() || id != embd_inp[n_matching_session_tokens]) {
                 break;
@@ -401,7 +402,7 @@ int main(int argc, char ** argv) {
         LOG_TEE("%s: interactive mode on.\n", __func__);
-        if (params.antiprompt.size()) {
+        if (!params.antiprompt.empty()) {
             for (const auto & antiprompt : params.antiprompt) {
                 LOG_TEE("Reverse prompt: '%s'\n", antiprompt.c_str());
             }
@@ -499,7 +500,7 @@ int main(int argc, char ** argv) {
     while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
         // predict
-        if (embd.size() > 0) {
+        if (!embd.empty()) {
             // Note: n_ctx - 4 here is to match the logic for commandline prompt handling via
             // --prompt or --file which uses the same value.
             int max_embd_size = n_ctx - 4;
@@ -624,7 +625,7 @@ int main(int argc, char ** argv) {
                 LOG("n_past = %d\n", n_past);
             }
-            if (embd.size() > 0 && !path_session.empty()) {
+            if (!embd.empty() && !path_session.empty()) {
                 session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                 n_session_consumed = session_tokens.size();
             }
@@ -695,7 +696,7 @@ int main(int argc, char ** argv) {
         // if not currently processing queued inputs;
         if ((int) embd_inp.size() <= n_consumed) {
             // check for reverse prompt
-            if (params.antiprompt.size()) {
+            if (!params.antiprompt.empty()) {
                 std::string last_output;
                 for (auto id : last_tokens) {
                     last_output += llama_token_to_piece(ctx, id);
@@ -732,7 +733,7 @@ int main(int argc, char ** argv) {
                 LOG("found EOS token\n");
                 if (params.interactive) {
-                    if (params.antiprompt.size() != 0) {
+                    if (!params.antiprompt.empty()) {
                         // tokenize and inject first reverse prompt
                         const auto first_antiprompt = ::llama_tokenize(ctx, params.antiprompt.front(), false);
                         embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());