summary refs log tree commit diff
path: root/llama.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  |  14
1 files changed, 7 insertions, 7 deletions
diff --git a/llama.cpp b/llama.cpp
index 685712d1..666acc21 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -449,7 +449,7 @@ struct LLM_TN {
//
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
-{ \
+do { \
const std::string skey(key); \
const int kid = gguf_find_key(ctx, skey.c_str()); \
if (kid >= 0) { \
@@ -461,7 +461,7 @@ struct LLM_TN {
} else if (req) { \
throw std::runtime_error(format("key not found in model: %s", skey.c_str())); \
} \
-}
+} while (0)
//
// ggml helpers
@@ -1913,7 +1913,7 @@ static void llm_load_hparams(
}
} break;
default: (void)0;
- };
+ }
model.ftype = ml.ftype;
}
@@ -2438,7 +2438,7 @@ static void llm_load_tensors(
} break;
default:
throw std::runtime_error("unknown architecture");
- };
+ }
}
ml.done_getting_tensors();
@@ -3981,7 +3981,7 @@ static struct ggml_cgraph * llama_build_graph(
} break;
default:
GGML_ASSERT(false);
- };
+ }
return result;
}
@@ -4626,7 +4626,7 @@ static std::vector<llama_vocab::id> llama_tokenize_internal(const llama_vocab &
llm_tokenizer_bpe tokenizer(vocab);
tokenizer.tokenize(raw_text, output);
} break;
- };
+ }
return output;
}
@@ -7520,7 +7520,7 @@ int llama_token_to_piece(const struct llama_model * model, llama_token token, ch
buf[2] = '\x85';
return 3;
} else if (llama_is_control_token(model->vocab, token)) {
- ;
+ // do nothing
} else if (llama_is_byte_token(model->vocab, token)) {
if (length < 1) {
return -1;