author    Cebtenzzre <cebtenzzre@gmail.com>  2023-09-28 17:41:44 -0400
committer GitHub <noreply@github.com>        2023-09-28 17:41:44 -0400
commit    bc39553c901a91cfcb757863586250838c83eeab (patch)
tree      507d1aedf8ad63e4ed84e37154de9abf31ba358a /examples
parent    0ccfc62a96a6b59a8faa14d1b350493f4cd51ae2 (diff)
build : enable more non-default compiler warnings (#3200)
Diffstat (limited to 'examples')
-rw-r--r--  examples/baby-llama/baby-llama.cpp                             13
-rw-r--r--  examples/llama-bench/llama-bench.cpp                            4
-rw-r--r--  examples/main/main.cpp                                          2
-rw-r--r--  examples/quantize/quantize.cpp                                  1
-rw-r--r--  examples/train-text-from-scratch/train-text-from-scratch.cpp   6
5 files changed, 14 insertions, 12 deletions
diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp
index fb1a15c4..8155101d 100644
--- a/examples/baby-llama/baby-llama.cpp
+++ b/examples/baby-llama/baby-llama.cpp
@@ -1,9 +1,12 @@
#include "ggml.h"
#include "train.h"
+
#include <vector>
#include <cassert>
-#include <random>
+#include <cstdlib>
#include <cstring>
+#include <random>
+#include <vector>
#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
@@ -64,7 +67,7 @@ static struct ggml_tensor * randomize_tensor(
break;
default:
assert(false);
- };
+ }
return tensor;
}
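This hunk deletes a stray semicolon after the closing brace of a switch statement. The exact warning flags this commit enables live in the build files, which the 'examples' filter hides from this diff, but clang's -Wextra-semi-stmt is the kind of diagnostic that catches the pattern. A minimal sketch (hypothetical function, not the real randomize_tensor):

    // A ';' after a compound statement is an empty statement -- legal,
    // but stricter warning levels flag it.
    int classify(int x) {
        switch (x) {
            case 0:  return 1;
            default: return 0;
        };  // <- this stray ';' is what the hunk removes
    }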
@@ -389,7 +392,7 @@ static void randomize_model_lora(
free_random_normal_distribution(rnd);
}
-static bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
+static void init_kv_cache(struct llama_kv_cache* cache, struct llama_model * model, int n_batch) {
const auto & hparams = model->hparams;
const uint32_t n_ctx = hparams.n_ctx;
@@ -415,14 +418,12 @@ static bool init_kv_cache(struct llama_kv_cache* cache, struct llama_model * mod
if (!cache->ctx) {
fprintf(stderr, "%s: failed to allocate memory for kv cache\n", __func__);
- return false;
+ exit(1);
}
}
cache->k = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
cache->v = ggml_new_tensor_1d(cache->ctx, GGML_TYPE_F32, n_elements);
-
- return true;
}
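init_kv_cache only ever failed when the ggml context allocation failed, and evidently its callers ignored the bool result (otherwise the commit could not change the signature without touching call sites in this diff). The hunk therefore makes the function void and turns the failure path into a hard exit. A simplified sketch of the resulting shape, using a toy Cache type rather than the real llama_kv_cache:

    #include <cstdio>
    #include <cstdlib>

    struct Cache { void * data; };

    // Before: `static bool init_cache(...)` returned false on failure,
    // which no caller checked. After: void, and failure aborts directly.
    static void init_cache(Cache * cache, size_t n_bytes) {
        cache->data = malloc(n_bytes);
        if (!cache->data) {
            fprintf(stderr, "%s: failed to allocate memory\n", __func__);
            exit(1);
        }
    }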
static bool init_kv_cache_lora(struct llama_kv_cache* cache, struct llama_model_lora * model, int n_batch) {
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 93bb0c8b..a04115c9 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -655,9 +655,9 @@ struct printer {
virtual ~printer() {}
FILE * fout;
- virtual void print_header(const cmd_params & params) { (void) params; };
+ virtual void print_header(const cmd_params & params) { (void) params; }
virtual void print_test(const test & t) = 0;
- virtual void print_footer() { };
+ virtual void print_footer() { }
};
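The same class of fix in llama-bench: a semicolon after an in-class member-function body is an extra empty declaration, which warnings such as -Wextra-semi reject. Sketch of the corrected shape (hypothetical stand-in for the printer struct):

    struct printer_sketch {
        virtual ~printer_sketch() {}
        virtual void print_header() {}   // body ends at '}', no ';' needed
        virtual void print_footer() {}
    };                                   // only the class keeps its ';'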
struct csv_printer : public printer {
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index fd506773..3a4ed3f7 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -852,7 +852,7 @@ int main(int argc, char ** argv) {
llama_backend_free();
#ifndef LOG_DISABLE_LOGS
- LOG_TEE("Log end\n")
+ LOG_TEE("Log end\n");
#endif // LOG_DISABLE_LOGS
return 0;
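The missing semicolon after LOG_TEE presumably compiled before because the macro expands to a brace-wrapped block (an assumption -- log.h is not part of this diff), and a '{ ... }' expansion silently tolerates a forgotten ';'. A toy reproduction:

    #include <cstdio>

    // Hypothetical block-bodied logging macro, not the real LOG_TEE.
    #define LOG_BLOCK(msg) { fprintf(stderr, "%s", msg); }

    int main() {
        LOG_BLOCK("Log end\n")   // compiles even without the ';'
        return 0;
    }

The GGUF_GET_KEY hunk further down shows the standard cure: wrapping the macro body in do { ... } while (0) makes a missing semicolon a compile error.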
diff --git a/examples/quantize/quantize.cpp b/examples/quantize/quantize.cpp
index 1c1d957e..c7dd0d89 100644
--- a/examples/quantize/quantize.cpp
+++ b/examples/quantize/quantize.cpp
@@ -72,6 +72,7 @@ static bool try_parse_ftype(const std::string & ftype_str_in, llama_ftype & ftyp
// usage:
// ./quantize [--allow-requantize] [--leave-output-tensor] models/llama/ggml-model.gguf [models/llama/ggml-model-quant.gguf] type [nthreads]
//
+[[noreturn]]
static void usage(const char * executable) {
printf("usage: %s [--help] [--allow-requantize] [--leave-output-tensor] model-f32.gguf [model-quant.gguf] type [nthreads]\n\n", executable);
printf(" --allow-requantize: Allows requantizing tensors that have already been quantized. Warning: This can severely reduce quality compared to quantizing from 16bit or 32bit\n");
diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index a9cf8a38..5043f32d 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -483,7 +483,7 @@ static struct ggml_tensor * llama_build_train_graphs(
}
#define GGUF_GET_KEY(ctx, dst, func, type, req, key) \
-{ \
+do { \
const std::string skey(key); \
const int kid = gguf_find_key(ctx, skey.c_str()); \
if (kid >= 0) { \
@@ -495,7 +495,7 @@ static struct ggml_tensor * llama_build_train_graphs(
} else if (req) { \
die_fmt("key not found in model: %s", skey.c_str()); \
} \
-}
+} while (0)
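The do { ... } while (0) wrapper is the idiomatic way to make a multi-statement macro behave like a single statement: it demands a trailing semicolon and stays intact inside an unbraced if/else, where a bare '{ ... }' block would break. A toy demonstration (GGUF_GET_KEY itself reads typed keys out of a gguf_context; this is not it):

    #include <cstdio>

    #define PAIR_BRACES()  { puts("a"); puts("b"); }
    #define PAIR_DOWHILE() do { puts("a"); puts("b"); } while (0)

    int main() {
        bool verbose = false;
        // This would NOT compile: the ';' after the '{...}' expansion ends
        // the if-statement early and orphans the 'else'.
        //     if (verbose) PAIR_BRACES(); else puts("quiet");
        if (verbose) PAIR_DOWHILE(); else puts("quiet");
        return 0;
    }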
static void load_llama_model_gguf(struct gguf_context * fctx, struct ggml_context * f_ggml_ctx, struct my_llama_model * model) {
// NOTE: gguf_context must be initialized with f_ggml_ctx and no_alloc=false, otherwise tensor data can not be read
@@ -786,7 +786,7 @@ struct train_params {
float rope_freq_scale;
};
-struct train_params get_default_train_params() {
+static struct train_params get_default_train_params() {
struct train_params params;
params.common = get_default_train_params_common();
params.fn_vocab_model = "ggml-vic7b-uncensored-q4_0.bin";
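Finally, get_default_train_params() gains static: it is evidently used only inside this .cpp file (otherwise the commit could not change its linkage), and internal linkage silences -Wmissing-declarations-style warnings, which fire when a non-static function is defined without a prior declaration in any header. Sketch of the distinction:

    // Without 'static', this definition has external linkage and no prior
    // declaration -- -Wmissing-declarations would flag it.
    static int answer() { return 42; }   // internal linkage: no warning

    int main() { return answer() == 42 ? 0 : 1; }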