summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  examples/gguf/gguf.cpp                                    | 1 +
-rw-r--r--  examples/llama-bench/llama-bench.cpp                      | 1 +
-rw-r--r--  examples/llava/clip.cpp                                   | 4 ++++
-rw-r--r--  examples/train-text-from-scratch/train-text-from-scratch.cpp | 1 +
4 files changed, 7 insertions(+), 0 deletions(-)
diff --git a/examples/gguf/gguf.cpp b/examples/gguf/gguf.cpp
index e67be4fb..5444503a 100644
--- a/examples/gguf/gguf.cpp
+++ b/examples/gguf/gguf.cpp
@@ -211,6 +211,7 @@ static bool gguf_ex_read_1(const std::string & fname) {
for (int j = 0; j < ggml_nelements(cur); ++j) {
if (data[j] != 100 + i) {
fprintf(stderr, "%s: tensor[%d]: data[%d] = %f\n", __func__, i, j, data[j]);
+ gguf_free(ctx);
return false;
}
}
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index bf94e7e7..d6e5e049 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -103,6 +103,7 @@ static std::string get_cpu_info() {
}
}
}
+ fclose(f);
}
#endif
// TODO: other platforms
diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp
index 6653b815..2035554e 100644
--- a/examples/llava/clip.cpp
+++ b/examples/llava/clip.cpp
@@ -995,6 +995,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
if (!new_clip->ctx_data) {
fprintf(stderr, "%s: ggml_init() failed\n", __func__);
clip_free(new_clip);
+ gguf_free(ctx);
return nullptr;
}
@@ -1002,6 +1003,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
if (!fin) {
printf("cannot open model file for loading tensors\n");
clip_free(new_clip);
+ gguf_free(ctx);
return nullptr;
}
@@ -1023,6 +1025,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) {
if (!fin) {
printf("%s: failed to seek for tensor %s\n", __func__, name);
clip_free(new_clip);
+ gguf_free(ctx);
return nullptr;
}
int num_bytes = ggml_nbytes(cur);
@@ -1908,6 +1911,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i
break;
default:
printf("Please use an input file in f32 or f16\n");
+ gguf_free(ctx_out);
return false;
}
diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index 7eafe851..7d06e401 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -711,6 +711,7 @@ static bool load_checkpoint_file(const char * filename, struct my_llama_model *
load_checkpoint_gguf(fctx, f_ggml_ctx, model, train);
+ gguf_free(fctx);
return true;
}