From 6e0438da3cc95b89cdbf55f45fa4e324d9076792 Mon Sep 17 00:00:00 2001 From: Steve Grubb Date: Thu, 14 Mar 2024 14:29:32 -0400 Subject: gguf : fix resource leaks (#6061) There are several places where a gguf context is allocated. A call to gguf_free is missing in some error paths. Also on linux, llama-bench was missing a fclose. --- examples/llava/clip.cpp | 4 ++++ 1 file changed, 4 insertions(+) (limited to 'examples/llava/clip.cpp') diff --git a/examples/llava/clip.cpp b/examples/llava/clip.cpp index 6653b815..2035554e 100644 --- a/examples/llava/clip.cpp +++ b/examples/llava/clip.cpp @@ -995,6 +995,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { if (!new_clip->ctx_data) { fprintf(stderr, "%s: ggml_init() failed\n", __func__); clip_free(new_clip); + gguf_free(ctx); return nullptr; } @@ -1002,6 +1003,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { if (!fin) { printf("cannot open model file for loading tensors\n"); clip_free(new_clip); + gguf_free(ctx); return nullptr; } @@ -1023,6 +1025,7 @@ struct clip_ctx * clip_model_load(const char * fname, const int verbosity = 1) { if (!fin) { printf("%s: failed to seek for tensor %s\n", __func__, name); clip_free(new_clip); + gguf_free(ctx); return nullptr; } int num_bytes = ggml_nbytes(cur); @@ -1908,6 +1911,7 @@ bool clip_model_quantize(const char * fname_inp, const char * fname_out, const i break; default: printf("Please use an input file in f32 or f16\n"); + gguf_free(ctx_out); return false; } -- cgit v1.2.3