From c918fe8dca8fa1c4602427e0a4b88e20046f6c34 Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Wed, 17 Jan 2024 18:38:39 +0200
Subject: metal : create autorelease pool during library build (#4970)

* metal : create autorelease pool during library build

ggml-ci

* test : simplify

ggml-ci
---
 tests/CMakeLists.txt       |  1 +
 tests/test-autorelease.cpp | 28 ++++++++++++++++++++++++++++
 2 files changed, 29 insertions(+)
 create mode 100644 tests/test-autorelease.cpp

(limited to 'tests')

diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index 7c932240..d7aaab84 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -49,6 +49,7 @@ llama_build_and_test_executable(test-llama-grammar.cpp)
 llama_build_and_test_executable(test-grad0.cpp)
 # llama_build_and_test_executable(test-opt.cpp) # SLOW
 llama_build_and_test_executable(test-backend-ops.cpp)
+llama_build_and_test_executable(test-autorelease.cpp)
 
 llama_build_and_test_executable(test-rope.cpp)
 
diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp
new file mode 100644
index 00000000..289c6ba6
--- /dev/null
+++ b/tests/test-autorelease.cpp
@@ -0,0 +1,28 @@
+// ref: https://github.com/ggerganov/llama.cpp/issues/4952#issuecomment-1892864763
+
+#include <cstdio>
+#include <string>
+#include <thread>
+
+#include "llama.h"
+
+// This creates a new context inside a pthread and then tries to exit cleanly.
+int main(int argc, char ** argv) {
+    if (argc < 2) {
+        printf("Usage: %s model.gguf\n", argv[0]);
+        return 0; // intentionally return success
+    }
+
+    const std::string fname = argv[1];
+
+    std::thread([&fname]() {
+        llama_backend_init(false);
+        auto * model = llama_load_model_from_file(fname.c_str(), llama_model_default_params());
+        auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
+        llama_free(ctx);
+        llama_free_model(model);
+        llama_backend_free();
+    }).join();
+
+    return 0;
+}
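
Note: this cgit view is limited to 'tests', so the ggml-metal.m side of the change is
not shown here. The subject line names the fix: the Metal shader-library build is
wrapped in an @autoreleasepool block, so that objects autoreleased by Foundation/Metal
during compilation are drained when the block exits instead of accumulating on threads
that never run a pool. The Objective-C sketch below illustrates that pattern only; the
function name, its arguments, and the logging are assumptions for illustration, not the
actual ggml-metal.m code.

    #import <Foundation/Foundation.h>
    #import <Metal/Metal.h>

    // Sketch only: build_metal_library, device, and path are hypothetical.
    // The @autoreleasepool drains the temporaries autoreleased during the
    // library build as soon as the block exits, even on a thread (such as
    // the short-lived std::thread in the test above) that never drains a
    // pool of its own.
    static id<MTLLibrary> build_metal_library(id<MTLDevice> device, NSString * path) {
        id<MTLLibrary> library = nil;

        @autoreleasepool {
            NSError * error = nil;

            // read the Metal shader source (path is assumed, for illustration)
            NSString * src = [NSString stringWithContentsOfFile:path
                                                       encoding:NSUTF8StringEncoding
                                                          error:&error];

            // methods prefixed with "new" return a retained object under ARC,
            // so carrying the result out of the pool is safe
            library = [device newLibraryWithSource:src options:nil error:&error];

            if (error) {
                NSLog(@"failed to build Metal library: %@", error);
            }
        }

        return library;
    }

The new test targets exactly this scenario (see the issue linked at the top of
test-autorelease.cpp): the model load, and with it the Metal library build, happens on
a short-lived std::thread, which is where leaked autoreleased objects would otherwise
surface at thread exit.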