author    crasm <crasm@git.vczf.net>    2024-01-26 07:18:00 -0500
committer GitHub <noreply@github.com>   2024-01-26 14:18:00 +0200
commit    413e7b0559f922bd4de5e9eec548829d111651b1 (patch)
tree      8d9ddf3f1e8ea25a105ed8aa04321e57d93cbfff /tests/test-autorelease.cpp
parent    6dd3c28c9cd1ef74b49d79f47d668759346a3c6c (diff)
ci : add model tests + script wrapper (#4586)
* scripts : add lib.sh and lib_test.sh
* scripts : stub out new ci-run.sh script
* scripts : switch to PascalCase for functions

  This looks a little odd at first, but I find it very useful as a convention
  to know if a command is part of our code vs a builtin.

* scripts : add some fancy conversion from snake_case to PascalCase
* Add venv to ci/run.sh
* Revert scripts work
* scripts : add wrapper script for local use of ci/run.sh
* Simplify .gitignore for tests, clang-tidy fixes
* Label all ctest tests
* ci : ctest uses -L main
* Attempt at writing ctest_with_model
* Update test-model-load-cancel
* ci : add ctest_with_model for debug and release

  ggml-ci

* Fix gg_get_model function

  ggml-ci

* got stuck on CMake
* Add get_model.cpp to tests/CMakeLists.txt

  ggml-ci

* Fix README.md output for ctest_with_model

  ggml-ci

* workflows : use `-L main` for all ctest

  ggml-ci

* Fixes
* GG_RUN_CTEST_MODELFILE => LLAMACPP_TESTMODELFILE
* Always show warning rather than failing if model file variable is not set
* scripts : update usage text for ci-run.sh
Diffstat (limited to 'tests/test-autorelease.cpp')
-rw-r--r--  tests/test-autorelease.cpp | 12
1 file changed, 4 insertions, 8 deletions
diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp
index 289c6ba6..36a23c0b 100644
--- a/tests/test-autorelease.cpp
+++ b/tests/test-autorelease.cpp
@@ -5,19 +5,15 @@
 #include <thread>
 
 #include "llama.h"
+#include "get-model.h"
 
 // This creates a new context inside a pthread and then tries to exit cleanly.
 int main(int argc, char ** argv) {
-    if (argc < 2) {
-        printf("Usage: %s model.gguf\n", argv[0]);
-        return 0; // intentionally return success
-    }
+    auto * model_path = get_model_or_exit(argc, argv);
 
-    const std::string fname = argv[1];
-
-    std::thread([&fname]() {
+    std::thread([&model_path]() {
         llama_backend_init(false);
-        auto * model = llama_load_model_from_file(fname.c_str(), llama_model_default_params());
+        auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
         auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
         llama_free(ctx);
         llama_free_model(model);
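
For context, here is a minimal sketch of what the get_model_or_exit() helper included from get-model.h plausibly does. It is inferred only from the commit message above (an argv fallback, the LLAMACPP_TESTMODELFILE variable, and warning rather than failing when it is unset); the actual implementation lives in the new get_model.cpp and may differ in detail.

// Hypothetical reconstruction of get_model_or_exit(), based on the commit
// message; not the verbatim llama.cpp implementation.
#include <cstdio>
#include <cstdlib>
#include <cstring>

char * get_model_or_exit(int argc, char ** argv) {
    // An explicit command-line argument still takes precedence,
    // matching the old "Usage: %s model.gguf" behavior.
    if (argc > 1) {
        return argv[1];
    }
    // Otherwise fall back to the model file variable named in the
    // commit message (GG_RUN_CTEST_MODELFILE => LLAMACPP_TESTMODELFILE).
    char * model_path = getenv("LLAMACPP_TESTMODELFILE");
    if (model_path == NULL || strlen(model_path) == 0) {
        // Warn rather than fail when the variable is not set, so plain
        // `ctest -L main` runs can skip model-dependent tests cleanly.
        fprintf(stderr, "WARNING: no model file given, skipping test\n");
        exit(EXIT_SUCCESS);
    }
    return model_path;
}

Exiting with EXIT_SUCCESS when no model is available mirrors the deleted "return 0; // intentionally return success" path, so CI runs without a model still report green.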