Diffstat (limited to 'tests')
-rw-r--r--  tests/.gitignore                  |  2
-rw-r--r--  tests/CMakeLists.txt              | 14
-rw-r--r--  tests/get-model.cpp               | 21
-rw-r--r--  tests/get-model.h                 |  2
-rw-r--r--  tests/test-autorelease.cpp        | 12
-rw-r--r--  tests/test-model-load-cancel.cpp  | 28
6 files changed, 68 insertions, 11 deletions
diff --git a/tests/.gitignore b/tests/.gitignore
new file mode 100644
index 00000000..59be43b9
--- /dev/null
+++ b/tests/.gitignore
@@ -0,0 +1,2 @@
+*
+!*.*
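(The two patterns combine to keep build artifacts out of git: "*" ignores everything under tests/, and "!*.*" re-includes any file that has an extension, so the extensionless test binaries built in-tree stay untracked while the .cpp/.h sources and CMakeLists.txt remain visible.)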
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index d7aaab84..3e40a78c 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -1,6 +1,6 @@
function(llama_build_executable source)
get_filename_component(TEST_TARGET ${source} NAME_WE)
- add_executable(${TEST_TARGET} ${source})
+ add_executable(${TEST_TARGET} ${source} get-model.cpp)
install(TARGETS ${TEST_TARGET} RUNTIME)
target_link_libraries(${TEST_TARGET} PRIVATE common)
endfunction()
@@ -8,14 +8,20 @@ endfunction()
function(llama_test_executable name source)
get_filename_component(TEST_TARGET ${source} NAME_WE)
add_test(NAME ${name} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
+ set_property(TEST ${name} PROPERTY LABELS "main")
endfunction()
function(llama_build_and_test_executable source)
+ llama_build_and_test_executable_with_label(${source} "main")
+endfunction()
+
+function(llama_build_and_test_executable_with_label source label)
get_filename_component(TEST_TARGET ${source} NAME_WE)
- add_executable(${TEST_TARGET} ${source})
+ add_executable(${TEST_TARGET} ${source} get-model.cpp)
install(TARGETS ${TEST_TARGET} RUNTIME)
target_link_libraries(${TEST_TARGET} PRIVATE common)
add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
+ set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${label})
endfunction()
# llama_build_and_test_executable(test-double-float.cpp) # SLOW
@@ -49,10 +55,12 @@ llama_build_and_test_executable(test-llama-grammar.cpp)
llama_build_and_test_executable(test-grad0.cpp)
# llama_build_and_test_executable(test-opt.cpp) # SLOW
llama_build_and_test_executable(test-backend-ops.cpp)
-llama_build_and_test_executable(test-autorelease.cpp)
llama_build_and_test_executable(test-rope.cpp)
+llama_build_and_test_executable_with_label(test-model-load-cancel.cpp "model")
+llama_build_and_test_executable_with_label(test-autorelease.cpp "model")
+
# dummy executable - not installed
get_filename_component(TEST_TARGET test-c.c NAME_WE)
add_executable(${TEST_TARGET} test-c.c)
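Together these CMake changes attach ctest labels to every test: registrations that go through llama_test_executable and llama_build_and_test_executable get the "main" label, while the two tests that need a real GGUF file are registered through the new llama_build_and_test_executable_with_label wrapper with the "model" label. The fast suite can then be run on its own with ctest -L main, and the model-dependent tests with ctest -L model after pointing LLAMACPP_TEST_MODELFILE at a model file.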
diff --git a/tests/get-model.cpp b/tests/get-model.cpp
new file mode 100644
index 00000000..4edb685f
--- /dev/null
+++ b/tests/get-model.cpp
@@ -0,0 +1,21 @@
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+
+#include "get-model.h"
+
+char * get_model_or_exit(int argc, char *argv[]) {
+ char * model_path;
+ if (argc > 1) {
+ model_path = argv[1];
+
+ } else {
+ model_path = getenv("LLAMACPP_TEST_MODELFILE");
+ if (!model_path || strlen(model_path) == 0) {
+ fprintf(stderr, "\033[33mWARNING: No model file provided. Skipping this test. Set LLAMACPP_TEST_MODELFILE=<gguf_model_path> to silence this warning and run this test.\n\033[0m");
+ exit(EXIT_SUCCESS);
+ }
+ }
+
+ return model_path;
+}
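For reference, a minimal sketch (not part of the diff) of how a test consumes this helper; it either returns a usable path or exits 0, so a missing model counts as a skip rather than a failure:

    #include <cstdio>

    #include "get-model.h"

    int main(int argc, char * argv[]) {
        // Returns argv[1] or $LLAMACPP_TEST_MODELFILE, or exits with
        // EXIT_SUCCESS (skip) when neither provides a model path.
        char * model_path = get_model_or_exit(argc, argv);
        fprintf(stderr, "would load model from '%s'\n", model_path);
        return 0;
    }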
diff --git a/tests/get-model.h b/tests/get-model.h
new file mode 100644
index 00000000..81a3a0fe
--- /dev/null
+++ b/tests/get-model.h
@@ -0,0 +1,2 @@
+#pragma once
+char * get_model_or_exit(int, char*[]);
diff --git a/tests/test-autorelease.cpp b/tests/test-autorelease.cpp
index 289c6ba6..36a23c0b 100644
--- a/tests/test-autorelease.cpp
+++ b/tests/test-autorelease.cpp
@@ -5,19 +5,15 @@
#include <thread>
#include "llama.h"
+#include "get-model.h"
// This creates a new context inside a pthread and then tries to exit cleanly.
int main(int argc, char ** argv) {
- if (argc < 2) {
- printf("Usage: %s model.gguf\n", argv[0]);
- return 0; // intentionally return success
- }
+ auto * model_path = get_model_or_exit(argc, argv);
- const std::string fname = argv[1];
-
- std::thread([&fname]() {
+ std::thread([&model_path]() {
llama_backend_init(false);
- auto * model = llama_load_model_from_file(fname.c_str(), llama_model_default_params());
+ auto * model = llama_load_model_from_file(model_path, llama_model_default_params());
auto * ctx = llama_new_context_with_model(model, llama_context_default_params());
llama_free(ctx);
llama_free_model(model);
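Note the lambda now captures the raw model_path pointer instead of a std::string: this is safe because the pointer refers into argv or the process environment, both of which outlive the thread, which is joined before main returns (the join falls outside the hunk shown).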
diff --git a/tests/test-model-load-cancel.cpp b/tests/test-model-load-cancel.cpp
new file mode 100644
index 00000000..7ea4bbac
--- /dev/null
+++ b/tests/test-model-load-cancel.cpp
@@ -0,0 +1,28 @@
+#include "llama.h"
+#include "get-model.h"
+
+#include <cstdio>
+#include <cstdlib>
+
+int main(int argc, char *argv[]) {
+ auto * model_path = get_model_or_exit(argc, argv);
+ auto * file = fopen(model_path, "r");
+ if (file == nullptr) {
+ fprintf(stderr, "no model at '%s' found\n", model_path);
+ return EXIT_FAILURE;
+ }
+
+ fprintf(stderr, "using '%s'\n", model_path);
+ fclose(file);
+
+ llama_backend_init(false);
+ auto params = llama_model_params{};
+ params.use_mmap = false;
+ params.progress_callback = [](float progress, void * ctx){
+ (void) ctx;
+ return progress > 0.50;
+ };
+ auto * model = llama_load_model_from_file(model_path, params);
+ llama_backend_free();
+ return model == nullptr ? EXIT_SUCCESS : EXIT_FAILURE;
+}
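For context: the progress-callback contract this test relies on is that returning false from progress_callback aborts the load. A callback of progress > 0.50 therefore reports false for any progress at or below 50% and cancels the load almost immediately, so the test inverts the usual check and treats a nullptr model as success. use_mmap is disabled, presumably so the file is actually read incrementally and progress reports fire during the load.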