| author | Cebtenzzre <cebtenzzre@gmail.com> | 2023-08-30 02:20:26 -0400 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-08-30 09:20:26 +0300 |
| commit | 849408957c687cde4ab32c147107f643fc55130b (patch) | |
| tree | 5ca464144f25b39f9978a9e650e70e77dcb583e7 | |
| parent | 06abf8eebabe086ca4003dee2754ab45032cd3fd (diff) | |
tests : add a C compliance test (#2848)
* tests : add a C compliance test
* make : build C compliance test by default
* make : fix clean and make sure C test fails on clang
* make : move -Werror=implicit-int to CFLAGS
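For context, `-Werror=implicit-int` turns GCC's and Clang's implicit-int diagnostic into a hard error, so pre-C99 declarations that omit a type can no longer slip through a C build. The snippet below is an illustrative example of what the flag rejects; it is not part of this commit:

```c
/* Illustrative only -- not from the llama.cpp tree.
   With -Werror=implicit-int both declarations below fail to compile,
   because they rely on the pre-C99 "implicit int" rule. */

count;                  /* file-scope variable with no type: implicit int */

main(void) {            /* function with no return type: implicit int */
    return count;
}

/* The C99-and-later form spells the types out:
   int count;
   int main(void) { return count; } */
```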
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | CMakeLists.txt | 1 |
| -rw-r--r-- | Makefile | 9 |
| -rw-r--r-- | tests/CMakeLists.txt | 5 |
| -rw-r--r-- | tests/test-c.c | 3 |

4 files changed, 15 insertions, 3 deletions
```diff
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 1eae2d67..d6c1b3b3 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -402,6 +402,7 @@ if (LLAMA_ALL_WARNINGS)
             -Wstrict-prototypes
             -Wpointer-arith
             -Wmissing-prototypes
+            -Werror=implicit-int
         )
         set(cxx_flags
             -Wall
diff --git a/Makefile b/Makefile
--- a/Makefile
+++ b/Makefile
@@ -1,5 +1,5 @@
 # Define the default target now so that it is always the first target
-BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam_search
+BUILD_TARGETS = main quantize quantize-stats perplexity embedding vdot train-text-from-scratch convert-llama2c-to-ggml simple save-load-state server embd-input-test gguf llama-bench baby-llama beam_search tests/test-c.o
 
 # Binaries only useful for tests
 TEST_TARGETS = tests/test-llama-grammar tests/test-grammar-parser tests/test-double-float tests/test-grad0 tests/test-opt tests/test-quantize-fns tests/test-quantize-perf tests/test-sampling tests/test-tokenizer-0-llama tests/test-tokenizer-0-falcon tests/test-tokenizer-1
@@ -64,7 +64,7 @@ endif
 
 # warnings
 CFLAGS   += -Wall -Wextra -Wpedantic -Wcast-qual -Wdouble-promotion -Wshadow -Wstrict-prototypes -Wpointer-arith \
-            -Wmissing-prototypes
+            -Wmissing-prototypes -Werror=implicit-int
 CXXFLAGS += -Wall -Wextra -Wpedantic -Wcast-qual -Wno-unused-function -Wno-multichar
 
 # OS specific
@@ -369,7 +369,7 @@ libllama.so: llama.o ggml.o $(OBJS)
 	$(CXX) $(CXXFLAGS) -shared -fPIC -o $@ $^ $(LDFLAGS)
 
 clean:
-	rm -vf *.o *.so *.dll benchmark-matmult build-info.h $(BUILD_TARGETS) $(TEST_TARGETS)
+	rm -vf *.o tests/*.o *.so *.dll benchmark-matmult build-info.h $(BUILD_TARGETS) $(TEST_TARGETS)
 
 #
 # Examples
@@ -489,3 +489,6 @@ tests/test-tokenizer-0-llama: tests/test-tokenizer-0-llama.cpp build-info.h ggml
 
 tests/test-tokenizer-1: tests/test-tokenizer-1.cpp build-info.h ggml.o llama.o common.o $(OBJS)
 	$(CXX) $(CXXFLAGS) $(filter-out %.h,$^) -o $@ $(LDFLAGS)
+
+tests/test-c.o: tests/test-c.c llama.h
+	$(CC) $(CFLAGS) -c $(filter-out %.h,$^) -o $@
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index ca1f39d3..483210d7 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -37,3 +37,8 @@ llama_build_and_test_executable(test-grammar-parser.cpp)
 llama_build_and_test_executable(test-llama-grammar.cpp)
 llama_build_and_test_executable(test-grad0.cpp) # SLOW
 # llama_build_and_test_executable(test-opt.cpp) # SLOW
+
+# dummy executable - not installed
+get_filename_component(TEST_TARGET test-c.c NAME_WE)
+add_executable(${TEST_TARGET} test-c.c)
+target_link_libraries(${TEST_TARGET} PRIVATE llama)
diff --git a/tests/test-c.c b/tests/test-c.c
new file mode 100644
index 00000000..a0507108
--- /dev/null
+++ b/tests/test-c.c
@@ -0,0 +1,3 @@
+#include "llama.h"
+
+int main(void) {}
```
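The new `tests/test-c.c` does nothing but include `llama.h` and compile it as a standalone C translation unit, so any C++-only construct in the public header breaks the build. A header typically stays consumable from plain C by keeping C++-only features out of its declarations and wrapping them in an `extern "C"` guard. The sketch below shows that general pattern with a hypothetical header; it is not quoted from llama.h:

```c
/* example_api.h -- hypothetical header, shown only to illustrate the kind of
   C compatibility that a compliance test like tests/test-c.c checks for */
#ifndef EXAMPLE_API_H
#define EXAMPLE_API_H

#ifdef __cplusplus
extern "C" {            /* give the declarations C linkage when included from C++ */
#endif

/* only C-compatible declarations here: no references, default arguments,
   overloads, or templates */
int example_api_version(void);

#ifdef __cplusplus
}
#endif

#endif /* EXAMPLE_API_H */
```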