From e8dc55d0065d076d4c20f3c4bfca562701b4edfe Mon Sep 17 00:00:00 2001
From: Jared Van Bortel
Date: Tue, 30 Jan 2024 19:04:37 -0500
Subject: kompute : llama-bench support and ggml_cpu_has_kompute() (#5226)

---
 common/common.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/common/common.cpp b/common/common.cpp
index 28801367..0dd1c50c 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1521,6 +1521,7 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "cpu_has_avx512_vnni: %s\n", ggml_cpu_has_avx512_vnni() ? "true" : "false");
     fprintf(stream, "cpu_has_cublas: %s\n", ggml_cpu_has_cublas() ? "true" : "false");
     fprintf(stream, "cpu_has_clblast: %s\n", ggml_cpu_has_clblast() ? "true" : "false");
+    fprintf(stream, "cpu_has_kompute: %s\n", ggml_cpu_has_kompute() ? "true" : "false");
     fprintf(stream, "cpu_has_fma: %s\n", ggml_cpu_has_fma() ? "true" : "false");
     fprintf(stream, "cpu_has_gpublas: %s\n", ggml_cpu_has_gpublas() ? "true" : "false");
     fprintf(stream, "cpu_has_neon: %s\n", ggml_cpu_has_neon() ? "true" : "false");
--
cgit v1.2.3