| author | slaren <slarengh@gmail.com> | 2024-03-26 01:16:01 +0100 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-03-26 01:16:01 +0100 |
| commit | 280345968dabc00d212d43e31145f5c9961a7604 (patch) | |
| tree | 4d0ada8b59a4c15cb6d4fe1a6b4740a30dcdb0f2 | /examples/llama-bench/llama-bench.cpp |
| parent | b06c16ef9f81d84da520232c125d4d8a1d273736 (diff) | |
cuda : rename build flag to LLAMA_CUDA (#6299)
Diffstat (limited to 'examples/llama-bench/llama-bench.cpp')
-rw-r--r-- | examples/llama-bench/llama-bench.cpp | 4 |
1 file changed, 2 insertions(+), 2 deletions(-)
```diff
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 82413b79..27e11320 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -113,7 +113,7 @@ static std::string get_cpu_info() {
 
 static std::string get_gpu_info() {
     std::string id;
-#ifdef GGML_USE_CUBLAS
+#ifdef GGML_USE_CUDA
     int count = ggml_backend_cuda_get_device_count();
     for (int i = 0; i < count; i++) {
         char buf[128];
@@ -808,7 +808,7 @@ struct test {
 
 const std::string test::build_commit = LLAMA_COMMIT;
 const int test::build_number = LLAMA_BUILD_NUMBER;
-const bool test::cuda = !!ggml_cpu_has_cublas();
+const bool test::cuda = !!ggml_cpu_has_cuda();
 const bool test::opencl = !!ggml_cpu_has_clblast();
 const bool test::vulkan = !!ggml_cpu_has_vulkan();
 const bool test::kompute = !!ggml_cpu_has_kompute();
```
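For context, the sketch below (hypothetical, not part of the commit) illustrates the pattern both hunks rely on after the rename: a `GGML_USE_CUDA` preprocessor guard gates the CUDA device enumeration that `get_gpu_info()` performs, and `ggml_cpu_has_cuda()` replaces `ggml_cpu_has_cublas()` as the runtime query for whether ggml was built with the CUDA backend. The `gpu_summary()` helper and `main()` are invented names for illustration; only the ggml calls themselves come from llama-bench.cpp and the ggml headers of that era.

```cpp
// Hypothetical standalone sketch (not code from the commit): mirrors the
// GGML_USE_CUDA guard pattern that this diff renames. Built without the CUDA
// backend, the guarded block compiles away and the program reports no devices.
#include <cstdio>
#include <string>

#include "ggml.h"        // ggml_cpu_has_cuda()
#ifdef GGML_USE_CUDA
#include "ggml-cuda.h"   // CUDA backend device queries
#endif

// Assumed helper, named here for illustration only: collect a short
// description of each visible CUDA device, "/"-separated, like get_gpu_info().
static std::string gpu_summary() {
    std::string id;
#ifdef GGML_USE_CUDA
    int count = ggml_backend_cuda_get_device_count();
    for (int i = 0; i < count; i++) {
        char buf[128];
        ggml_backend_cuda_get_device_description(i, buf, sizeof(buf));
        id += buf;
        if (i < count - 1) {
            id += "/";
        }
    }
#endif
    return id;
}

int main() {
    // ggml_cpu_has_cuda() reports whether ggml was built with the CUDA backend,
    // matching the renamed test::cuda field in the second hunk.
    printf("cuda build : %d\n", ggml_cpu_has_cuda());

    const std::string info = gpu_summary();
    printf("gpu info   : %s\n", info.empty() ? "(none)" : info.c_str());
    return 0;
}
```

If `GGML_USE_CUDA` is not defined at compile time (presumably set by the renamed `LLAMA_CUDA` build option), the guarded block drops out entirely, so no CUDA headers or libraries are required to build the CPU-only variant.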