path: root/common/common.cpp
author    0cc4m <picard12@live.de>          2024-06-03 10:59:14 +0200
committer GitHub <noreply@github.com>       2024-06-03 10:59:14 +0200
commit    3d7ebf63123b8652fb7bbecef7ba731202309901 (patch)
tree      8adfcc3dd20946ece9c0b8d15b131823b24455ae /common/common.cpp
parent    a10cda58d3199cd85305e0f03a8c6056714ae2e8 (diff)
Vulkan Mixture of Experts (MoE) support (#7628)
* Finish Vulkan mul_mat_id implementation
* Add Vulkan sum_rows and div ops
* Fix MUL_MAT_ID matrix matrix shader
* Fix MUL_MAT_ID matrix vector shader dispatch size
* Fix MUL_MAT_ID matrix vector shader and dispatch code
* Update Vulkan CPU offload for MUL_MAT_ID
* Fix crash when using split mode none and setting a main GPU
Diffstat (limited to 'common/common.cpp')
-rw-r--r--  common/common.cpp  12
1 file changed, 6 insertions, 6 deletions
diff --git a/common/common.cpp b/common/common.cpp
index 65103c3c..022bfe28 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1002,9 +1002,9 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
return true;
}
params.main_gpu = std::stoi(argv[i]);
-#ifndef GGML_USE_CUDA_SYCL
- fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL. Setting the main GPU has no effect.\n");
-#endif // GGML_USE_CUDA_SYCL
+#ifndef GGML_USE_CUDA_SYCL_VULKAN
+ fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the main GPU has no effect.\n");
+#endif // GGML_USE_CUDA_SYCL_VULKAN
return true;
}
if (arg == "--split-mode" || arg == "-sm") {
@@ -1030,9 +1030,9 @@ bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg, gpt_pa
invalid_param = true;
return true;
}
-#ifndef GGML_USE_CUDA_SYCL
- fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL. Setting the split mode has no effect.\n");
-#endif // GGML_USE_CUDA_SYCL
+#ifndef GGML_USE_CUDA_SYCL_VULKAN
+ fprintf(stderr, "warning: llama.cpp was compiled without CUDA/SYCL/Vulkan. Setting the split mode has no effect.\n");
+#endif // GGML_USE_CUDA_SYCL_VULKAN
return true;
}
if (arg == "--tensor-split" || arg == "-ts") {
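For context, the diff swaps the guard macro from GGML_USE_CUDA_SYCL to GGML_USE_CUDA_SYCL_VULKAN so the "has no effect" warnings for --main-gpu and --split-mode are suppressed when any of the three GPU backends is compiled in. A minimal sketch of how such an aggregate guard could be defined near the top of common.cpp, assuming the usual per-backend defines (GGML_USE_CUDA, GGML_USE_SYCL, GGML_USE_VULKAN); the exact placement and spelling here are an assumption, not shown in this diff:

// Old aggregate guard: only CUDA and SYCL counted as GPU backends.
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL)
#define GGML_USE_CUDA_SYCL
#endif

// New aggregate guard: Vulkan is also treated as a GPU backend, so the
// warnings above no longer fire on Vulkan-only builds.
#if defined(GGML_USE_CUDA) || defined(GGML_USE_SYCL) || defined(GGML_USE_VULKAN)
#define GGML_USE_CUDA_SYCL_VULKAN
#endif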