summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJared Van Bortel <jared@nomic.ai>2024-01-29 17:11:27 -0500
committerGitHub <noreply@github.com>2024-01-29 17:11:27 -0500
commit6daa69ee81851ab26ca8aefca1a4202941fc0262 (patch)
treee6cf354fcd28861c35b78488a4b01ee98f113c90
parentfbf1ddec69f7001cc707de17fa74d7200813bbac (diff)
kompute : fix fallback to CPU (#5201)
-rw-r--r--llama.cpp4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 9631506c..796aaa89 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4136,7 +4136,7 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
}
#ifdef GGML_USE_KOMPUTE
- if (ggml_vk_has_device() && params.n_gpu_layers > 0 && (
+ if (params.n_gpu_layers > 0 && (
!(model.arch == LLM_ARCH_LLAMA || model.arch == LLM_ARCH_FALCON)
|| !(
model.ftype == LLAMA_FTYPE_ALL_F32 ||
@@ -4145,8 +4145,8 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
model.ftype == LLAMA_FTYPE_MOSTLY_Q4_1
)
)) {
- // disable Vulkan due to unsupported model architecture or quantization type
// TODO(cebtenzzre): propagate this error outside of llama_load_model_from_file
+ LLAMA_LOG_WARN("%s: disabling Kompute due to unsupported model arch or quantization\n", __func__);
params.n_gpu_layers = 0;
}
#endif