summaryrefslogtreecommitdiff
path: root/llama.cpp
diff options
context:
space:
mode:
authorAidanBeltonS <87009434+AidanBeltonS@users.noreply.github.com>2024-02-26 14:02:11 +0000
committerGitHub <noreply@github.com>2024-02-26 19:32:11 +0530
commite849078c6e09e72fdd2c95ba61f5fba9a7b2d9ef (patch)
treee7a34b8b13e10867d4194cbec4f78d111900b344 /llama.cpp
parent67fd33132fab93e6c2087bd6fa656a8a57419efa (diff)
[SYCL] Add support for soft_max ALiBi (#5639)
* Add support for bias
* Update pre-processor
* rm commented code
* fix format
* fix CI
---------
Co-authored-by: Abhilash Majumder <30946547+abhilash1910@users.noreply.github.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r--llama.cpp4
1 file changed, 2 insertions, 2 deletions
diff --git a/llama.cpp b/llama.cpp
index 28430254..f549e7d0 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4894,8 +4894,8 @@ static struct ggml_tensor * llm_build_kqv(
ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
}
-#if defined(GGML_USE_VULKAN) || defined(GGML_USE_KOMPUTE) || defined(GGML_USE_SYCL)
-#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Vulkan, Kompute, and SYCL")
+#if defined(GGML_USE_VULKAN) || defined(GGML_USE_KOMPUTE)
+#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Vulkan, and Kompute")
#pragma message(" Falling back to ggml_alibi(). Will become an error in Mar 2024")
#pragma message("ref: https://github.com/ggerganov/llama.cpp/pull/5488")
if (hparams.f_max_alibi_bias > 0.0f) {