 llama.cpp | 16 ++++++++++++++++
 1 file changed, 16 insertions(+)
diff --git a/llama.cpp b/llama.cpp
index 45eeadbe..09d799f7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -11,6 +11,8 @@
 # include "ggml-cuda.h"
 #elif defined(GGML_USE_CLBLAST)
 # include "ggml-opencl.h"
+#elif defined(GGML_USE_SYCL)
+# include "ggml-sycl.h"
 #endif
 
 #ifdef GGML_USE_METAL
@@ -1278,6 +1280,8 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_cpu(bool host_buffer
     if (host_buffer) {
         buft = ggml_backend_cuda_host_buffer_type();
     }
+#elif defined(GGML_USE_SYCL)
+    buft = ggml_backend_sycl_host_buffer_type();
 #elif defined(GGML_USE_CPU_HBM)
     buft = ggml_backend_cpu_hbm_buffer_type();
 #endif
@@ -1297,6 +1301,8 @@ static ggml_backend_buffer_type_t llama_default_buffer_type_offload(int gpu) {
     buft = ggml_backend_metal_buffer_type();
 #elif defined(GGML_USE_CUBLAS)
     buft = ggml_backend_cuda_buffer_type(gpu);
+#elif defined(GGML_USE_SYCL)
+    buft = ggml_backend_sycl_buffer_type(gpu);
 #elif defined(GGML_USE_CLBLAST)
     buft = ggml_backend_opencl_buffer_type();
 #endif
@@ -10225,6 +10231,16 @@ struct llama_context * llama_new_context_with_model(
                 }
             }
         }
+#elif defined(GGML_USE_SYCL)
+        if (model->n_gpu_layers > 0) {
+            ggml_backend_t backend = ggml_backend_sycl_init(model->main_gpu);
+            if (backend == nullptr) {
+                LLAMA_LOG_ERROR("%s: failed to initialize SYCL%d backend\n", __func__, model->main_gpu);
+                llama_free(ctx);
+                return nullptr;
+            }
+            ctx->backends.push_back(backend);
+        }
 #endif
         ctx->backend_cpu = ggml_backend_cpu_init();
         if (ctx->backend_cpu == nullptr) {
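For reference, a minimal sketch (not part of this patch) of how the new code path is reached through the public llama.h API: in a build configured with GGML_USE_SYCL, loading a model with n_gpu_layers > 0 makes llama_new_context_with_model() take the ggml_backend_sycl_init() branch added above for main_gpu, and a nullptr backend propagates out as a failed context. The model path and parameter values below are illustrative, not from the patch.

// sycl_smoke.cpp -- exercises the SYCL backend branch via the public API
#include "llama.h"
#include <cstdio>

int main() {
    llama_backend_init(/*numa =*/ false);

    llama_model_params mparams = llama_model_default_params();
    mparams.n_gpu_layers = 32;   // > 0, so the GGML_USE_SYCL branch runs
    mparams.main_gpu     = 0;    // device index passed to ggml_backend_sycl_init()

    // "model.gguf" is a placeholder path
    llama_model * model = llama_load_model_from_file("model.gguf", mparams);
    if (model == nullptr) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    llama_context_params cparams = llama_context_default_params();
    llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == nullptr) {
        // this is the caller-visible result when ggml_backend_sycl_init() returns nullptr
        fprintf(stderr, "failed to create context\n");
        llama_free_model(model);
        return 1;
    }

    llama_free(ctx);
    llama_free_model(model);
    llama_backend_free();
    return 0;
}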