path: root/CMakeLists.txt
author    Georgi Gerganov <ggerganov@gmail.com>    2024-06-04 21:23:20 +0300
committer GitHub <noreply@github.com>              2024-06-04 21:23:20 +0300
commit    554c247caffed64465f372661f2826640cb10430 (patch)
tree      ce211403d5746a5cde175247a806223e83e17375 /CMakeLists.txt
parent    0cd6bd3483fa66124b76a8a8ac794d9ee18c70c1 (diff)
ggml : remove OpenCL (#7735)
ggml-ci
Diffstat (limited to 'CMakeLists.txt')
-rw-r--r--  CMakeLists.txt  23
1 file changed, 3 insertions, 20 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 76ea2741..cf37d5bb 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -111,7 +111,6 @@ option(LLAMA_CUDA_FA_ALL_QUANTS "llama: compile all quants for Flas
option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
option(LLAMA_HIPBLAS "llama: use hipBLAS" OFF)
option(LLAMA_HIP_UMA "llama: use HIP unified memory architecture" OFF)
-option(LLAMA_CLBLAST "llama: use CLBlast" OFF)
option(LLAMA_VULKAN "llama: use Vulkan" OFF)
option(LLAMA_VULKAN_CHECK_RESULTS "llama: run Vulkan op checks" OFF)
option(LLAMA_VULKAN_DEBUG "llama: enable Vulkan debug output" OFF)
@@ -502,22 +501,6 @@ if (LLAMA_RPC)
set(GGML_SOURCES_RPC ggml-rpc.cpp)
endif()
-if (LLAMA_CLBLAST)
- find_package(CLBlast)
- if (CLBlast_FOUND)
- message(STATUS "CLBlast found")
-
- set(GGML_HEADERS_OPENCL ggml-opencl.h)
- set(GGML_SOURCES_OPENCL ggml-opencl.cpp)
-
- add_compile_definitions(GGML_USE_CLBLAST)
-
- set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} clblast)
- else()
- message(WARNING "CLBlast not found")
- endif()
-endif()
-
if (LLAMA_VULKAN)
find_package(Vulkan)
if (Vulkan_FOUND)
@@ -1265,7 +1248,6 @@ add_library(ggml OBJECT
ggml-quants.c
ggml-quants.h
${GGML_SOURCES_CUDA} ${GGML_HEADERS_CUDA}
- ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
${GGML_SOURCES_METAL} ${GGML_HEADERS_METAL}
${GGML_SOURCES_RPC} ${GGML_HEADERS_RPC}
${GGML_SOURCES_EXTRA} ${GGML_HEADERS_EXTRA}
@@ -1353,8 +1335,9 @@ install(FILES ${CMAKE_CURRENT_BINARY_DIR}/LlamaConfig.cmake
DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/Llama)
set(GGML_PUBLIC_HEADERS "ggml.h" "ggml-alloc.h" "ggml-backend.h"
- "${GGML_HEADERS_CUDA}" "${GGML_HEADERS_OPENCL}"
- "${GGML_HEADERS_METAL}" "${GGML_HEADERS_EXTRA}")
+ "${GGML_HEADERS_CUDA}"
+ "${GGML_HEADERS_METAL}"
+ "${GGML_HEADERS_EXTRA}")
set_target_properties(ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS}")
install(TARGETS ggml PUBLIC_HEADER)
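
Note: with this commit the LLAMA_CLBLAST option and the ggml-opencl.h / ggml-opencl.cpp sources are removed entirely, so a build that previously configured with -DLLAMA_CLBLAST=ON has to switch to one of the GPU backend options that remain in this CMakeLists.txt (for example LLAMA_VULKAN or LLAMA_HIPBLAS, both visible in the hunks above). A minimal configure sketch, assuming only the option names shown in the diff; the build directory name and Release config are arbitrary choices, not part of the commit:

  # configure a GPU build using the Vulkan backend option that this file keeps
  cmake -B build -DLLAMA_VULKAN=ON
  # compile; the CLBlast/OpenCL code path no longer exists at this revision
  cmake --build build --config Release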