| author | Meng Zhang <meng@tabbyml.com> | 2023-11-06 22:49:08 -0800 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-11-07 08:49:08 +0200 |
| commit | 46876d2a2c92e60579dc732cdb8cbd243b06f317 (patch) | |
| tree | 8387e95867f96505ccbc909133eaa189e479db32 /ggml-cuda.h | |
| parent | 381efbf480959bb6d1e247a8b0c2328f22e350f8 (diff) | |
cuda : support running on CPU for GGML_USE_CUBLAS=ON build (#3946)
* prototyping the idea of supporting CPU execution for a GGML_USE_CUBLAS=on build
* doc: add comments to ggml_cublas_loaded()
* fix defined(...)
Diffstat (limited to 'ggml-cuda.h')
-rw-r--r-- | ggml-cuda.h | 5
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/ggml-cuda.h b/ggml-cuda.h
index 57adc9cf..528e66c3 100644
--- a/ggml-cuda.h
+++ b/ggml-cuda.h
@@ -17,7 +17,12 @@ extern "C" {
 #define GGML_CUDA_MAX_DEVICES 16
+// Always success. To check if CUDA is actually loaded, use `ggml_cublas_loaded`.
 GGML_API void   ggml_init_cublas(void);
+
+// Returns `true` if there are available CUDA devices and cublas loads successfully; otherwise, it returns `false`.
+GGML_API bool   ggml_cublas_loaded(void);
+
 GGML_API void * ggml_cuda_host_malloc(size_t size);
 GGML_API void   ggml_cuda_host_free(void * ptr);
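For context, a minimal caller-side sketch of how the new API is meant to be used. This is hypothetical example code, not part of the commit; only `ggml_init_cublas`, `ggml_cublas_loaded`, and `ggml_cuda_host_malloc` come from the header above. Initialization always succeeds, and the loaded check gates the CUDA path at runtime:

```c
// Hypothetical caller sketch (not part of this commit): with this change, a
// binary built with GGML_USE_CUBLAS=ON can pick the CPU path at runtime.
#include <stdio.h>

#include "ggml-cuda.h"

int main(void) {
    // Per the new header comment, this always succeeds; it merely attempts
    // to load cuBLAS and enumerate CUDA devices.
    ggml_init_cublas();

    if (ggml_cublas_loaded()) {
        // CUDA devices exist and cuBLAS loaded: safe to use the CUDA
        // backend, e.g. pinned buffers via ggml_cuda_host_malloc().
        printf("running with CUDA acceleration\n");
    } else {
        // No usable CUDA: fall back to the plain CPU code path instead of
        // aborting, which is the point of this patch.
        printf("CUDA not available, running on CPU\n");
    }
    return 0;
}
```

The design choice is to decouple the compile-time flag from runtime availability: a single GGML_USE_CUBLAS=ON binary can be shipped to machines with or without a CUDA-capable GPU, probing at startup rather than failing outright.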