author | slaren <slarengh@gmail.com> | 2023-12-23 16:10:51 +0100
---|---|---
committer | GitHub <noreply@github.com> | 2023-12-23 16:10:51 +0100
commit | 708e179e8562c2604240df95a2241dea17fd808b (patch) |
tree | 28f9fc81bdf1f4e0a3631347d91464cf364932ac /llama.cpp |
parent | 925e5584a058afb612f9c20bc472c130f5d0f891 (diff) |
fallback to CPU buffer if host buffer alloc fails (#4610)
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 16 |
1 file changed, 11 insertions, 5 deletions
@@ -1177,21 +1177,27 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_
 }
 
 static ggml_backend_buffer_type_t llama_default_buffer_type(int n_gpu_layers) {
+    ggml_backend_buffer_type_t buft = nullptr;
+
 #ifdef GGML_USE_METAL
     if (n_gpu_layers > 0) {
-        return ggml_backend_metal_buffer_type();
+        buft = ggml_backend_metal_buffer_type();
     }
 #elif defined(GGML_USE_CUBLAS) && defined(LLAMA_GGML_BACKEND_CUDA_TEST)
     if (n_gpu_layers > 0) {
-        return ggml_backend_cuda_buffer_type(0);
+        buft = ggml_backend_cuda_buffer_type(0);
     }
 #elif defined(GGML_USE_CUBLAS)
-    return ggml_backend_cuda_host_buffer_type();
+    buft = ggml_backend_cuda_host_buffer_type();
 #elif defined(GGML_USE_CPU_HBM)
-    return ggml_backend_cpu_hbm_buffer_type();
+    buft = ggml_backend_cpu_hbm_buffer_type();
 #endif
 
-    return ggml_backend_cpu_buffer_type();
+    if (buft == nullptr) {
+        buft = ggml_backend_cpu_buffer_type();
+    }
+
+    return buft;
 
     GGML_UNUSED(n_gpu_layers);
 }
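The hunk above restructures `llama_default_buffer_type` so the backend-specific buffer type is selected into a single `buft` variable with one return path, defaulting to the plain CPU buffer type when no backend-specific type applies. The runtime behavior named in the commit title, falling back to a CPU buffer when a host (pinned) buffer allocation fails, happens at allocation time rather than in this function. A minimal sketch of that allocation-side pattern follows; it assumes only the public `ggml-backend` API of this era (`ggml_backend_buft_alloc_buffer`, `ggml_backend_cpu_buffer_type`), and the helper name `alloc_buffer_with_fallback` is hypothetical, not code from this commit.

```cpp
// Hypothetical sketch (not from this commit): try to allocate from the
// preferred buffer type, and fall back to a plain CPU buffer when the
// allocation fails, e.g. because pinned (page-locked) memory is exhausted.
#include "ggml-backend.h"
#include <cstdio>

static ggml_backend_buffer_t alloc_buffer_with_fallback(ggml_backend_buffer_type_t buft, size_t size) {
    ggml_backend_buffer_t buf = ggml_backend_buft_alloc_buffer(buft, size);
    if (buf == nullptr) {
        // preferred (e.g. host/pinned) allocation failed; retry with
        // regular CPU memory instead of aborting model load
        fprintf(stderr, "warning: buffer allocation failed, falling back to CPU buffer\n");
        buf = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
    }
    return buf;
}
```

Under these assumptions, a caller would combine the two pieces as `alloc_buffer_with_fallback(llama_default_buffer_type(n_gpu_layers), size)`: the refactored function picks the preferred type, and the allocation site degrades gracefully when that type cannot deliver memory.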