author     slaren <slarengh@gmail.com>    2023-12-23 16:10:51 +0100
committer  GitHub <noreply@github.com>    2023-12-23 16:10:51 +0100
commit     708e179e8562c2604240df95a2241dea17fd808b (patch)
tree       28f9fc81bdf1f4e0a3631347d91464cf364932ac /ggml-cuda.cu
parent     925e5584a058afb612f9c20bc472c130f5d0f891 (diff)
fallback to CPU buffer if host buffer alloc fails (#4610)
Diffstat (limited to 'ggml-cuda.cu')
-rw-r--r--    ggml-cuda.cu    11
1 file changed, 6 insertions, 5 deletions
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 490081ca..f9830328 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -6729,8 +6729,7 @@ void * ggml_cuda_host_malloc(size_t size) {
void * ptr = nullptr;
cudaError_t err = cudaMallocHost((void **) &ptr, size);
if (err != cudaSuccess) {
- // The allocation error can be bypassed. A null ptr will assigned out of this function.
- // This can fixed the OOM error in WSL.
+ // clear the error
cudaGetLastError();
fprintf(stderr, "WARNING: failed to allocate %.2f MB of pinned memory: %s\n",
size/1024.0/1024.0, cudaGetErrorString(err));
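
The hunk above makes ggml_cuda_host_malloc fail soft: cudaGetLastError() clears CUDA's sticky error state and the function returns nullptr instead of aborting. A minimal sketch of the allocation pattern this enables, assuming the declarations from ggml-cuda.h; the helper names and the plain-malloc fallback policy are illustrative, not code from the tree:

    // Illustrative helpers (not in the tree): prefer pinned memory from
    // ggml_cuda_host_malloc, fall back to pageable memory if it returns nullptr.
    #include <cstdlib>
    #include "ggml-cuda.h"

    void * host_alloc_with_fallback(size_t size, bool * is_pinned) {
        void * ptr = ggml_cuda_host_malloc(size);  // clears the CUDA error and
                                                   // returns nullptr on failure
        *is_pinned = ptr != nullptr;
        if (ptr == nullptr) {
            ptr = malloc(size);                    // plain CPU allocation
        }
        return ptr;
    }

    void host_free_with_fallback(void * ptr, bool is_pinned) {
        if (is_pinned) {
            ggml_cuda_host_free(ptr);              // cudaFreeHost under the hood
        } else {
            free(ptr);
        }
    }
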
@@ -9674,12 +9673,14 @@ ggml_backend_buffer_type_t ggml_backend_cuda_buffer_type(int device) {
// host buffer type
static void ggml_backend_cuda_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
- CUDA_CHECK(cudaFreeHost(buffer->context));
+ ggml_cuda_host_free(buffer->context);
}
static ggml_backend_buffer_t ggml_backend_cuda_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
- void * ptr;
- CUDA_CHECK(cudaMallocHost(&ptr, size));
+ void * ptr = ggml_cuda_host_malloc(size);
+ if (ptr == nullptr) {
+ return nullptr;
+ }
// FIXME: this is a hack to avoid having to implement a new buffer type
ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
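
With this change, ggml_backend_cuda_host_buffer_type_alloc_buffer returns nullptr when pinned memory cannot be allocated instead of aborting through CUDA_CHECK, which is what lets a caller fall back to a plain CPU buffer as the commit title says. A rough caller-side sketch, assuming the usual ggml-backend entry points (ggml_backend_buft_alloc_buffer, ggml_backend_cuda_host_buffer_type, ggml_backend_cpu_buffer_type); the wrapper function itself is illustrative, not the actual loader code:

    // Illustrative fallback: try the CUDA pinned host buffer type first,
    // retry with the default CPU buffer type if the allocation fails.
    #include <cstdio>
    #include "ggml-backend.h"
    #include "ggml-cuda.h"

    static ggml_backend_buffer_t alloc_host_or_cpu(size_t size) {
        ggml_backend_buffer_t buf =
            ggml_backend_buft_alloc_buffer(ggml_backend_cuda_host_buffer_type(), size);
        if (buf == nullptr) {
            fprintf(stderr, "pinned host buffer unavailable, using CPU buffer\n");
            buf = ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
        }
        return buf;
    }
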