Diffstat (limited to 'ggml-cuda.cu')
-rw-r--r-- | ggml-cuda.cu | 11 |
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 5b6c9091..75a2ad48 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -2713,6 +2713,7 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t
 }
 
 GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
+    ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *) backend->context;
     switch (op->op) {
         case GGML_OP_UNARY:
             switch (ggml_get_unary_op(op)) {
@@ -2840,8 +2841,16 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_ARANGE:
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_LEAKY_RELU:
-        case GGML_OP_FLASH_ATTN_EXT:
             return true;
+        case GGML_OP_FLASH_ATTN_EXT:
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+            return op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128;
+#else
+            if (op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128) {
+                return true;
+            }
+            return ggml_cuda_info().devices[cuda_ctx->device].cc >= CC_VOLTA;
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
         default:
             return false;
     }
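
For reference, below is a minimal standalone sketch of the GGML_OP_FLASH_ATTN_EXT gating this patch introduces, written as ordinary C++ outside the backend. The function name flash_attn_ext_supported and the flattened parameters head_size, compute_cap, and is_hip_amd are hypothetical, chosen only to make the branch structure explicit; CC_VOLTA is assumed to be the compute-capability constant (700) used elsewhere in ggml-cuda.cu, and head_size stands in for op->src[0]->ne[0].

    #include <cstdint>

    // Assumed to mirror ggml-cuda.cu: compute capability encoded as major*100 + minor*10.
    static const int CC_VOLTA = 700;

    // head_size   : op->src[0]->ne[0], the head dimension of the attention op
    // compute_cap : compute capability of the target device
    // is_hip_amd  : true for the GGML_USE_HIPBLAS / __HIP_PLATFORM_AMD__ build
    static bool flash_attn_ext_supported(int64_t head_size, int compute_cap, bool is_hip_amd) {
        // Head sizes 64 and 128 are accepted on every device, including HIP/AMD.
        if (head_size == 64 || head_size == 128) {
            return true;
        }
        // Any other head size is rejected on the HIP/AMD build; on CUDA it
        // additionally requires a device of at least Volta compute capability.
        return !is_hip_amd && compute_cap >= CC_VOLTA;
    }

In words: the patch keeps flash attention unconditionally reported as supported only for head sizes 64 and 128, and makes every other head size depend on the device being Volta or newer, with the HIP/AMD build restricted to 64 and 128 outright.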