| author | Johannes Gäßler <johannesg@5d6.de> | 2024-05-12 19:40:45 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-05-12 19:40:45 +0200 |
| commit | dc685be46622a8fabfd57cfa804237c8f15679b8 (patch) | |
| tree | 43b1baf9bb0ab8d39e68f0e865a34fad37a59370 /ggml-cuda.cu | |
| parent | 6f1b63606fc68a09d62d1d74dbd156c35219026d (diff) | |
CUDA: add FP32 FlashAttention vector kernel (#7188)
* CUDA: add FP32 FlashAttention vector kernel
* fixup! CUDA: add FP32 FlashAttention vector kernel
* fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
* fixup! fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
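
For context, a FlashAttention "vector" kernel processes attention one query vector at a time, carrying a running maximum, a running softmax denominator, and a running weighted sum of value rows, so the full score matrix is never materialized. The plain C++ sketch below shows only that online-softmax recurrence; it is an illustration of the general technique, not the CUDA kernel added by this commit, and every name in it is made up for the example.

```cpp
// Illustrative plain C++ sketch (not the CUDA kernel from this commit):
// the numerically stable "online softmax" recurrence that a FlashAttention
// vector kernel evaluates for a single query vector.
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

std::vector<float> attn_single_query(
        const std::vector<float>              & q,   // query,  size D
        const std::vector<std::vector<float>> & K,   // keys,   N x D
        const std::vector<std::vector<float>> & V,   // values, N x D
        float scale) {
    const std::size_t D = q.size();

    float m = -INFINITY;              // running maximum of the scores
    float l = 0.0f;                   // running softmax denominator
    std::vector<float> acc(D, 0.0f);  // running weighted sum of value rows

    for (std::size_t i = 0; i < K.size(); ++i) {
        float s = 0.0f;               // scaled dot product q . k_i
        for (std::size_t d = 0; d < D; ++d) {
            s += q[d]*K[i][d];
        }
        s *= scale;

        const float m_new = std::max(m, s);
        const float alpha = std::exp(m - m_new);  // rescales the old state
        const float p     = std::exp(s - m_new);  // weight of the current row

        l = l*alpha + p;
        for (std::size_t d = 0; d < D; ++d) {
            acc[d] = acc[d]*alpha + p*V[i][d];
        }
        m = m_new;
    }

    for (std::size_t d = 0; d < D; ++d) {
        acc[d] /= l;                  // final softmax normalization
    }
    return acc;
}
```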
Diffstat (limited to 'ggml-cuda.cu')
-rw-r--r-- | ggml-cuda.cu | 11 |
1 file changed, 10 insertions(+), 1 deletion(-)
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 5b6c9091..75a2ad48 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -2713,6 +2713,7 @@ GGML_CALL static enum ggml_status ggml_backend_cuda_graph_compute(ggml_backend_t
 }
 
 GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, const ggml_tensor * op) {
+    ggml_backend_cuda_context * cuda_ctx = (ggml_backend_cuda_context *) backend->context;
     switch (op->op) {
         case GGML_OP_UNARY:
             switch (ggml_get_unary_op(op)) {
@@ -2840,8 +2841,16 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
         case GGML_OP_ARANGE:
         case GGML_OP_TIMESTEP_EMBEDDING:
         case GGML_OP_LEAKY_RELU:
-        case GGML_OP_FLASH_ATTN_EXT:
             return true;
+        case GGML_OP_FLASH_ATTN_EXT:
+#if defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
+            return op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128;
+#else
+            if (op->src[0]->ne[0] == 64 || op->src[0]->ne[0] == 128) {
+                return true;
+            }
+            return ggml_cuda_info().devices[cuda_ctx->device].cc >= CC_VOLTA;
+#endif // defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)
         default:
             return false;
     }
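
The effect of the new `GGML_OP_FLASH_ATTN_EXT` branch: head sizes 64 and 128 are the ones covered by the new FP32 vector kernel, so they are reported as supported on any device and are the only sizes accepted on HIP/AMD builds; any other head size still requires a device with compute capability of at least Volta (`CC_VOLTA`), presumably because the remaining FlashAttention kernels rely on FP16 tensor-core hardware. A standalone restatement of that decision follows; the names `flash_attn_ext_supported`, `is_hip_amd`, `head_size`, and `compute_cap` are hypothetical and do not appear in ggml-cuda.cu.

```cpp
#include <cstdint>

// Sketch of the support check added above. In the real code the head size is
// op->src[0]->ne[0] and the compute capability comes from
// ggml_cuda_info().devices[cuda_ctx->device].cc; CC_VOLTA corresponds to 7.0.
bool flash_attn_ext_supported(bool is_hip_amd, int64_t head_size, int compute_cap) {
    const bool has_fp32_vec_kernel = head_size == 64 || head_size == 128;
    if (is_hip_amd) {
        return has_fp32_vec_kernel;   // HIP/AMD: only the FP32 vector kernel sizes
    }
    if (has_fp32_vec_kernel) {
        return true;                  // CUDA: these head sizes work on any device
    }
    return compute_cap >= 700;        // other head sizes need Volta or newer (CC_VOLTA)
}
```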