summaryrefslogtreecommitdiff
path: root/ggml-cuda/common.cuh
diff options
context:
space:
mode:
authorJohannes Gäßler <johannesg@5d6.de>2024-05-12 19:40:45 +0200
committerGitHub <noreply@github.com>2024-05-12 19:40:45 +0200
commitdc685be46622a8fabfd57cfa804237c8f15679b8 (patch)
tree43b1baf9bb0ab8d39e68f0e865a34fad37a59370 /ggml-cuda/common.cuh
parent6f1b63606fc68a09d62d1d74dbd156c35219026d (diff)
CUDA: add FP32 FlashAttention vector kernel (#7188)
* CUDA: add FP32 FlashAttention vector kernel * fixup! CUDA: add FP32 FlashAttention vector kernel * fixup! fixup! CUDA: add FP32 FlashAttention vector kernel * fixup! fixup! fixup! CUDA: add FP32 FlashAttention vector kernel
Diffstat (limited to 'ggml-cuda/common.cuh')
-rw-r--r--ggml-cuda/common.cuh4
1 file changed, 4 insertions, 0 deletions
diff --git a/ggml-cuda/common.cuh b/ggml-cuda/common.cuh
index 44e67e04..b6f0bc36 100644
--- a/ggml-cuda/common.cuh
+++ b/ggml-cuda/common.cuh
@@ -321,6 +321,10 @@ static __device__ __forceinline__ int __dp4a(const int a, const int b, int c) {
#define FP16_MMA_AVAILABLE !(defined(GGML_USE_HIPBLAS) && defined(__HIP_PLATFORM_AMD__)) && __CUDA_ARCH__ >= CC_VOLTA
+static bool fast_fp16_available(const int cc) {
+ return cc >= CC_PASCAL && cc != 610;
+}
+
static bool fp16_mma_available(const int cc) {
return cc < CC_OFFSET_AMD && cc >= CC_VOLTA;
}