author     Johannes Gäßler <johannesg@5d6.de>   2024-06-01 23:26:10 +0200
committer  GitHub <noreply@github.com>          2024-06-01 23:26:10 +0200
commit     e141ce624af57bdffbaf57014a044eb1d9689230 (patch)
tree       f41f3a1b08c3ce608de244e0e45ff1ed699d2ce9 /ggml-cuda
parent     2e666832e6ac78194edf030bd1c295e21bdb022c (diff)
Fix FlashAttention debug test, FP32 assert (#7684)
Diffstat (limited to 'ggml-cuda')
-rw-r--r--  ggml-cuda/fattn-vec-f32.cuh | 4
1 file changed, 0 insertions, 4 deletions
diff --git a/ggml-cuda/fattn-vec-f32.cuh b/ggml-cuda/fattn-vec-f32.cuh
index ce23a4eb..ddf0c837 100644
--- a/ggml-cuda/fattn-vec-f32.cuh
+++ b/ggml-cuda/fattn-vec-f32.cuh
@@ -278,14 +278,10 @@ void ggml_cuda_flash_attn_ext_vec_f32_case_impl(ggml_backend_cuda_context & ctx,
 template <int D, ggml_type type_K, ggml_type type_V>
 void ggml_cuda_flash_attn_ext_vec_f32_case(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
-    ggml_tensor * KQV = dst;
     ggml_tensor * Q = dst->src[0];
     ggml_tensor * K = dst->src[1];
     ggml_tensor * V = dst->src[2];
 
-    const int32_t precision = KQV->op_params[2];
-    GGML_ASSERT(precision == GGML_PREC_DEFAULT);
-
     GGML_ASSERT(K->type == type_K);
     GGML_ASSERT(V->type == type_V);
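
Why the assert had to go: the commit title indicates it fired in a debug test, i.e. this FP32 vector launcher is also reached when the FlashAttention op requests a precision other than GGML_PREC_DEFAULT, so asserting GGML_PREC_DEFAULT inside it was incorrect. Below is a minimal, self-contained C++ sketch of that dispatch idea, not the repository's actual dispatcher; dispatch_flash_attn_vec, flash_attn_vec_f16_case/flash_attn_vec_f32_case and the stub enum are hypothetical stand-ins for illustration only.

    // Sketch only (hypothetical names, stubbed types): a precision-based choice
    // between an FP16 and an FP32 vector kernel. The FP32 launcher is the one
    // picked when full-precision accumulation is requested, so it must not
    // assert precision == GGML_PREC_DEFAULT.
    #include <cstdio>

    enum ggml_prec { GGML_PREC_DEFAULT, GGML_PREC_F32 }; // stand-in for ggml's enum

    static void flash_attn_vec_f16_case() { std::puts("launch FP16 vector kernel"); }

    static void flash_attn_vec_f32_case() {
        // Pre-patch, the real launcher asserted GGML_PREC_DEFAULT here, which
        // contradicts the dispatch below; the patch simply drops that assert.
        std::puts("launch FP32 vector kernel");
    }

    static void dispatch_flash_attn_vec(ggml_prec precision) {
        if (precision == GGML_PREC_DEFAULT) {
            flash_attn_vec_f16_case(); // default: FP16 accumulation is acceptable
        } else {
            flash_attn_vec_f32_case(); // caller asked for FP32 accumulation
        }
    }

    int main() {
        dispatch_flash_attn_vec(GGML_PREC_DEFAULT); // -> FP16 path
        dispatch_flash_attn_vec(GGML_PREC_F32);     // -> FP32 path, no assert to trip
        return 0;
    }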