author    Johannes Gäßler <johannesg@5d6.de>  2024-06-01 23:26:10 +0200
committer GitHub <noreply@github.com>         2024-06-01 23:26:10 +0200
commit    e141ce624af57bdffbaf57014a044eb1d9689230 (patch)
tree      f41f3a1b08c3ce608de244e0e45ff1ed699d2ce9 /tests/test-backend-ops.cpp
parent    2e666832e6ac78194edf030bd1c295e21bdb022c (diff)
Fix FlashAttention debug test, FP32 assert (#7684)
Diffstat (limited to 'tests/test-backend-ops.cpp')
-rw-r--r--  tests/test-backend-ops.cpp  8
1 file changed, 5 insertions, 3 deletions
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index 77723012..8dc90a45 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -1584,9 +1584,11 @@ struct test_flash_attn_ext : public test_case {
         : hs(hs), nh(nh), kv(kv), nb(nb), mask(mask), max_bias(max_bias), type_KV(type_KV) {}
 
     ggml_tensor * build_graph(ggml_context * ctx) override {
-        ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs, nb, nh, 1);
-        ggml_tensor * k = ggml_new_tensor_4d(ctx, type_KV, hs, kv, nh, 1);
-        ggml_tensor * v = ggml_new_tensor_4d(ctx, type_KV, hs, kv, nh, 1);
+        const int64_t hs_padded = GGML_PAD(hs, ggml_blck_size(type_KV));
+
+        ggml_tensor * q = ggml_new_tensor_4d(ctx, GGML_TYPE_F32, hs_padded, nb, nh, 1);
+        ggml_tensor * k = ggml_new_tensor_4d(ctx, type_KV, hs_padded, kv, nh, 1);
+        ggml_tensor * v = ggml_new_tensor_4d(ctx, type_KV, hs_padded, kv, nh, 1);
         ggml_tensor * m = mask ? ggml_new_tensor_4d(ctx, GGML_TYPE_F16, kv, GGML_PAD(nb, GGML_KQ_MASK_PAD), 1, 1) : nullptr;
         ggml_tensor * out = ggml_flash_attn_ext(ctx, q, k, v, m, 1.0f/sqrtf(hs), max_bias);
         return out;
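
The change pads the head size of the Q, K and V test tensors up to a multiple of the KV type's block size, so that rows of quantized K/V tensors consist of whole quantization blocks; note that the attention scale 1.0f/sqrtf(hs) still uses the unpadded head size. As a minimal standalone sketch of the rounding arithmetic (the round-up definition of GGML_PAD and the illustrative block sizes below are assumptions for this sketch, not taken from the commit):

    // Sketch of the padding computed by hs_padded above.
    // Assumes GGML_PAD(x, n) rounds x up to the next multiple of n, as in ggml.h,
    // and that e.g. ggml_blck_size(GGML_TYPE_Q4_0) == 32 while F16 has block size 1.
    #include <cstdint>
    #include <cstdio>

    #define GGML_PAD(x, n) (((x) + (n) - 1) / (n) * (n))

    int main() {
        const int64_t hs = 80;  // example head size that is not a multiple of 32
        printf("%lld\n", (long long) GGML_PAD(hs, 32)); // 96: padded for a Q4_0 KV cache
        printf("%lld\n", (long long) GGML_PAD(hs,  1)); // 80: unchanged for an F16 KV cache
        return 0;
    }

For an F16 KV cache the padding is a no-op, so only the quantized-KV test cases change shape.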