path: root/tests/test-grad0.cpp
author     Georgi Gerganov <ggerganov@gmail.com>   2024-06-05 11:29:20 +0300
committer  GitHub <noreply@github.com>             2024-06-05 11:29:20 +0300
commit     2b3389677a833cee0880226533a1768b1a9508d2 (patch)
tree       3af4beed34ff6c1309d202a7028f5ab85ec43693 /tests/test-grad0.cpp
parent     9973e81c5ccf4f31b3980f5aa73f5cfea8699860 (diff)
ggml : refactor rope norm/neox (#7634)
* ggml : unify rope norm/neox (CPU)
* ggml : fix compile warning
* ggml : remove GLM rope mode
ggml-ci
* metal : better rope implementation
ggml-ci
* cuda : better rope implementation
ggml-ci
* naming : n_orig_ctx -> n_ctx_orig
ggml-ci
* dev : add reminders to update backends
ggml-ci
* vulkan : fix ggml_rope_ext() usage
* cuda : fix array size + indents
ggml-ci
Diffstat (limited to 'tests/test-grad0.cpp')
-rw-r--r--  tests/test-grad0.cpp  |  4
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/tests/test-grad0.cpp b/tests/test-grad0.cpp
index 21ca43be..a3532764 100644
--- a/tests/test-grad0.cpp
+++ b/tests/test-grad0.cpp
@@ -1465,7 +1465,7 @@ int main(int argc, const char ** argv) {
continue;
}
- struct ggml_tensor * f = ggml_sum(ctx0, ggml_rope(ctx0, x[0], p, n_rot, mode, 0));
+ struct ggml_tensor * f = ggml_sum(ctx0, ggml_rope(ctx0, x[0], p, n_rot, mode));
GGML_PRINT_DEBUG("rope f32: n_past: %d n_rot: %d mode: %d\n", n_past, n_rot, mode);
check_gradient("rope f32", ctx0, x, f, ndims, nargs, 1e-2f, 1e-3f, INFINITY);
@@ -1505,7 +1505,7 @@ int main(int argc, const char ** argv) {
continue;
}
- struct ggml_tensor * f = ggml_sum(ctx0, ggml_rope(ctx0, x[0], p, n_rot, mode, 0));
+ struct ggml_tensor * f = ggml_sum(ctx0, ggml_rope(ctx0, x[0], p, n_rot, mode));
GGML_PRINT_DEBUG("rope f16: n_past: %d n_rot: %d mode: %d\n", n_past, n_rot, mode);
check_gradient("rope f16", ctx0, x, f, ndims, nargs, 1e-1f, 1e-1f, INFINITY);