author     Georgi Gerganov <ggerganov@gmail.com>    2023-12-21 23:20:49 +0200
committer  GitHub <noreply@github.com>              2023-12-21 23:20:49 +0200
commit     afefa319f1f59b002dfa0d1ef407a2c74bd9770b (patch)
tree       a6923e0a6214293d88957cd11e25943f2c0fb80a /tests
parent     769a7bc85eaa44e3d7eadf39abfeff7bb0b9cc2f (diff)
ggml : change ggml_scale to take a float instead of tensor (#4573)
* ggml : change ggml_scale to take a float instead of tensor
* ggml : fix CPU implementation
* tests : fix test-grad0

ggml-ci
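At call sites the change looks roughly like the sketch below (a minimal illustration, not taken from this diff; `ctx` and `a` are assumed to be an existing ggml_context and input tensor):

    // before this commit: the factor had to be wrapped in a 1-element tensor
    //   struct ggml_tensor * s   = ggml_new_f32(ctx, 2.0f);
    //   struct ggml_tensor * out = ggml_scale(ctx, a, s);

    // after this commit: the factor is passed directly as a float
    struct ggml_tensor * out = ggml_scale(ctx, a, 2.0f);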
Diffstat (limited to 'tests')
-rw-r--r--    tests/test-backend-ops.cpp     9
-rw-r--r--    tests/test-grad0.cpp          10
2 files changed, 10 insertions, 9 deletions
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index f04b9438..f3df8a8c 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -766,18 +766,19 @@ struct test_bin_bcast : public test_case {
 struct test_scale : public test_case {
     const ggml_type type;
     const std::array<int64_t, 4> ne;
+    float scale;
 
     std::string vars() override {
-        return VARS_TO_STR2(type, ne);
+        return VARS_TO_STR3(type, ne, scale);
     }
 
     test_scale(ggml_type type = GGML_TYPE_F32,
-            std::array<int64_t, 4> ne = {10, 10, 10, 10})
-        : type(type), ne(ne) {}
+            std::array<int64_t, 4> ne = {10, 10, 10, 10},
+            float scale = 2.0f)
+        : type(type), ne(ne), scale(scale) {}
 
     ggml_tensor * build_graph(ggml_context * ctx) override {
         ggml_tensor * a = ggml_new_tensor(ctx, type, 4, ne.data());
-        ggml_tensor * scale = ggml_new_tensor_1d(ctx, type, 1);
         ggml_tensor * out = ggml_scale(ctx, a, scale);
         return out;
     }
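With `scale` now a plain float member (default 2.0f), the backend op test no longer allocates a 1-element tensor for the factor, and different factors can be exercised directly. A sketch of how such a case might be registered, assuming the `test_cases.emplace_back(...)` pattern used elsewhere in test-backend-ops.cpp (the second line is a hypothetical extra case, not part of this commit):

    // default case: scales a 10x10x10x10 F32 tensor by 2.0f
    test_cases.emplace_back(new test_scale());
    // hypothetical additional case with a non-default factor
    test_cases.emplace_back(new test_scale(GGML_TYPE_F32, {10, 10, 10, 10}, 0.5f));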
diff --git a/tests/test-grad0.cpp b/tests/test-grad0.cpp
index 81c20a89..14914def 100644
--- a/tests/test-grad0.cpp
+++ b/tests/test-grad0.cpp
@@ -881,19 +881,19 @@ int main(int argc, const char ** argv) {
         // scale
         {
             srand(seed);
-            const int nargs = 2;
+            const int nargs = 1;
 
             int64_t ne2[4];
             ne2[0] = 1;
 
             for (int ndims = 1; ndims <= 2; ++ndims) {
-                x[1] = get_random_tensor_f32(ctx0, 1, ne2, -1.0f, 1.0f);
                 x[0] = get_random_tensor_f32(ctx0, ndims, ne, -1.0f, 1.0f);
 
+                const float s = -1.0f + 2.0f*frand();
+
                 ggml_set_param(ctx0, x[0]);
-                ggml_set_param(ctx0, x[1]);
 
-                struct ggml_tensor * f = ggml_sum(ctx0, ggml_scale(ctx0, x[0], x[1]));
+                struct ggml_tensor * f = ggml_sum(ctx0, ggml_scale(ctx0, x[0], s));
 
                 check_gradient("scale", ctx0, x, f, ndims, nargs, 1e-3f, 1e-3f, INFINITY);
             }
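Since the scale factor is now a compile-time constant in the graph rather than a second parameter tensor, nargs drops to 1 and the finite-difference check only has to validate the gradient with respect to x[0]: for f(x) = sum_i s*x_i the expected gradient is simply s for every element. A standalone sketch of that check with illustrative values (not the real test harness):

    #include <cstdio>

    int main() {
        const float s   = 0.75f;   // stands in for the random scalar drawn via frand()
        const float x_i = 0.20f;   // one element of x[0]
        const float eps = 1e-3f;

        // central finite difference of f w.r.t. this element; all other summands cancel
        const float numeric  = (s*(x_i + eps) - s*(x_i - eps)) / (2.0f*eps);
        const float analytic = s;

        printf("numeric = %f, analytic = %f\n", numeric, analytic);
        return 0;
    }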
@@ -1395,7 +1395,7 @@ int main(int argc, const char ** argv) {
                             ggml_add1(ctx0,
                                 ggml_scale(ctx0,
                                     ggml_soft_max(ctx0, x[0]),
-                                    ggml_new_f32(ctx0, 1.0f - eps)),
+                                    1.0f - eps),
                                 ggml_new_f32(ctx0, eps))));
 
                 check_gradient("softmax", ctx0, x, f, ndims, nargs, 1e-3f, 2e-1f, INFINITY);
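The enclosing softmax test remaps the softmax output y to (1 - eps)*y + eps, pushing values from [0, 1] into [eps, 1] so that the rest of the expression never sees an exact zero; only the scaling step changes here, from a 1-element tensor argument to a literal float. The remap on its own, with an illustrative eps value (a sketch, not the test code):

    #include <cstdio>

    int main() {
        const float eps = 1e-6f;
        const float y[3] = {0.0f, 0.5f, 1.0f};   // softmax outputs at the extremes

        for (float v : y) {
            // result stays inside [eps, 1], avoiding exact zeros downstream
            printf("%f -> %g\n", v, (1.0f - eps)*v + eps);
        }
        return 0;
    }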