Diffstat (limited to 'tests/test-backend-ops.cpp')
-rw-r--r--  tests/test-backend-ops.cpp  104
1 file changed, 95 insertions(+), 9 deletions(-)
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index 7c504e93..2c03c60d 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -1,7 +1,6 @@
#include <ggml.h>
#include <ggml-alloc.h>
#include <ggml-backend.h>
-#include <ggml-backend-impl.h>
#include <algorithm>
#include <array>
@@ -80,8 +79,16 @@ static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float m
im = nullptr;
}
}
+
ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size/tensor->ne[0], tensor->ne[0], im);
GGML_ASSERT(ggml_validate_row_data(tensor->type, dataq.data(), dataq.size()));
+ // TODO: other cases
+ //#pragma omp parallel for
+ //for (int i = 0; i < tensor->ne[1]; i++) {
+ // ggml_quantize_chunk(tensor->type, data.data(), dataq.data(),
+ // i * tensor->ne[0], 1, tensor->ne[0], im);
+ //}
+
ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
} else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
// This is going to create some weird integers though.
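A note on the chunked call above, assuming size is ggml_nelements(tensor): the whole tensor is quantized in a single chunk of size/ne[0] rows of ne[0] values each, e.g. one call covering 16384/256 = 64 rows for a {256, 4, 4, 4} tensor. The commented-out loop in the TODO would instead pass start = i * ne[0] and nrows = 1, quantizing one row per iteration so rows could be processed in parallel.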
@@ -760,7 +767,7 @@ struct test_dup : public test_case {
}
test_dup(ggml_type type = GGML_TYPE_F32,
- std::array<int64_t, 4> ne = {10, 10, 10, 1},
+ std::array<int64_t, 4> ne = {10, 10, 20, 1},
std::array<int64_t, 4> permute = {0, 0, 0, 0})
: type(type), ne(ne), permute(permute),
_use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}
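(The sum test works because any real axis permutation is a rearrangement of {0, 1, 2, 3} and therefore sums to 6, so the all-zero default can double as a "no permute" sentinel.)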
@@ -780,9 +787,15 @@ struct test_cpy : public test_case {
const ggml_type type_src;
const ggml_type type_dst;
const std::array<int64_t, 4> ne;
+ const std::array<int64_t, 4> permute;
+ bool _src_use_permute;
std::string vars() override {
- return VARS_TO_STR3(type_src, type_dst, ne);
+ return VARS_TO_STR4(type_src, type_dst, ne, permute);
+ }
+
+ double max_nmse_err() override {
+ return 1e-6;
}
size_t op_size(ggml_tensor * t) override {
@@ -790,12 +803,18 @@ struct test_cpy : public test_case {
}
test_cpy(ggml_type type_src = GGML_TYPE_F32, ggml_type type_dst = GGML_TYPE_F32,
- std::array<int64_t, 4> ne = {10, 10, 10, 1})
- : type_src(type_src), type_dst(type_dst), ne(ne) {}
+ std::array<int64_t, 4> ne = {10, 10, 10, 1},
+ std::array<int64_t, 4> permute = {0, 0, 0, 0},
+ bool _dst_use_permute = false)
+ : type_src(type_src), type_dst(type_dst), ne(ne), permute(permute),
+ _src_use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}
ggml_tensor * build_graph(ggml_context * ctx) override {
ggml_tensor * src = ggml_new_tensor(ctx, type_src, 4, ne.data());
- ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, ne.data());
+ if (_src_use_permute) {
+ src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
+ }
+ ggml_tensor * dst = ggml_new_tensor(ctx, type_dst, 4, src->ne);
ggml_tensor * out = ggml_cpy(ctx, src, dst);
return out;
}
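(Note that dst is now allocated from src->ne rather than ne: once src is permuted its logical shape changes, and ggml_cpy expects source and destination to agree on shape and element count, so the destination must follow the permuted shape.)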
@@ -1171,6 +1190,7 @@ struct test_soft_max : public test_case {
}
};
+
// GGML_OP_ROPE
struct test_rope : public test_case {
const ggml_type type;
@@ -1263,6 +1283,32 @@ struct test_pool2d : public test_case {
}
};
+// GGML_OP_CONV_TRANSPOSE_1D
+struct test_conv_transpose_1d : public test_case {
+ const std::array<int64_t, 4> ne_input;
+ const std::array<int64_t, 4> ne_kernel;
+
+ const int s0; // stride
+ const int p0; // padding
+ const int d0; // dilation
+
+ std::string vars() override {
+ return VARS_TO_STR5(ne_input, ne_kernel, s0, p0, d0);
+ }
+
+ test_conv_transpose_1d(std::array<int64_t, 4> ne_input = {197, 32, 1, 1}, // [input_width, input_channels, 1, 1]
+ std::array<int64_t, 4> ne_kernel = {16, 32, 32, 1}, // [kernel_width, output_channels, input_channels, 1]
+ int s0 = 1, int p0 = 0, int d0 = 1)
+ : ne_input(ne_input), ne_kernel(ne_kernel), s0(s0), p0(p0), d0(d0) {}
+
+ ggml_tensor * build_graph(ggml_context * ctx) override {
+ ggml_tensor * input = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_input.data());
+ ggml_tensor * kernel = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne_kernel.data());
+ ggml_tensor * out = ggml_conv_transpose_1d(ctx, kernel, input, s0, p0, d0);
+ return out;
+ }
+};
+
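For reference, the expected output width of these cases follows the standard transposed-convolution formula, sketched here as a hypothetical helper (assuming ggml follows the usual convention):

static int64_t conv_transpose_1d_out_w(int64_t in_w, int64_t k_w,
                                       int s0, int p0, int d0) {
    // out_w = (in_w - 1)*s0 - 2*p0 + d0*(k_w - 1) + 1
    return (in_w - 1)*s0 - 2*p0 + d0*(k_w - 1) + 1;
}

For the default case above this gives (197 - 1)*1 - 0 + 1*(16 - 1) + 1 = 212 output positions across 32 output channels.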
// GGML_OP_IM2COL
struct test_im2col : public test_case {
const ggml_type type_input;
@@ -1276,7 +1322,7 @@ struct test_im2col : public test_case {
// padding
const int p0;
const int p1;
- // dilatation
+ // dilation
const int d0;
const int d1;
// mode
@@ -2049,6 +2095,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
GGML_TYPE_IQ2_XS, GGML_TYPE_IQ2_S,
GGML_TYPE_IQ3_XXS, GGML_TYPE_IQ1_S, GGML_TYPE_IQ1_M,
GGML_TYPE_IQ4_NL, GGML_TYPE_IQ3_S, GGML_TYPE_IQ4_XS,
+ GGML_TYPE_BF16,
};
// unary ops
@@ -2094,6 +2141,16 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F32));
test_cases.emplace_back(new test_im2col(GGML_TYPE_F32, GGML_TYPE_F16, GGML_TYPE_F16));
+ test_cases.emplace_back(new test_conv_transpose_1d());
+ test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 3, 0, 1));
+ test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 2, 0, 1));
+ test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {2,3,2,1}, 1, 0, 1));
+ test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 2, 0, 1));
+ test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,2,2,1}, 1, 0, 1));
+ test_cases.emplace_back(new test_conv_transpose_1d({3,2,1,1}, {3,1,2,1}, 1, 0, 1));
+ test_cases.emplace_back(new test_conv_transpose_1d({2,1,1,1}, {3,1,1,1}, 1, 0, 1));
+
test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 1}));
test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {2, 1, 1, 1}));
test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 2, 1, 1}));
@@ -2106,12 +2163,22 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
test_cases.emplace_back(new test_dup(GGML_TYPE_F16));
test_cases.emplace_back(new test_dup(GGML_TYPE_I32));
test_cases.emplace_back(new test_dup(GGML_TYPE_I16));
+ test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {0, 2, 1, 3}));
+ test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {0, 2, 1, 3})); // dup by rows
+ test_cases.emplace_back(new test_dup(GGML_TYPE_F32, {10, 10, 5, 1}, {1, 0, 2, 3}));
+ test_cases.emplace_back(new test_dup(GGML_TYPE_F16, {10, 10, 5, 1}, {1, 0, 2, 3})); // dup dst not-contiguous
test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3}));
test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3}));
for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
for (ggml_type type_dst : all_types) {
test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 4, 4, 4}));
+ test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {0, 2, 1, 3})); // cpy by rows
+ }
+ }
+ for (ggml_type type_src : {GGML_TYPE_F16, GGML_TYPE_F32}) {
+ for (ggml_type type_dst : {GGML_TYPE_F16, GGML_TYPE_F32}) {
+ test_cases.emplace_back(new test_cpy(type_src, type_dst, {256, 2, 3, 4}, {1, 0, 2, 3})); // cpy not-contiguous
}
}
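(The split matters: a {0, 2, 1, 3} permutation leaves the innermost rows intact, so every destination type, including the quantized ones, can be exercised; {1, 0, 2, 3} breaks up the rows, which presumably only the plain F16/F32 copy paths handle, hence the restricted second loop.)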
@@ -2161,6 +2228,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
test_cases.emplace_back(new test_rms_norm(GGML_TYPE_F32, {64, 10, 10, 10}, eps));
}
+#if 1
for (ggml_type type_a : base_types) {
for (ggml_type type_b : {GGML_TYPE_F32, GGML_TYPE_F16}) {
test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 1, 256, { 1, 1}, {1, 1}));
@@ -2180,6 +2248,24 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
test_cases.emplace_back(new test_mul_mat(type_a, type_b, 16, 16, 256, {10, 10}, {2, 2}));
}
}
+#else
+ // m = a rows
+ // n = b rows
+ // k = cols
+ std::uniform_int_distribution<> dist_m(1, 128);
+ std::uniform_int_distribution<> dist_n(16, 128);
+ std::uniform_int_distribution<> dist_k(1, 16);
+ for (int i = 0; i < 1000; i++) {
+ for (ggml_type type_a : all_types) {
+ for (ggml_type type_b : {GGML_TYPE_F32}) {
+ int m = dist_m(rng);
+ int n = dist_n(rng);
+ int k = dist_k(rng) * ggml_blck_size(type_a);
+ test_cases.emplace_back(new test_mul_mat(type_a, type_b, m, n, k, { 1, 1}, {1, 1}));
+ }
+ }
+ }
+#endif
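(In the fuzzing branch, k is drawn as a multiple of ggml_blck_size(type_a) so that each row of the quantized A matrix spans a whole number of quantization blocks; a row width that splits a block would not form a valid quantized row.)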
for (ggml_type type_a : other_types) {
for (ggml_type type_b : {GGML_TYPE_F32}) {
@@ -2243,7 +2329,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
for (int n = 0; n < 10; ++n) {
int64_t ne0 = dist_ne0(rng);
int64_t ne1 = dist_ne1(rng);
- test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, {ne0, ne1, 1, 1}, n/2 == 0, 0.1f, ne0 < 1000 ? 4.0f : 0.0f));
+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {ne0, ne1, 1, 1}, n/2 == 0, 0.1f, ne0 < 1000 ? 4.0f : 0.0f));
}
exponent <<= 1;
@@ -2262,7 +2348,7 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
}
}
}
-
+ test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {16, 2, 32, 1}, true, 0.1f, 0.0f));
test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {16, 2, 32, 1}, false, 0.1f, 0.0f));
test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 0.0f));
test_cases.emplace_back(new test_soft_max(GGML_TYPE_F32, GGML_TYPE_F32, {32, 2, 32, 1}, true, 0.1f, 8.0f));
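(For context on the parameters, as a sketch of the usual convention rather than a spec of the kernel: the extended soft_max computes softmax(x*scale + slope*mask) per row, where slope is 1.0 when max_bias is 0.0 and otherwise decays per head, ALiBi-style; the ne0 < 1000 ? 4.0f : 0.0f above enables the bias path only for the smaller widths.)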