author    | Guillaume Wenzek <gwenzek@users.noreply.github.com> | 2023-12-29 18:07:03 +0100
committer | Georgi Gerganov <ggerganov@gmail.com>               | 2024-01-03 14:38:38 +0200
commit    | 5f66ebca9c41a17385341da4b553a8eb5f07edee (patch)
tree      | 9295b1ef1cfef67c8ac42a9bed88892380a46069 /tests/test-backend-ops.cpp
parent    | f2eb19bd8bc9f5730d6e05d7a52a9e19bf5ac099 (diff)
ggml : extend ggml_get_rows, ggml_repeat, ggml_concat (ggml/639)
* add more int ops
* ggml_compute_forward_dup_bytes
* add tests
* PR comments
* tests : minor indentations
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
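With this patch the extended ops also accept integer-typed tensors. The following is a hypothetical standalone sketch (not part of the commit) of what that enables, assuming the two-argument ggml_concat of this ggml revision (which concatenates along dim 2) and a plain CPU context:

    // Sketch: exercise the newly supported integer paths of ggml_get_rows,
    // ggml_repeat and ggml_concat on I32 tensors.
    #include "ggml.h"

    int main() {
        struct ggml_init_params params = {
            /*.mem_size   =*/ 16 * 1024 * 1024,
            /*.mem_buffer =*/ NULL,
            /*.no_alloc   =*/ false,
        };
        struct ggml_context * ctx = ggml_init(params);

        // 4 rows of 8 int32 values (contents are irrelevant here)
        struct ggml_tensor * rows = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, 8, 4);

        // index tensor selecting rows 1 and 3
        struct ggml_tensor * idx = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 2);
        ggml_set_i32_1d(idx, 0, 1);
        ggml_set_i32_1d(idx, 1, 3);

        struct ggml_tensor * picked = ggml_get_rows(ctx, rows, idx);          // get_rows on I32 data
        struct ggml_tensor * target = ggml_new_tensor_2d(ctx, GGML_TYPE_I32, 8, 8);
        struct ggml_tensor * tiled  = ggml_repeat(ctx, rows, target);         // repeat an I32 tensor
        struct ggml_tensor * joined = ggml_concat(ctx, rows, rows);           // concat two I32 tensors

        struct ggml_cgraph * gf = ggml_new_graph(ctx);
        ggml_build_forward_expand(gf, picked);
        ggml_build_forward_expand(gf, tiled);
        ggml_build_forward_expand(gf, joined);
        ggml_graph_compute_with_ctx(ctx, gf, /*n_threads =*/ 1);

        ggml_free(ctx);
        return 0;
    }

The tests added below cover exactly these paths, plus ggml_dup of permuted (non-contiguous) integer tensors, and compare each backend's output against the CPU backend.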
Diffstat (limited to 'tests/test-backend-ops.cpp')
-rw-r--r-- | tests/test-backend-ops.cpp | 42
1 file changed, 36 insertions(+), 6 deletions(-)
diff --git a/tests/test-backend-ops.cpp b/tests/test-backend-ops.cpp
index eff063b2..44412cb9 100644
--- a/tests/test-backend-ops.cpp
+++ b/tests/test-backend-ops.cpp
@@ -58,6 +58,9 @@ static void init_tensor_uniform(ggml_tensor * tensor, float min = -1.0f, float m
         int64_t hist[16];
         ggml_quantize_chunk(tensor->type, data.data(), dataq.data(), 0, size, hist);
         ggml_backend_tensor_set(tensor, dataq.data(), 0, dataq.size());
+    } else if (tensor->type == GGML_TYPE_I8 || tensor->type == GGML_TYPE_I16 || tensor->type == GGML_TYPE_I32) {
+        // This is going to create some weird integers though.
+        ggml_backend_tensor_set(tensor, data.data(), 0, ggml_nbytes(tensor));
     } else {
         GGML_ASSERT(false);
     }
@@ -87,8 +90,13 @@ static std::vector<float> tensor_to_float(const ggml_tensor * t) {
                         tv.push_back(*(float *) &buf[i]);
                     } else if (t->type == GGML_TYPE_I32) {
                         tv.push_back((float)*(int32_t *) &buf[i]);
+                    } else if (t->type == GGML_TYPE_I16) {
+                        tv.push_back((float)*(int16_t *) &buf[i]);
+                    } else if (t->type == GGML_TYPE_I8) {
+                        tv.push_back((float)*(int8_t *) &buf[i]);
                     } else if (quantized) {
-                        tt.to_float(&buf[i], vq.data(), bs);
+                        std::vector<float> vq(ggml_blck_size(t->type));
+                        tt.to_float(&buf[i], vq.data(), ggml_blck_size(t->type));
                         tv.insert(tv.end(), vq.begin(), vq.end());
                     } else {
                         GGML_ASSERT(false);
@@ -661,17 +669,26 @@ struct test_repeat : public test_case {
 struct test_dup : public test_case {
     const ggml_type type;
     const std::array<int64_t, 4> ne;
+    const std::array<int64_t, 4> permute;
+    bool _use_permute;
 
     std::string vars() override {
-        return VARS_TO_STR2(type, ne);
+        std::string v = VARS_TO_STR2(type, ne);
+        if (_use_permute) v += "," + VAR_TO_STR(permute);
+        return v;
     }
 
     test_dup(ggml_type type = GGML_TYPE_F32,
-            std::array<int64_t, 4> ne = {10, 10, 10, 1})
-        : type(type), ne(ne) {}
+            std::array<int64_t, 4> ne = {10, 10, 10, 1},
+            std::array<int64_t, 4> permute = {0, 0, 0, 0})
+        : type(type), ne(ne), permute(permute),
+            _use_permute(permute[0] + permute[1] + permute[2] + permute[3] > 0) {}
 
     ggml_tensor * build_graph(ggml_context * ctx) override {
         ggml_tensor * src = ggml_new_tensor(ctx, type, 4, ne.data());
+        if (_use_permute) {
+            src = ggml_permute(ctx, src, permute[0], permute[1], permute[2], permute[3]);
+        }
         ggml_tensor * out = ggml_dup(ctx, src);
         return out;
     }
@@ -1450,14 +1467,26 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
             }
         }
     }
+    for (int b : {1, 7}) {
+        for (bool v : {false, true}) {
+            test_cases.emplace_back(new test_get_rows(GGML_TYPE_I32, 256, 5, 4, b, v));
+        }
+    }
 
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 1}));
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {2, 1, 1, 1}));
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 2, 1, 1}));
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 2, 1}));
     test_cases.emplace_back(new test_repeat(GGML_TYPE_F32, {10, 10, 10, 10}, {1, 1, 1, 2}));
+    test_cases.emplace_back(new test_repeat(GGML_TYPE_I32, {10, 10, 10, 10}, {2, 1, 1, 1}));
+    test_cases.emplace_back(new test_repeat(GGML_TYPE_I16, {10, 10, 10, 10}, {1, 1, 1, 2}));
 
-    test_cases.emplace_back(new test_dup());
+    test_cases.emplace_back(new test_dup(GGML_TYPE_F32));
+    test_cases.emplace_back(new test_dup(GGML_TYPE_F16));
+    test_cases.emplace_back(new test_dup(GGML_TYPE_I32));
+    test_cases.emplace_back(new test_dup(GGML_TYPE_I16));
+    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {0, 2, 1, 3}));
+    test_cases.emplace_back(new test_dup(GGML_TYPE_I16, {10, 8, 3, 1}, {1, 2, 0, 3}));
 
     for (ggml_type type : all_types) {
         test_cases.emplace_back(new test_cpy(GGML_TYPE_F32, type, {256, 10, 10, 1}));
@@ -1565,7 +1594,8 @@ static bool test_backend(ggml_backend_t backend, test_mode mode, const char * op
     test_cases.emplace_back(new test_alibi());
     test_cases.emplace_back(new test_im2col());
-    test_cases.emplace_back(new test_concat());
+    test_cases.emplace_back(new test_concat(GGML_TYPE_F32));
+    test_cases.emplace_back(new test_concat(GGML_TYPE_I32));
 
     for (ggml_sort_order order : {GGML_SORT_ASC, GGML_SORT_DESC}) {
         test_cases.emplace_back(new test_argsort(GGML_TYPE_F32, {8, 1, 1, 1}, order));
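A note on the first hunk: init_tensor_uniform reuses the float-filled buffer and writes its raw bytes into the integer tensor, which is what the "This is going to create some weird integers though." comment refers to. A small standalone illustration (not part of the patch) of what each element ends up holding:

    // Each I32 element receives the bit pattern of a uniform float in [-1, 1],
    // because the raw bytes of the float buffer are copied into the int tensor.
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
        float f = 0.5f;                     // a typical value from the uniform init
        int32_t i;
        std::memcpy(&i, &f, sizeof(i));     // same byte copy ggml_backend_tensor_set performs
        std::printf("float %f reinterprets as int32 %d (0x%08x)\n", f, (int) i, (unsigned) i);
        return 0;
    }

The exact values do not matter for these tests: get_rows, repeat, concat and dup on integer types only move bytes, so any bit pattern compares exactly against the CPU backend reference.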