summaryrefslogtreecommitdiff
path: root/ggml.c
diff options
context:
space:
mode:
authorGeorgi Gerganov <ggerganov@gmail.com>2024-06-12 16:00:22 +0300
committerGitHub <noreply@github.com>2024-06-12 16:00:22 +0300
commita9cae48003dfc4fe95b8f5c81682fc6e63425235 (patch)
tree36765340a3c000159361c32f379016b05e3db5d0 /ggml.c
parentbfaa676b0841617d4ef3596e63aca6be1a8eb1b5 (diff)
tests : add non-cont unary tests (#7857)
* tests : add non-cont unary tests * ggml : update unary asserts and "supports_op" ggml-ci
Diffstat (limited to 'ggml.c')
-rw-r--r--ggml.c97
1 file changed, 45 insertions, 52 deletions
diff --git a/ggml.c b/ggml.c
index 5fb9e9a3..2ea1d767 100644
--- a/ggml.c
+++ b/ggml.c
@@ -7345,6 +7345,8 @@ static struct ggml_tensor * ggml_unary_impl(
struct ggml_tensor * a,
enum ggml_unary_op op,
bool inplace) {
+ GGML_ASSERT(ggml_is_contiguous_1(a));
+
bool is_node = false;
if (!inplace && (a->grad)) {
@@ -11009,6 +11011,8 @@ static void ggml_compute_forward_abs_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11018,9 +11022,6 @@ static void ggml_compute_forward_abs_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_abs_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11055,6 +11056,8 @@ static void ggml_compute_forward_sgn_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11064,9 +11067,6 @@ static void ggml_compute_forward_sgn_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_sgn_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11101,6 +11101,8 @@ static void ggml_compute_forward_neg_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11110,9 +11112,6 @@ static void ggml_compute_forward_neg_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_neg_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11147,6 +11146,8 @@ static void ggml_compute_forward_step_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11156,9 +11157,6 @@ static void ggml_compute_forward_step_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_step_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11193,6 +11191,8 @@ static void ggml_compute_forward_tanh_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11202,9 +11202,6 @@ static void ggml_compute_forward_tanh_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_tanh_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11239,6 +11236,8 @@ static void ggml_compute_forward_elu_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11248,9 +11247,6 @@ static void ggml_compute_forward_elu_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_elu_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11285,6 +11281,8 @@ static void ggml_compute_forward_relu_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11294,9 +11292,6 @@ static void ggml_compute_forward_relu_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_relu_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11331,6 +11326,8 @@ static void ggml_compute_forward_sigmoid_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11340,9 +11337,6 @@ static void ggml_compute_forward_sigmoid_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_sigmoid_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11376,9 +11370,9 @@ static void ggml_compute_forward_gelu_f32(
const struct ggml_tensor * src0 = dst->src[0];
- GGML_ASSERT(ggml_is_contiguous_1(src0));
- GGML_ASSERT(ggml_is_contiguous_1(dst));
- GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
+ assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
@@ -11439,9 +11433,9 @@ static void ggml_compute_forward_gelu_quick_f32(
const struct ggml_tensor * src0 = dst->src[0];
- GGML_ASSERT(ggml_is_contiguous_1(src0));
- GGML_ASSERT(ggml_is_contiguous_1(dst));
- GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
+ assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
@@ -11502,9 +11496,9 @@ static void ggml_compute_forward_silu_f32(
const struct ggml_tensor * src0 = dst->src[0];
- GGML_ASSERT(ggml_is_contiguous_1(src0));
- GGML_ASSERT(ggml_is_contiguous_1(dst));
- GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
+ assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
@@ -11565,6 +11559,8 @@ static void ggml_compute_forward_leaky_relu_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11614,11 +11610,11 @@ static void ggml_compute_forward_silu_back_f32(
const struct ggml_tensor * src0 = dst->src[0];
const struct ggml_tensor * grad = dst->src[1];
- GGML_ASSERT(ggml_is_contiguous_1(grad));
- GGML_ASSERT(ggml_is_contiguous_1(src0));
- GGML_ASSERT(ggml_is_contiguous_1(dst));
- GGML_ASSERT(ggml_are_same_shape(src0, dst));
- GGML_ASSERT(ggml_are_same_shape(src0, grad));
+ assert(ggml_is_contiguous_1(grad));
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
+ assert(ggml_are_same_shape(src0, dst));
+ assert(ggml_are_same_shape(src0, grad));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
@@ -11680,6 +11676,8 @@ static void ggml_compute_forward_hardswish_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11689,9 +11687,6 @@ static void ggml_compute_forward_hardswish_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_hardswish_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -11723,6 +11718,8 @@ static void ggml_compute_forward_hardsigmoid_f32(
const struct ggml_tensor * src0 = dst->src[0];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -11732,9 +11729,6 @@ static void ggml_compute_forward_hardsigmoid_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert(dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
ggml_vec_hardsigmoid_f32(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -16681,7 +16675,10 @@ static void ggml_compute_forward_map_unary_f32(
const struct ggml_tensor * src0 = dst->src[0];
- GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(dst));
+ assert(ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
@@ -16690,9 +16687,6 @@ static void ggml_compute_forward_map_unary_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert( dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
fun(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),
@@ -16730,6 +16724,9 @@ static void ggml_compute_forward_map_binary_f32(
const struct ggml_tensor * src1 = dst->src[1];
assert(params->ith == 0);
+ assert(ggml_is_contiguous_1(src0));
+ assert(ggml_is_contiguous_1(src1));
+ assert(ggml_is_contiguous_1(dst));
assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
@@ -16739,10 +16736,6 @@ static void ggml_compute_forward_map_binary_f32(
const int n = ggml_nrows(src0);
const int nc = src0->ne[0];
- assert( dst->nb[0] == sizeof(float));
- assert(src0->nb[0] == sizeof(float));
- assert(src1->nb[0] == sizeof(float));
-
for (int i = 0; i < n; i++) {
fun(nc,
(float *) ((char *) dst->data + i*( dst->nb[1])),