Diffstat (limited to 'ggml.c')
-rw-r--r--   ggml.c   350
1 file changed, 175 insertions, 175 deletions
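
This commit is a mechanical rename: every ggml enum value gains its enum type name as a prefix (GGML_TASK_INIT becomes GGML_TASK_TYPE_INIT, GGML_BACKEND_CPU becomes GGML_BACKEND_TYPE_CPU, GGML_OBJECT_TENSOR becomes GGML_OBJECT_TYPE_TENSOR, GGML_SORT_DESC becomes GGML_SORT_ORDER_DESC, GGML_OPT_OK becomes GGML_OPT_RESULT_OK, and so on); no behavior changes. As a minimal sketch of the pattern, assuming the declarations live in ggml.h (the header is not part of this diff, so everything here other than the renamed value names is an assumption):

    // sketch only, not taken from this diff: each enum value is
    // prefixed with its enum type name
    enum ggml_task_type {
        GGML_TASK_TYPE_INIT,     // was GGML_TASK_INIT
        GGML_TASK_TYPE_COMPUTE,  // was GGML_TASK_COMPUTE
        GGML_TASK_TYPE_FINALIZE, // was GGML_TASK_FINALIZE
    };
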
diff --git a/ggml.c b/ggml.c
index c09a3cad..1d81553f 100644
--- a/ggml.c
+++ b/ggml.c
@@ -2721,7 +2721,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(
}
}
- struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
+ struct ggml_object * const obj_new = ggml_new_object(ctx, GGML_OBJECT_TYPE_TENSOR, GGML_TENSOR_SIZE + obj_alloc_size);
// TODO: for recoverable errors, we would need to free the data allocated from the scratch buffer here
@@ -2729,7 +2729,7 @@ static struct ggml_tensor * ggml_new_tensor_impl(
*result = (struct ggml_tensor) {
/*.type =*/ type,
- /*.backend =*/ GGML_BACKEND_CPU,
+ /*.backend =*/ GGML_BACKEND_TYPE_CPU,
/*.buffer =*/ NULL,
/*.ne =*/ { 1, 1, 1, 1 },
/*.nb =*/ { 0, 0, 0, 0 },
@@ -3302,7 +3302,7 @@ struct ggml_tensor * ggml_get_first_tensor(const struct ggml_context * ctx) {
char * const mem_buffer = ctx->mem_buffer;
while (obj != NULL) {
- if (obj->type == GGML_OBJECT_TENSOR) {
+ if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
return (struct ggml_tensor *)(mem_buffer + obj->offs);
}
@@ -3319,7 +3319,7 @@ struct ggml_tensor * ggml_get_next_tensor(const struct ggml_context * ctx, struc
char * const mem_buffer = ctx->mem_buffer;
while (obj != NULL) {
- if (obj->type == GGML_OBJECT_TENSOR) {
+ if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
return (struct ggml_tensor *)(mem_buffer + obj->offs);
}
@@ -3335,7 +3335,7 @@ struct ggml_tensor * ggml_get_tensor(struct ggml_context * ctx, const char * nam
char * const mem_buffer = ctx->mem_buffer;
while (obj != NULL) {
- if (obj->type == GGML_OBJECT_TENSOR) {
+ if (obj->type == GGML_OBJECT_TYPE_TENSOR) {
struct ggml_tensor * cur = (struct ggml_tensor *)(mem_buffer + obj->offs);
if (strcmp(cur->name, name) == 0) {
return cur;
@@ -5879,7 +5879,7 @@ struct ggml_tensor * ggml_top_k(
int k) {
GGML_ASSERT(a->ne[0] >= k);
- struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_DESC);
+ struct ggml_tensor * result = ggml_argsort(ctx, a, GGML_SORT_ORDER_DESC);
result = ggml_view_4d(ctx, result,
k, result->ne[1], result->ne[2], result->ne[3],
@@ -6673,7 +6673,7 @@ static void ggml_compute_forward_dup_same_cont(
GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
GGML_ASSERT(src0->type == dst->type);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -6705,7 +6705,7 @@ static void ggml_compute_forward_dup_f16(
GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -6978,7 +6978,7 @@ static void ggml_compute_forward_dup_f32(
GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7231,7 +7231,7 @@ static void ggml_compute_forward_dup_bytes(
GGML_ASSERT(ggml_nelements(dst) == ggml_nelements(src0));
GGML_ASSERT(src0->type == dst->type);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7411,7 +7411,7 @@ static void ggml_compute_forward_add_f32(
GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7419,7 +7419,7 @@ static void ggml_compute_forward_add_f32(
const int nth = params->nth;
#ifdef GGML_USE_CLBLAST
- if (src1->backend == GGML_BACKEND_GPU) {
+ if (src1->backend == GGML_BACKEND_TYPE_GPU) {
// TODO: OpenCL kernel support full broadcast
GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
if (ith == 0) {
@@ -7501,7 +7501,7 @@ static void ggml_compute_forward_add_f16_f32(
GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7580,7 +7580,7 @@ static void ggml_compute_forward_add_f16_f16(
GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7636,7 +7636,7 @@ static void ggml_compute_forward_add_q_f32(
GGML_ASSERT(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7774,7 +7774,7 @@ static void ggml_compute_forward_add1_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
GGML_ASSERT(ggml_is_scalar(src1));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7828,7 +7828,7 @@ static void ggml_compute_forward_add1_f16_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
GGML_ASSERT(ggml_is_scalar(src1));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7880,7 +7880,7 @@ static void ggml_compute_forward_add1_f16_f16(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
GGML_ASSERT(ggml_is_scalar(src1));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -7932,7 +7932,7 @@ static void ggml_compute_forward_add1_q_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
GGML_ASSERT(ggml_is_scalar(src1));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8062,7 +8062,7 @@ static void ggml_compute_forward_acc_f32(
size_t offset = ((int32_t *) dst->op_params)[3];
bool inplace = (bool) ((int32_t *) dst->op_params)[4];
- if (!inplace && (params->type == GGML_TASK_INIT)) {
+ if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
if (params->ith != 0) {
return;
}
@@ -8074,7 +8074,7 @@ static void ggml_compute_forward_acc_f32(
ggml_nbytes(dst));
}
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8176,7 +8176,7 @@ static void ggml_compute_forward_sub_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8257,14 +8257,14 @@ static void ggml_compute_forward_mul_f32(
GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
const int ith = params->ith;
const int nth = params->nth;
#if defined(GGML_USE_CLBLAST)
- if (src1->backend == GGML_BACKEND_GPU) {
+ if (src1->backend == GGML_BACKEND_TYPE_GPU) {
// TODO: OpenCL kernel support full broadcast
GGML_ASSERT(ggml_can_repeat_rows(src1, src0));
if (ith == 0) {
@@ -8365,7 +8365,7 @@ static void ggml_compute_forward_div_f32(
GGML_ASSERT(ggml_can_repeat(src1, src0) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8460,7 +8460,7 @@ static void ggml_compute_forward_sqr_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8506,7 +8506,7 @@ static void ggml_compute_forward_sqrt_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8552,7 +8552,7 @@ static void ggml_compute_forward_log_f32(
GGML_ASSERT(params->ith == 0);
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8598,7 +8598,7 @@ static void ggml_compute_forward_sum_f32(
assert(params->ith == 0);
assert(ggml_is_scalar(dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8633,7 +8633,7 @@ static void ggml_compute_forward_sum_f16(
assert(params->ith == 0);
assert(ggml_is_scalar(dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8690,7 +8690,7 @@ static void ggml_compute_forward_sum_rows_f32(
GGML_ASSERT(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8745,7 +8745,7 @@ static void ggml_compute_forward_mean_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8804,7 +8804,7 @@ static void ggml_compute_forward_argmax_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8855,7 +8855,7 @@ static void ggml_compute_forward_repeat_f32(
GGML_ASSERT(params->ith == 0);
GGML_ASSERT(ggml_can_repeat(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8900,7 +8900,7 @@ static void ggml_compute_forward_repeat_f16(
GGML_ASSERT(params->ith == 0);
GGML_ASSERT(ggml_can_repeat(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -8974,7 +8974,7 @@ static void ggml_compute_forward_repeat_back_f32(
GGML_ASSERT(params->ith == 0);
GGML_ASSERT(ggml_can_repeat(dst, src0));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9051,7 +9051,7 @@ static void ggml_compute_forward_concat_f32(
const struct ggml_tensor * src0 = dst->src[0];
const struct ggml_tensor * src1 = dst->src[1];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9123,7 +9123,7 @@ static void ggml_compute_forward_abs_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9169,7 +9169,7 @@ static void ggml_compute_forward_sgn_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9215,7 +9215,7 @@ static void ggml_compute_forward_neg_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9261,7 +9261,7 @@ static void ggml_compute_forward_step_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9307,7 +9307,7 @@ static void ggml_compute_forward_tanh_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9353,7 +9353,7 @@ static void ggml_compute_forward_elu_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9399,7 +9399,7 @@ static void ggml_compute_forward_relu_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9446,7 +9446,7 @@ static void ggml_compute_forward_gelu_f32(
GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9509,7 +9509,7 @@ static void ggml_compute_forward_gelu_quick_f32(
GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9572,7 +9572,7 @@ static void ggml_compute_forward_silu_f32(
GGML_ASSERT(ggml_is_contiguous_except_dim_1(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9633,7 +9633,7 @@ static void ggml_compute_forward_leaky_relu_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9686,7 +9686,7 @@ static void ggml_compute_forward_silu_back_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
GGML_ASSERT(ggml_are_same_shape(src0, grad));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9748,7 +9748,7 @@ static void ggml_compute_forward_hardswish_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9791,7 +9791,7 @@ static void ggml_compute_forward_hardsigmoid_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9837,7 +9837,7 @@ static void ggml_compute_forward_norm_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9912,7 +9912,7 @@ static void ggml_compute_forward_rms_norm_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -9983,7 +9983,7 @@ static void ggml_compute_forward_rms_norm_back_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst) && ggml_are_same_shape(src0, src1));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -10161,7 +10161,7 @@ static void ggml_compute_forward_group_norm_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -10328,7 +10328,7 @@ static void ggml_compute_forward_mul_mat(
#if defined(GGML_USE_CLBLAST)
if (ggml_cl_can_mul_mat(src0, src1, dst)) {
- if (params->ith == 0 && params->type == GGML_TASK_COMPUTE) {
+ if (params->ith == 0 && params->type == GGML_TASK_TYPE_COMPUTE) {
ggml_cl_mul_mat(src0, src1, dst, params->wdata, params->wsize);
}
return;
@@ -10341,7 +10341,7 @@ static void ggml_compute_forward_mul_mat(
const size_t desired_wsize = ne13*ne12*ne_plane*sizeof(float);
UNUSED(desired_wsize);
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (type != GGML_TYPE_F32) {
assert(params->wsize >= desired_wsize);
// parallelize by src0 rows
@@ -10364,7 +10364,7 @@ static void ggml_compute_forward_mul_mat(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -10402,7 +10402,7 @@ static void ggml_compute_forward_mul_mat(
}
#endif
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith != 0) {
return;
}
@@ -10426,7 +10426,7 @@ static void ggml_compute_forward_mul_mat(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -10583,7 +10583,7 @@ static void ggml_compute_forward_mul_mat_id(
#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ne11 + (i1)]
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith != 0) {
return;
}
@@ -10620,7 +10620,7 @@ static void ggml_compute_forward_mul_mat_id(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -10768,7 +10768,7 @@ static void ggml_compute_forward_out_prod_f32(
(ggml_is_contiguous(src1) || ggml_is_transposed(src1));
#endif
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
#if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) // gemm beta will zero dst
if (use_blas) {
return;
@@ -10781,7 +10781,7 @@ static void ggml_compute_forward_out_prod_f32(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -10961,7 +10961,7 @@ static void ggml_compute_forward_out_prod_q_f32(
// TODO: #if defined(GGML_USE_CUBLAS) ggml_cuda_out_prod
// TODO: #if defined(GGML_USE_ACCELERATE) || defined(GGML_USE_OPENBLAS) || defined(GGML_USE_CLBLAST)
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith != 0) {
return;
}
@@ -10969,7 +10969,7 @@ static void ggml_compute_forward_out_prod_q_f32(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11087,7 +11087,7 @@ static void ggml_compute_forward_scale_f32(
GGML_ASSERT(ggml_is_contiguous(dst));
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11159,7 +11159,7 @@ static void ggml_compute_forward_set_f32(
size_t offset = ((int32_t *) dst->op_params)[3];
bool inplace = (bool) ((int32_t *) dst->op_params)[4];
- if (!inplace && (params->type == GGML_TASK_INIT)) {
+ if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
if (params->ith != 0) {
return;
}
@@ -11171,7 +11171,7 @@ static void ggml_compute_forward_set_f32(
ggml_nbytes(dst));
}
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11319,7 +11319,7 @@ static void ggml_compute_forward_get_rows_q(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11359,7 +11359,7 @@ static void ggml_compute_forward_get_rows_f16(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11396,7 +11396,7 @@ static void ggml_compute_forward_get_rows_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11499,14 +11499,14 @@ static void ggml_compute_forward_get_rows_back_f32_f16(
// ggml_compute_forward_dup_same_cont(params, opt0, dst);
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (params->ith != 0) {
return;
}
memset(dst->data, 0, ggml_nbytes(dst));
}
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11538,14 +11538,14 @@ static void ggml_compute_forward_get_rows_back_f32(
// ggml_compute_forward_dup_same_cont(params, opt0, dst);
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (params->ith != 0) {
return;
}
memset(dst->data, 0, ggml_nbytes(dst));
}
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11615,7 +11615,7 @@ static void ggml_compute_forward_diag_f32(
GGML_ASSERT(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11684,7 +11684,7 @@ static void ggml_compute_forward_diag_mask_f32(
GGML_ASSERT(n_past >= 0);
- if (!inplace && (params->type == GGML_TASK_INIT)) {
+ if (!inplace && (params->type == GGML_TASK_TYPE_INIT)) {
if (ith != 0) {
return;
}
@@ -11698,7 +11698,7 @@ static void ggml_compute_forward_diag_mask_f32(
ggml_nbytes(dst));
}
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11772,7 +11772,7 @@ static void ggml_compute_forward_soft_max_f32(
assert(ggml_is_contiguous(dst));
assert(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -11910,7 +11910,7 @@ static void ggml_compute_forward_soft_max_back_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
GGML_ASSERT(ggml_are_same_shape(src1, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12004,7 +12004,7 @@ static void ggml_compute_forward_alibi_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12063,7 +12063,7 @@ static void ggml_compute_forward_alibi_f16(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12170,7 +12170,7 @@ static void ggml_compute_forward_clamp_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12310,7 +12310,7 @@ static void ggml_compute_forward_rope_f32(
const struct ggml_tensor * src0 = dst->src[0];
const struct ggml_tensor * src1 = dst->src[1];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12488,7 +12488,7 @@ static void ggml_compute_forward_rope_f16(
const struct ggml_tensor * src0 = dst->src[0];
const struct ggml_tensor * src1 = dst->src[1];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12719,7 +12719,7 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32(
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith != 0) {
return;
}
@@ -12759,7 +12759,7 @@ static void ggml_compute_forward_conv_transpose_1d_f16_f32(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12818,7 +12818,7 @@ static void ggml_compute_forward_conv_transpose_1d_f32(
GGML_ASSERT(nb00 == sizeof(float));
GGML_ASSERT(nb10 == sizeof(float));
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith != 0) {
return;
}
@@ -12858,7 +12858,7 @@ static void ggml_compute_forward_conv_transpose_1d_f32(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -12962,11 +12962,11 @@ static void ggml_compute_forward_im2col_f32(
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13050,11 +13050,11 @@ static void ggml_compute_forward_im2col_f16(
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13136,7 +13136,7 @@ static void ggml_compute_forward_conv_transpose_2d(
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith != 0) {
return;
}
@@ -13178,7 +13178,7 @@ static void ggml_compute_forward_conv_transpose_2d(
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13230,7 +13230,7 @@ static void ggml_compute_forward_pool_1d_sk_p0(
assert(src->type == GGML_TYPE_F32);
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13299,7 +13299,7 @@ static void ggml_compute_forward_pool_2d(
GGML_ASSERT(src->type == GGML_TYPE_F32);
GGML_ASSERT(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13372,7 +13372,7 @@ static void ggml_compute_forward_upscale_f32(
const struct ggml_tensor * src0 = dst->src[0];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13432,7 +13432,7 @@ static void ggml_compute_forward_pad_f32(
const struct ggml_tensor * src0 = dst->src[0];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13493,7 +13493,7 @@ static void ggml_compute_forward_argsort_f32(
const struct ggml_tensor * src0 = dst->src[0];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13519,8 +13519,8 @@ static void ggml_compute_forward_argsort_f32(
// C doesn't have a functional sort, so we do a bubble sort instead
for (int64_t j = 0; j < ne0; j++) {
for (int64_t k = j + 1; k < ne0; k++) {
- if ((order == GGML_SORT_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
- (order == GGML_SORT_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
+ if ((order == GGML_SORT_ORDER_ASC && src_data[dst_data[j]] > src_data[dst_data[k]]) ||
+ (order == GGML_SORT_ORDER_DESC && src_data[dst_data[j]] < src_data[dst_data[k]])) {
int32_t tmp = dst_data[j];
dst_data[j] = dst_data[k];
dst_data[k] = tmp;
@@ -13603,11 +13603,11 @@ static void ggml_compute_forward_flash_attn_f32(
GGML_ASSERT(nb1 <= nb2);
GGML_ASSERT(nb2 <= nb3);
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -13795,11 +13795,11 @@ static void ggml_compute_forward_flash_attn_f16(
GGML_ASSERT(nb1 <= nb2);
GGML_ASSERT(nb2 <= nb3);
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14054,11 +14054,11 @@ static void ggml_compute_forward_flash_ff_f16(
GGML_ASSERT(nb1 <= nb2);
GGML_ASSERT(nb2 <= nb3);
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14213,14 +14213,14 @@ static void ggml_compute_forward_flash_attn_back_f32(
GGML_ASSERT(nb1 <= nb2);
GGML_ASSERT(nb2 <= nb3);
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith == 0) {
memset(dst->data, 0, nb0*ne0*ne1*ne2*ne3);
}
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14536,7 +14536,7 @@ static void ggml_compute_forward_win_part_f32(
const struct ggml_tensor * src0 = dst->src[0];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14602,7 +14602,7 @@ static void ggml_compute_forward_win_unpart_f32(
const struct ggml_tensor * src0 = dst->src[0];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14730,7 +14730,7 @@ static void ggml_compute_forward_get_rel_pos_f16(
const struct ggml_tensor * src0 = dst->src[0];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14782,14 +14782,14 @@ static void ggml_compute_forward_add_rel_pos_f32(
const struct ggml_tensor * src2 = dst->src[2];
const bool inplace = (bool) ((int32_t *) dst->op_params)[0];
- if (!inplace && params->type == GGML_TASK_INIT) {
+ if (!inplace && params->type == GGML_TASK_TYPE_INIT) {
if (params->ith != 0) {
return;
}
memcpy((char *) dst->data, (char *) src0->data, ggml_nbytes(dst));
return;
}
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14871,7 +14871,7 @@ static void ggml_compute_forward_map_unary_f32(
GGML_ASSERT(ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14920,7 +14920,7 @@ static void ggml_compute_forward_map_binary_f32(
assert(params->ith == 0);
assert(ggml_are_same_shape(src0, src1) && ggml_are_same_shape(src0, dst));
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14969,7 +14969,7 @@ static void ggml_compute_forward_map_custom1_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -14988,7 +14988,7 @@ static void ggml_compute_forward_map_custom2_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -15008,7 +15008,7 @@ static void ggml_compute_forward_map_custom3_f32(
assert(params->ith == 0);
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -15023,7 +15023,7 @@ static void ggml_compute_forward_map_custom1(
const struct ggml_tensor * a = dst->src[0];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -15041,7 +15041,7 @@ static void ggml_compute_forward_map_custom2(
const struct ggml_tensor * a = dst->src[0];
const struct ggml_tensor * b = dst->src[1];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -15060,7 +15060,7 @@ static void ggml_compute_forward_map_custom3(
const struct ggml_tensor * b = dst->src[1];
const struct ggml_tensor * c = dst->src[2];
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -15094,14 +15094,14 @@ static void ggml_compute_forward_cross_entropy_loss_f32(
GGML_ASSERT(params->wsize >= sizeof(float) * (nth + nth * nc));
- if (params->type == GGML_TASK_INIT) {
+ if (params->type == GGML_TASK_TYPE_INIT) {
if (ith == 0) {
memset(sums, 0, sizeof(float) * (nth + nth * nc));
}
return;
}
- if (params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_FINALIZE) {
if (ith == 0) {
float * dp = (float *) dst->data;
ggml_vec_sum_f32(nth, dp, sums);
@@ -15216,7 +15216,7 @@ static void ggml_compute_forward_cross_entropy_loss_back_f32(
const int64_t ith = params->ith;
const int64_t nth = params->nth;
- if (params->type == GGML_TASK_INIT || params->type == GGML_TASK_FINALIZE) {
+ if (params->type == GGML_TASK_TYPE_INIT || params->type == GGML_TASK_TYPE_FINALIZE) {
return;
}
@@ -15323,8 +15323,8 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
if (skip_cpu) {
return;
}
- GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
- GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
+ GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_TYPE_CPU);
+ GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_TYPE_CPU);
#elif defined(GGML_USE_VULKAN)
const bool skip_cpu = ggml_vk_compute_forward_cpu_assist(params, tensor);
#ifdef GGML_VULKAN_CHECK_RESULTS
@@ -15335,8 +15335,8 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
if (skip_cpu) {
return;
}
- GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_CPU);
- GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_CPU);
+ GGML_ASSERT(tensor->src[0] == NULL || tensor->src[0]->backend == GGML_BACKEND_TYPE_CPU);
+ GGML_ASSERT(tensor->src[1] == NULL || tensor->src[1]->backend == GGML_BACKEND_TYPE_CPU);
#endif // GGML_USE_CUBLAS
#ifdef GGML_USE_SYCL
@@ -16882,7 +16882,7 @@ size_t ggml_graph_overhead(void) {
struct ggml_cgraph * ggml_new_graph_custom(struct ggml_context * ctx, size_t size, bool grads) {
const size_t obj_size = ggml_graph_nbytes(size, grads);
- struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_GRAPH, obj_size);
+ struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_GRAPH, obj_size);
struct ggml_cgraph * cgraph = (struct ggml_cgraph *) ((char *) ctx->mem_buffer + obj->offs);
struct ggml_tensor ** data_start = (struct ggml_tensor **) (cgraph + 1);
@@ -17429,7 +17429,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
set_numa_thread_affinity(state->ith);
int node_n = -1;
- int task_phase = GGML_TASK_FINALIZE;
+ int task_phase = GGML_TASK_TYPE_FINALIZE;
while (true) {
if (cplan->abort_callback && cplan->abort_callback(cplan->abort_callback_data)) {
@@ -17441,7 +17441,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
// all other threads are finished and spinning
// do finalize and init here so we don't have synchronize again
struct ggml_compute_params params = {
- /*.type =*/ GGML_TASK_FINALIZE,
+ /*.type =*/ GGML_TASK_TYPE_FINALIZE,
/*.ith =*/ 0,
/*.nth =*/ 0,
/*.wsize =*/ cplan->work_size,
@@ -17472,17 +17472,17 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
if (n_tasks == 1) {
/* INIT */
if (GGML_OP_HAS_INIT[node->op]) {
- params.type = GGML_TASK_INIT;
+ params.type = GGML_TASK_TYPE_INIT;
ggml_compute_forward(&params, node);
}
// TODO: maybe push node_n to the atomic but if other threads see n_tasks is 1,
// they do something more efficient than spinning (?)
- params.type = GGML_TASK_COMPUTE;
+ params.type = GGML_TASK_TYPE_COMPUTE;
ggml_compute_forward(&params, node);
if (GGML_OP_HAS_FINALIZE[node->op]) {
- params.type = GGML_TASK_FINALIZE;
+ params.type = GGML_TASK_TYPE_FINALIZE;
ggml_compute_forward(&params, node);
}
@@ -17496,7 +17496,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
}
}
- task_phase = GGML_TASK_INIT;
+ task_phase = GGML_TASK_TYPE_INIT;
atomic_store(&state->shared->n_active, n_threads);
atomic_store(&state->shared->node_n, node_n);
atomic_store(&state->shared->node_task, task_phase);
@@ -17513,7 +17513,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
const int n_tasks = ggml_get_n_tasks(node, n_threads);
struct ggml_compute_params params = {
- /*.type =*/ GGML_TASK_INIT,
+ /*.type =*/ GGML_TASK_TYPE_INIT,
/*.ith =*/ state->ith,
/*.nth =*/ n_tasks,
/*.wsize =*/ cplan->work_size,
@@ -17527,7 +17527,7 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
}
if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
- task_phase = GGML_TASK_COMPUTE;
+ task_phase = GGML_TASK_TYPE_COMPUTE;
atomic_store(&state->shared->n_active, n_threads);
atomic_store(&state->shared->node_task, task_phase);
}
@@ -17542,12 +17542,12 @@ static thread_ret_t ggml_graph_compute_thread(void * data) {
}
if (state->ith < n_tasks) {
- params.type = GGML_TASK_COMPUTE;
+ params.type = GGML_TASK_TYPE_COMPUTE;
ggml_compute_forward(&params, node);
}
if (atomic_fetch_sub(&state->shared->n_active, 1) == 1) {
- task_phase = GGML_TASK_FINALIZE;
+ task_phase = GGML_TASK_TYPE_FINALIZE;
atomic_store(&state->shared->n_active, n_threads);
atomic_store(&state->shared->node_task, task_phase);
}
@@ -17783,7 +17783,7 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
/*.n_threads =*/ n_threads,
/*.n_active =*/ n_threads,
/*.node_n =*/ -1,
- /*.node_task =*/ GGML_TASK_FINALIZE,
+ /*.node_task =*/ GGML_TASK_TYPE_FINALIZE,
/*.abort_callback =*/ NULL,
/*.abort_callback_data =*/ NULL,
};
@@ -17851,7 +17851,7 @@ int ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
void ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads);
- struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
+ struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
@@ -18659,7 +18659,7 @@ static enum ggml_opt_result ggml_opt_adam(
float * pf = params.past > 0 ? opt->adam.pf->data : NULL; // past function values
struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
- struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
+ struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
bool cancel = false;
@@ -18671,7 +18671,7 @@ static enum ggml_opt_result ggml_opt_adam(
if (callback) {
callback(callback_data, accum_step, &sched, &cancel);
if (cancel) {
- return GGML_OPT_CANCEL;
+ return GGML_OPT_RESULT_CANCEL;
}
}
// ggml_graph_reset (gf);
@@ -18762,7 +18762,7 @@ static enum ggml_opt_result ggml_opt_adam(
if (callback) {
callback(callback_data, accum_step, &sched, &cancel);
if (cancel) {
- return GGML_OPT_CANCEL;;
+ return GGML_OPT_RESULT_CANCEL;;
}
}
// ggml_graph_reset (gf);
@@ -18779,7 +18779,7 @@ static enum ggml_opt_result ggml_opt_adam(
if (fabsf(fx - fx_prev[0])/fx < params.adam.eps_f) {
GGML_PRINT_DEBUG("converged\n");
- return GGML_OPT_OK;
+ return GGML_OPT_RESULT_OK;
}
// delta-based convergence test
@@ -18789,7 +18789,7 @@ static enum ggml_opt_result ggml_opt_adam(
const float rate = (pf[(iter0 + t)%params.past] - fx)/fx;
if (fabsf(rate) < params.delta) {
- return GGML_OPT_OK;
+ return GGML_OPT_RESULT_OK;
}
}
@@ -18805,7 +18805,7 @@ static enum ggml_opt_result ggml_opt_adam(
++n_no_improvement[0];
if (n_no_improvement[0] >= params.max_no_improvement) {
- return GGML_OPT_OK;
+ return GGML_OPT_RESULT_OK;
}
}
}
@@ -18823,7 +18823,7 @@ static enum ggml_opt_result ggml_opt_adam(
}
}
- return GGML_OPT_DID_NOT_CONVERGE;
+ return GGML_OPT_RESULT_DID_NOT_CONVERGE;
}
//
@@ -18904,7 +18904,7 @@ static enum ggml_opt_result linesearch_backtracking(
float sched = 0;
callback(callback_data, accum_step, &sched, cancel);
if (*cancel) {
- return GGML_OPT_CANCEL;
+ return GGML_OPT_RESULT_CANCEL;
}
}
// ggml_graph_reset (gf);
@@ -18977,7 +18977,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
if (params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_WOLFE ||
params.lbfgs.linesearch == GGML_LINESEARCH_BACKTRACKING_STRONG_WOLFE) {
if (params.lbfgs.wolfe <= params.lbfgs.ftol || 1.f <= params.lbfgs.wolfe) {
- return GGML_OPT_INVALID_WOLFE;
+ return GGML_OPT_RESULT_INVALID_WOLFE;
}
}
@@ -19006,7 +19006,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
}
struct ggml_cplan cplan = ggml_graph_plan(gb, params.n_threads);
- struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_WORK_BUFFER, cplan.work_size);
+ struct ggml_object * obj = ggml_new_object(ctx, GGML_OBJECT_TYPE_WORK_BUFFER, cplan.work_size);
cplan.work_data = (uint8_t *)ctx->mem_buffer + obj->offs;
float * x = opt->lbfgs.x->data; // current parameters
@@ -19047,7 +19047,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
float sched = 0;
callback(callback_data, accum_step, &sched, &cancel);
if (cancel) {
- return GGML_OPT_CANCEL;
+ return GGML_OPT_RESULT_CANCEL;
}
}
// ggml_graph_reset (gf);
@@ -19075,7 +19075,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
// already optimized
if (gnorm/xnorm <= params.lbfgs.eps) {
- return GGML_OPT_OK;
+ return GGML_OPT_RESULT_OK;
}
if (opt->just_initialized) {
@@ -19120,7 +19120,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
// way to test and don't want to break something with so many changes lined up
ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
if (cancel) {
- return GGML_OPT_CANCEL;
+ return GGML_OPT_RESULT_CANCEL;
}
if (ls < 0) {
@@ -19143,7 +19143,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
}
if (gnorm/xnorm <= params.lbfgs.eps) {
// converged
- return GGML_OPT_OK;
+ return GGML_OPT_RESULT_OK;
}
// delta-based convergence test
@@ -19153,7 +19153,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
const float rate = (pf[k[0]%params.past] - fx)/fx;
if (fabsf(rate) < params.delta) {
- return GGML_OPT_OK;
+ return GGML_OPT_RESULT_OK;
}
}
@@ -19169,14 +19169,14 @@ static enum ggml_opt_result ggml_opt_lbfgs(
n_no_improvement[0]++;
if (n_no_improvement[0] >= params.max_no_improvement) {
- return GGML_OPT_OK;
+ return GGML_OPT_RESULT_OK;
}
}
}
if (params.lbfgs.n_iter != 0 && params.lbfgs.n_iter < it + 1) {
// reached the maximum number of iterations
- return GGML_OPT_DID_NOT_CONVERGE;
+ return GGML_OPT_RESULT_DID_NOT_CONVERGE;
}
// update vectors s and y:
@@ -19232,17 +19232,17 @@ static enum ggml_opt_result ggml_opt_lbfgs(
GGML_ASSERT(false && "lbfgs failed");
- return GGML_OPT_DID_NOT_CONVERGE;
+ return GGML_OPT_RESULT_DID_NOT_CONVERGE;
}
struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
struct ggml_opt_params result;
switch (type) {
- case GGML_OPT_ADAM:
+ case GGML_OPT_TYPE_ADAM:
{
result = (struct ggml_opt_params) {
- .type = GGML_OPT_ADAM,
+ .type = GGML_OPT_TYPE_ADAM,
.graph_size = GGML_DEFAULT_GRAPH_SIZE,
.n_threads = 1, // FIXME: GGML_DEFAULT_N_THREADS ?
.past = 0,
@@ -19270,10 +19270,10 @@ struct ggml_opt_params ggml_opt_default_params(enum ggml_opt_type type) {
},
};
} break;
- case GGML_OPT_LBFGS:
+ case GGML_OPT_TYPE_LBFGS:
{
result = (struct ggml_opt_params) {
- .type = GGML_OPT_LBFGS,
+ .type = GGML_OPT_TYPE_LBFGS,
.graph_size = GGML_DEFAULT_GRAPH_SIZE,
.n_threads = 1,
.past = 0,
@@ -19318,12 +19318,12 @@ GGML_API void ggml_opt_init(
opt->just_initialized = true;
if (opt->ctx == NULL) {
struct ggml_init_params ctx_opt_params;
- if (opt->params.type == GGML_OPT_ADAM) {
+ if (opt->params.type == GGML_OPT_TYPE_ADAM) {
ctx_opt_params.mem_size = GGML_MEM_ALIGN*3 + ggml_tensor_overhead()*3 + ggml_type_size(GGML_TYPE_F32)*nx*3;
if (opt->params.past > 0) {
ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
}
- } else if (opt->params.type == GGML_OPT_LBFGS) {
+ } else if (opt->params.type == GGML_OPT_TYPE_LBFGS) {
ctx_opt_params.mem_size = GGML_MEM_ALIGN*9 + ggml_tensor_overhead()*9 + ggml_type_size(GGML_TYPE_F32)*(nx*5 + opt->params.lbfgs.m*2 + nx*opt->params.lbfgs.m*2);
if (opt->params.past > 0) {
ctx_opt_params.mem_size += GGML_MEM_ALIGN + ggml_tensor_overhead() + ggml_type_size(GGML_TYPE_F32)*opt->params.past;
@@ -19335,7 +19335,7 @@ GGML_API void ggml_opt_init(
opt->ctx = ggml_init(ctx_opt_params);
}
switch (opt->params.type) {
- case GGML_OPT_ADAM:
+ case GGML_OPT_TYPE_ADAM:
{
opt->adam.g = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
opt->adam.m = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
@@ -19349,7 +19349,7 @@ GGML_API void ggml_opt_init(
ggml_set_zero(opt->adam.pf);
}
} break;
- case GGML_OPT_LBFGS:
+ case GGML_OPT_TYPE_LBFGS:
{
opt->lbfgs.x = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
opt->lbfgs.xp = ggml_new_tensor_1d(opt->ctx, GGML_TYPE_F32, nx);
@@ -19393,13 +19393,13 @@ enum ggml_opt_result ggml_opt(
ctx = ggml_init(params_ctx);
if (ctx == NULL) {
- return GGML_OPT_NO_CONTEXT;
+ return GGML_OPT_RESULT_NO_CONTEXT;
}
free_ctx = true;
}
- enum ggml_opt_result result = GGML_OPT_OK;
+ enum ggml_opt_result result = GGML_OPT_RESULT_OK;
struct ggml_opt_context * opt = (struct ggml_opt_context *) alloca(sizeof(struct ggml_opt_context));
@@ -19438,14 +19438,14 @@ enum ggml_opt_result ggml_opt_resume_g(
void * callback_data) {
// build forward + backward compute graphs
- enum ggml_opt_result result = GGML_OPT_OK;
+ enum ggml_opt_result result = GGML_OPT_RESULT_OK;
switch (opt->params.type) {
- case GGML_OPT_ADAM:
+ case GGML_OPT_TYPE_ADAM:
{
result = ggml_opt_adam(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
} break;
- case GGML_OPT_LBFGS:
+ case GGML_OPT_TYPE_LBFGS:
{
result = ggml_opt_lbfgs(ctx, opt, opt->params, f, gf, gb, callback, callback_data);
} break;