author    Georgi Gerganov <ggerganov@gmail.com>  2023-08-28 14:24:53 +0300
committer GitHub <noreply@github.com>            2023-08-28 14:24:53 +0300
commit    35feac6560387cf0484371af3d9b12bff678e0b9 (patch)
tree      09c321461d0c5a14b2a3aefa6c6b06f2532c94d9 /ggml.c
parent    92b1bbd2ec43c82ec0530ba3c8758846c5790c75 (diff)
ggml : sync (mem align to header + conv_transpose_2d fixes + ggml_alloc) (#2852)
* ggml : sync (mem align to header + conv_transpose_2d fixes) ggml-ci
* ggml-alloc : minor fix
* ggml-alloc : sync more fixes
Diffstat (limited to 'ggml.c')
-rw-r--r--  ggml.c  22
1 file changed, 8 insertions(+), 14 deletions(-)
diff --git a/ggml.c b/ggml.c
index 54f426bc..dadb3075 100644
--- a/ggml.c
+++ b/ggml.c
@@ -157,12 +157,6 @@ typedef void * thread_ret_t;
 //#define GGML_SOFT_MAX_ACCELERATE
 #endif
 
-#if UINTPTR_MAX == 0xFFFFFFFF
-    #define GGML_MEM_ALIGN 4
-#else
-    #define GGML_MEM_ALIGN 16
-#endif
-
 //
 // logging
 //
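
Note on the hunk above: it only deletes the alignment macro from ggml.c; per the commit title ("mem align to header") the same block now lives in the public header, so code outside ggml.c can use the same alignment. A sketch of the relocated block, copied verbatim from the deleted lines (its exact new location, e.g. ggml.h, is an assumption here):

    // as deleted from ggml.c above; presumably now in the public header
    #if UINTPTR_MAX == 0xFFFFFFFF
        // 32-bit pointers: 4-byte alignment
        #define GGML_MEM_ALIGN 4
    #else
        // 64-bit pointers: 16-byte alignment
        #define GGML_MEM_ALIGN 16
    #endif
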
@@ -7098,11 +7092,13 @@ struct ggml_tensor * ggml_conv_transpose_2d_p0(
     };
 
     struct ggml_tensor* result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
+
+    ggml_set_op_params_i32(result, 0, stride);
+
     result->op = GGML_OP_CONV_TRANSPOSE_2D;
     result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
     result->src[0] = a;
     result->src[1] = b;
-    result->src[2] = ggml_new_i32(ctx, stride);
 
     return result;
 }
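
This is the core conv_transpose_2d fix: the stride used to ride along as a one-element i32 tensor in result->src[2] (allocated with ggml_new_i32), and is now packed into the result tensor's own op_params scratch area instead. A rough sketch of how such accessors can be implemented, assuming struct ggml_tensor carries a small fixed-size op_params byte array with GGML_MAX_OP_PARAMS as its size in bytes (the bodies below are illustrative, not copied from this commit):

    #include <assert.h>
    #include <stdint.h>

    // store a scalar option in the tensor's parameter blob, slot i
    static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
        assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t)); // stay inside the blob
        ((int32_t *) tensor->op_params)[i] = value;
    }

    // read it back at compute time
    static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
        assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
        return ((const int32_t *) tensor->op_params)[i];
    }

Storing the scalar inline avoids allocating a context tensor just to carry an option, and keeps src[] reserved for real data dependencies, which is friendlier to graph allocators such as ggml-alloc.
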
@@ -13498,7 +13494,6 @@ static void ggml_compute_forward_conv_transpose_2d(
         const struct ggml_compute_params * params,
         const struct ggml_tensor * src0,
         const struct ggml_tensor * src1,
-        const struct ggml_tensor * opt0,
         struct ggml_tensor * dst) {
     GGML_ASSERT(src0->type == GGML_TYPE_F16);
     GGML_ASSERT(src1->type == GGML_TYPE_F32);
@@ -13558,7 +13553,7 @@ static void ggml_compute_forward_conv_transpose_2d(
         return;
     }
 
-    const int32_t stride = ((const int32_t*)(opt0->data))[0];
+    const int32_t stride = ggml_get_op_params_i32(dst, 0);
 
     // total patches in dst
    const int np = ne2;
@@ -13571,7 +13566,7 @@ static void ggml_compute_forward_conv_transpose_2d(
     const int ip1 = MIN(ip0 + dp, np);
 
     ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
-    ggml_fp16_t * const wdata_src = (ggml_fp16_t *) params->wdata + nk;
+    ggml_fp16_t * const wdata_src = wdata + nk;
 
     for (int i2 = ip0; i2 < ip1; i2++) { // Cout
         float * dst_data = (float *)((char *) dst->data + i2*nb2);
@@ -13583,9 +13578,8 @@ static void ggml_compute_forward_conv_transpose_2d(
                     for (int i00 = 0; i00 < ne00; i00++) {
                         float v = 0;
                         ggml_vec_dot_f16(ne03, &v,
-                                (ggml_fp16_t *) wdata_src + i1n,
-                                (ggml_fp16_t *) wdata_kernel + i01*ne00*ne03 + i00*ne03);
-
+                                wdata_src + i1n,
+                                wdata_kernel + i01*ne00*ne03 + i00*ne03);
                         dst_data[(i11*stride + i01)*ne0 + i10*stride + i00] += v;
                     }
                 }
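
The two compute-side edits above are behavior-preserving cleanups: wdata_src is now derived from the already-typed wdata pointer instead of re-casting params->wdata, and the redundant (ggml_fp16_t *) casts in the ggml_vec_dot_f16 call disappear because both pointers already have that type. The scratch layout they rely on, sketched from the nk offset (illustrative, inferred from the code rather than stated in the commit):

    // assumed layout of params->wdata:
    //   [ rearranged kernel: nk half-floats | rearranged source: rest ]
    ggml_fp16_t * const wdata     = (ggml_fp16_t *) params->wdata; // kernel area
    ggml_fp16_t * const wdata_src = wdata + nk;                    // source area
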
@@ -15732,7 +15726,7 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
             } break;
         case GGML_OP_CONV_TRANSPOSE_2D:
             {
-                ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor->src[2], tensor);
+                ggml_compute_forward_conv_transpose_2d(params, tensor->src[0], tensor->src[1], tensor);
             } break;
         case GGML_OP_POOL_1D:
             {
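
End-to-end, the stride now round-trips through op_params rather than through a third source tensor: stored once when the graph is built, read back inside the compute kernel from dst. A minimal usage sketch (ctx, kernel and input are assumed to be created elsewhere; ggml_conv_transpose_2d_p0 is the builder shown in the second hunk):

    // graph build: the stride lands in op_params slot 0 of the result tensor
    struct ggml_tensor * out = ggml_conv_transpose_2d_p0(ctx, kernel, input, /*stride=*/2);

    // compute time: the kernel reads it back from the dst tensor itself
    // const int32_t stride = ggml_get_op_params_i32(out, 0);
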