author    Georgi Gerganov <ggerganov@gmail.com>  2023-10-04 15:29:58 +0300
committer GitHub <noreply@github.com>            2023-10-04 15:29:58 +0300
commit    f93af02488179b9c52d0d391b08ae4c4d891b8d3 (patch)
tree      f5dec5ce7e832e4c5a6d40bb76ef9865e27488ac
parent    f72f8f22c9cb60465b2e79df2767e4ba9604e576 (diff)
sync : ggml (conv 1d + 2d updates, UB fixes) (#3468)
* sync : ggml (conv 1d + 2d updates)

  ggml-ci

* ggml : fix UB in q5_0 and q5_1 quantize code

  ggml.c:1033:39: runtime error: left shift of 1 by 31 places cannot be represented in type 'int'
  SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior

  ggml.c:1081:39: runtime error: left shift of 1 by 31 places cannot be represented in type 'int'
  SUMMARY: UndefinedBehaviorSanitizer: undefined-behavior

  ggml-ci

* tests : fix UB in test-quantize-perf
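Note: the UB reported by UBSan comes from the q5 high-bit packing loop. `xi & 0x10` has type int, so on the last iteration the expression shifts a 1 into bit 31 of a signed int, which is undefined; masking with 0x10u promotes the whole expression to unsigned int, where the shift is well-defined. A minimal sketch of the pattern (names are illustrative, not the ggml originals):

    #include <stdint.h>

    /* qk/2 == 16, so the shift amount j + qk/2 reaches 31 on the last iteration */
    static uint32_t pack_q5_high_bits(const uint8_t *xi0, const uint8_t *xi1, int qk) {
        uint32_t qh = 0;
        for (int j = 0; j < qk/2; j++) {
            /* before the fix: (xi0[j] & 0x10) has type int, and shifting a 1
             * into bit 31 of an int is undefined behavior;
             * after the fix: 0x10u promotes the operand to unsigned int, so
             * the shift by 31 is well-defined */
            qh |= ((xi0[j] & 0x10u) >> 4) << (j + 0);
            qh |= ((xi1[j] & 0x10u) >> 4) << (j + qk/2);
        }
        return qh;
    }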
-rw-r--r--  ggml.c                          817
-rw-r--r--  ggml.h                           13
-rw-r--r--  k_quants.c                        2
-rw-r--r--  tests/test-grad0.cpp             20
-rw-r--r--  tests/test-opt.cpp               29
-rw-r--r--  tests/test-quantize-perf.cpp     29
6 files changed, 628 insertions, 282 deletions
diff --git a/ggml.c b/ggml.c
index dd1d00bc..4a94b0f3 100644
--- a/ggml.c
+++ b/ggml.c
@@ -1032,8 +1032,8 @@ static void quantize_row_q5_0_reference(const float * restrict x, block_q5_0 * r
y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
// get the 5-th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
}
memcpy(&y[i].qh, &qh, sizeof(qh));
@@ -1080,8 +1080,8 @@ static void quantize_row_q5_1_reference(const float * restrict x, block_q5_1 * r
y[i].qs[j] = (xi0 & 0x0F) | ((xi1 & 0x0F) << 4);
// get the 5-th bit and store it in qh at the right position
- qh |= ((xi0 & 0x10) >> 4) << (j + 0);
- qh |= ((xi1 & 0x10) >> 4) << (j + qk/2);
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + qk/2);
}
memcpy(&y[i].qh, &qh, sizeof(y[i].qh));
@@ -4081,12 +4081,16 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
"ALIBI",
"CLAMP",
"CONV_1D",
+ "CONV_TRANSPOSE_1D",
"CONV_2D",
"CONV_TRANSPOSE_2D",
"POOL_1D",
"POOL_2D",
"UPSCALE",
+ "CONV_1D_STAGE_0",
+ "CONV_1D_STAGE_1",
+
"FLASH_ATTN",
"FLASH_FF",
"FLASH_ATTN_BACK",
@@ -4112,7 +4116,7 @@ static const char * GGML_OP_NAME[GGML_OP_COUNT] = {
"CROSS_ENTROPY_LOSS_BACK",
};
-static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");
+static_assert(GGML_OP_COUNT == 71, "GGML_OP_COUNT != 71");
static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
"none",
@@ -4163,12 +4167,16 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
"alibi(x)",
"clamp(x)",
"conv_1d(x)",
+ "conv_transpose_1d(x)",
"conv_2d(x)",
"conv_transpose_2d(x)",
"pool_1d(x)",
"pool_2d(x)",
"upscale(x)",
+ "conv_1d_stage_0(x)",
+ "conv_1d_stage_1(x)",
+
"flash_attn(x)",
"flash_ff(x)",
"flash_attn_back(x)",
@@ -4194,7 +4202,7 @@ static const char * GGML_OP_SYMBOL[GGML_OP_COUNT] = {
"cross_entropy_loss_back(x,y)",
};
-static_assert(GGML_OP_COUNT == 68, "GGML_OP_COUNT != 68");
+static_assert(GGML_OP_COUNT == 71, "GGML_OP_COUNT != 71");
static_assert(GGML_OP_POOL_COUNT == 2, "GGML_OP_POOL_COUNT != 2");
@@ -4223,7 +4231,10 @@ static void ggml_setup_op_has_task_pass(void) {
p[GGML_OP_DIAG_MASK_INF ] = true;
p[GGML_OP_DIAG_MASK_ZERO ] = true;
p[GGML_OP_CONV_1D ] = true;
+ p[GGML_OP_CONV_1D_STAGE_0 ] = true;
+ p[GGML_OP_CONV_1D_STAGE_1 ] = true;
p[GGML_OP_CONV_2D ] = true;
+ p[GGML_OP_CONV_TRANSPOSE_1D ] = true;
p[GGML_OP_CONV_TRANSPOSE_2D ] = true;
p[GGML_OP_FLASH_ATTN_BACK ] = true;
p[GGML_OP_CROSS_ENTROPY_LOSS ] = true;
@@ -6746,7 +6757,6 @@ struct ggml_tensor * ggml_cont_4d(
return result;
}
-
// ggml_reshape
struct ggml_tensor * ggml_reshape(
@@ -7504,14 +7514,17 @@ static int64_t ggml_calc_conv_output_size(int64_t ins, int64_t ks, int s, int p,
return (ins + 2 * p - d * (ks - 1) - 1) / s + 1;
}
-GGML_API struct ggml_tensor * ggml_conv_1d(
- struct ggml_context * ctx,
- struct ggml_tensor * a,
- struct ggml_tensor * b,
- int s0,
- int p0,
- int d0) {
- GGML_ASSERT(ggml_is_matrix(b));
+// im2col: [N, IC, IL] => [N, OL, IC*K]
+// a: [OC,IC, K]
+// b: [N, IC, IL]
+// result: [N, OL, IC*K]
+static struct ggml_tensor * ggml_conv_1d_stage_0(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int p0,
+ int d0) {
GGML_ASSERT(a->ne[1] == b->ne[1]);
bool is_node = false;
@@ -7520,16 +7533,54 @@ GGML_API struct ggml_tensor * ggml_conv_1d(
is_node = true;
}
+ const int64_t OL = ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0);
+
const int64_t ne[4] = {
- ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
- a->ne[2], 1, 1,
+ a->ne[1] * a->ne[0],
+ OL,
+ b->ne[2],
+ 1,
};
- struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F16, 4, ne);
int32_t params[] = { s0, p0, d0 };
ggml_set_op_params(result, params, sizeof(params));
- result->op = GGML_OP_CONV_1D;
+ result->op = GGML_OP_CONV_1D_STAGE_0;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src[0] = a;
+ result->src[1] = b;
+
+ return result;
+}
+
+// ggml_conv_1d_stage_1
+
+// gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
+// a: [OC, IC, K]
+// b: [N, OL, IC * K]
+// result: [N, OC, OL]
+static struct ggml_tensor * ggml_conv_1d_stage_1(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b) {
+
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ const int64_t ne[4] = {
+ b->ne[1],
+ a->ne[2],
+ b->ne[2],
+ 1,
+ };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
+
+ result->op = GGML_OP_CONV_1D_STAGE_1;
result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
result->src[0] = a;
result->src[1] = b;
@@ -7537,6 +7588,53 @@ GGML_API struct ggml_tensor * ggml_conv_1d(
return result;
}
+// ggml_conv_1d
+
+GGML_API struct ggml_tensor * ggml_conv_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int p0,
+ int d0) {
+ struct ggml_tensor * result = ggml_conv_1d_stage_0(ctx, a, b, s0, p0, d0);
+ result = ggml_conv_1d_stage_1(ctx, a, result);
+ return result;
+}
+
+// GGML_API struct ggml_tensor * ggml_conv_1d(
+// struct ggml_context * ctx,
+// struct ggml_tensor * a,
+// struct ggml_tensor * b,
+// int s0,
+// int p0,
+// int d0) {
+// GGML_ASSERT(ggml_is_matrix(b));
+// GGML_ASSERT(a->ne[1] == b->ne[1]);
+// bool is_node = false;
+
+// if (a->grad || b->grad) {
+// GGML_ASSERT(false); // TODO: implement backward
+// is_node = true;
+// }
+
+// const int64_t ne[4] = {
+// ggml_calc_conv_output_size(b->ne[0], a->ne[0], s0, p0, d0),
+// a->ne[2], 1, 1,
+// };
+// struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 2, ne);
+
+// int32_t params[] = { s0, p0, d0 };
+// ggml_set_op_params(result, params, sizeof(params));
+
+// result->op = GGML_OP_CONV_1D;
+// result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+// result->src[0] = a;
+// result->src[1] = b;
+
+// return result;
+// }
+
// ggml_conv_1d_ph
struct ggml_tensor* ggml_conv_1d_ph(
@@ -7548,6 +7646,50 @@ struct ggml_tensor* ggml_conv_1d_ph(
return ggml_conv_1d(ctx, a, b, s, a->ne[0] / 2, d);
}
+// ggml_conv_transpose_1d
+
+static int64_t ggml_calc_conv_transpose_1d_output_size(int64_t ins, int64_t ks, int s, int p, int d) {
+ return (ins - 1) * s - 2 * p + d * (ks - 1) + 1;
+}
+
+GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int p0,
+ int d0) {
+ GGML_ASSERT(ggml_is_matrix(b));
+ GGML_ASSERT(a->ne[2] == b->ne[1]);
+ GGML_ASSERT(a->ne[3] == 1);
+
+ GGML_ASSERT(p0 == 0);
+ GGML_ASSERT(d0 == 1);
+
+ bool is_node = false;
+
+ if (a->grad || b->grad) {
+ GGML_ASSERT(false); // TODO: implement backward
+ is_node = true;
+ }
+
+ const int64_t ne[4] = {
+ ggml_calc_conv_transpose_1d_output_size(b->ne[0], a->ne[0], s0, 0 /*p0*/, 1 /*d0*/),
+ a->ne[1], b->ne[2], 1,
+ };
+ struct ggml_tensor * result = ggml_new_tensor(ctx, GGML_TYPE_F32, 4, ne);
+
+ int32_t params[] = { s0, p0, d0 };
+ ggml_set_op_params(result, params, sizeof(params));
+
+ result->op = GGML_OP_CONV_TRANSPOSE_1D;
+ result->grad = is_node ? ggml_dup_tensor(ctx, result) : NULL;
+ result->src[0] = a;
+ result->src[1] = b;
+
+ return result;
+}
+
// ggml_conv_2d
struct ggml_tensor * ggml_conv_2d(
@@ -13687,7 +13829,7 @@ static void ggml_compute_forward_rope_back(
// ggml_compute_forward_conv_1d
-static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
+static void ggml_compute_forward_conv_1d_f16_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
@@ -13705,42 +13847,33 @@ static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
const int nth = params->nth;
const int nk = ne00;
- const int nh = nk/2;
- const int ew0 = ggml_up32(ne01);
+ // size of the convolution row - the kernel size unrolled across all input channels
+ const int ew0 = nk*ne01;
+
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+ const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
- {
- ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- for (int64_t i01 = 0; i01 < ne01; i01++) {
- const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
- ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
- for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
- }
- }
- }
- }
+ for (int64_t i11 = 0; i11 < ne11; i11++) {
+ const float * const src = (float *)((char *) src1->data + i11*nb11);
+ ggml_fp16_t * dst_data = wdata;
- // prepare source data (src1)
- {
- ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
+ for (int64_t i0 = 0; i0 < ne0; i0++) {
+ for (int64_t ik = 0; ik < nk; ik++) {
+ const int idx0 = i0*s0 + ik*d0 - p0;
- for (int64_t i11 = 0; i11 < ne11; i11++) {
- const float * const src = (float *)((char *) src1->data + i11*nb11);
- ggml_fp16_t * dst_data = wdata;
- for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
+ if(!(idx0 < 0 || idx0 >= ne10)) {
+ dst_data[i0*ew0 + i11*nk + ik] = GGML_FP32_TO_FP16(src[idx0]);
+ }
}
}
}
@@ -13753,7 +13886,7 @@ static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
}
// total rows in dst
- const int nr = ne02;
+ const int nr = ne2;
// rows per thread
const int dr = (nr + nth - 1)/nth;
@@ -13762,23 +13895,22 @@ static void ggml_compute_forward_conv_1d_s1_ph_f16_f32(
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
- for (int i1 = ir0; i1 < ir1; i1++) {
- float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; ++i0) {
- dst_data[i0] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f16(ew0, &v,
- (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0] += v;
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+
+ for (int i2 = 0; i2 < ne2; i2++) {
+ for (int i1 = ir0; i1 < ir1; i1++) {
+ float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1);
+
+ for (int i0 = 0; i0 < ne0; i0++) {
+ ggml_vec_dot_f16(ew0, dst_data + i0,
+ (ggml_fp16_t *) ((char *) src0->data + i1*nb02),
+ (ggml_fp16_t *) wdata + i2*nb2 + i0*ew0);
}
}
}
}
-static void ggml_compute_forward_conv_1d_s1_ph_f32(
+static void ggml_compute_forward_conv_1d_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
@@ -13796,42 +13928,32 @@ static void ggml_compute_forward_conv_1d_s1_ph_f32(
const int nth = params->nth;
const int nk = ne00;
- const int nh = nk/2;
- const int ew0 = ggml_up32(ne01);
+ const int ew0 = nk*ne01;
+
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+ const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(float));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
- {
- float * const wdata = (float *) params->wdata + 0;
+ float * const wdata = (float *) params->wdata + 0;
- for (int64_t i02 = 0; i02 < ne02; i02++) {
- for (int64_t i01 = 0; i01 < ne01; i01++) {
- const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
- float * dst_data = wdata + i02*ew0*ne00;
- for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
- }
- }
- }
- }
+ for (int64_t i11 = 0; i11 < ne11; i11++) {
+ const float * const src = (float *)((char *) src1->data + i11*nb11);
+ float * dst_data = wdata;
- // prepare source data (src1)
- {
- float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
+ for (int64_t i0 = 0; i0 < ne0; i0++) {
+ for (int64_t ik = 0; ik < nk; ik++) {
+ const int idx0 = i0*s0 + ik*d0 - p0;
- for (int64_t i11 = 0; i11 < ne11; i11++) {
- const float * const src = (float *)((char *) src1->data + i11*nb11);
- float * dst_data = wdata;
- for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = src[i10];
+ if(!(idx0 < 0 || idx0 >= ne10)) {
+ dst_data[i0*ew0 + i11*nk + ik] = src[idx0];
+ }
}
}
}
@@ -13853,35 +13975,242 @@ static void ggml_compute_forward_conv_1d_s1_ph_f32(
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
- for (int i1 = ir0; i1 < ir1; i1++) {
- float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; ++i0) {
- dst_data[i0] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f32(ew0, &v,
- (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0] += v;
+ float * const wdata = (float *) params->wdata + 0;
+
+ for (int i2 = 0; i2 < ne2; i2++) {
+ for (int i1 = ir0; i1 < ir1; i1++) {
+ float * dst_data = (float *)((char *) dst->data + i2*nb2 + i1*nb1);
+
+ for (int i0 = 0; i0 < ne0; i0++) {
+ ggml_vec_dot_f32(ew0, dst_data + i0,
+ (float *) ((char *) src0->data + i1*nb02),
+ (float *) wdata + i2*nb2 + i0*ew0);
+ }
+ }
+ }
+}
+
+static void gemm_f16_out_f32(int64_t m, int64_t n, int64_t k,
+ ggml_fp16_t * A,
+ ggml_fp16_t * B,
+ float * C,
+ const int ith, const int nth) {
+ // does not seem to make a difference
+ int64_t m0, m1, n0, n1;
+ // patches per thread
+ if (m > n) {
+ n0 = 0;
+ n1 = n;
+
+ // total patches in dst
+ const int np = m;
+
+ // patches per thread
+ const int dp = (np + nth - 1)/nth;
+
+ // patch range for this thread
+ m0 = dp*ith;
+ m1 = MIN(m0 + dp, np);
+ } else {
+ m0 = 0;
+ m1 = m;
+
+ // total patches in dst
+ const int np = n;
+
+ // patches per thread
+ const int dp = (np + nth - 1)/nth;
+
+ // patch range for this thread
+ n0 = dp*ith;
+ n1 = MIN(n0 + dp, np);
+ }
+
+ // block-tiling attempt
+ int64_t blck_n = 16;
+ int64_t blck_m = 16;
+
+ // int64_t CACHE_SIZE = 2 * 1024 * 1024; // 2MB
+ // int64_t blck_size = CACHE_SIZE / (sizeof(float) + 2 * sizeof(ggml_fp16_t) * K);
+ // if (blck_size > 0) {
+ // blck_0 = 4;
+ // blck_1 = blck_size / blck_0;
+ // if (blck_1 < 0) {
+ // blck_1 = 1;
+ // }
+ // // blck_0 = (int64_t)sqrt(blck_size);
+ // // blck_1 = blck_0;
+ // }
+ // // printf("%zd %zd %zd %zd\n", blck_size, K, blck_0, blck_1);
+
+ for (int j = n0; j < n1; j+=blck_n) {
+ for (int i = m0; i < m1; i+=blck_m) {
+ // printf("i j k => %d %d %d\n", i, j, K);
+ for (int ii = i; ii < i + blck_m && ii < m1; ii++) {
+ for (int jj = j; jj < j + blck_n && jj < n1; jj++) {
+ ggml_vec_dot_f16(k,
+ C + ii*n + jj,
+ A + ii * k,
+ B + jj * k);
+ }
}
}
}
}
-static void ggml_compute_forward_conv_1d_s1_ph(
+// src0: kernel [OC, IC, K]
+// src1: signal [N, IC, IL]
+// dst: result [N, OL, IC*K]
+static void ggml_compute_forward_conv_1d_stage_0_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
struct ggml_tensor * dst) {
- switch (src0->type) {
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F16);
+
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ GGML_TENSOR_BINARY_OP_LOCALS;
+
+ const int64_t N = ne12;
+ const int64_t IC = ne11;
+ const int64_t IL = ne10;
+
+ const int64_t K = ne00;
+
+ const int64_t OL = ne1;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+ const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
+
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb10 == sizeof(float));
+
+ if (params->type == GGML_TASK_INIT) {
+ memset(dst->data, 0, ggml_nbytes(dst));
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ // im2col: [N, IC, IL] => [N, OL, IC*K]
+ {
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) dst->data;
+
+ for (int64_t in = 0; in < N; in++) {
+ for (int64_t iol = 0; iol < OL; iol++) {
+ for (int64_t iic = ith; iic < IC; iic+=nth) {
+
+ // micro kernel
+ ggml_fp16_t * dst_data = wdata + (in*OL + iol)*(IC*K); // [IC, K]
+ const float * const src_data = (float *)((char *) src1->data + in*nb12 + iic*nb11); // [IL]
+
+ for (int64_t ik = 0; ik < K; ik++) {
+ const int64_t iil = iol*s0 + ik*d0 - p0;
+
+ if (!(iil < 0 || iil >= IL)) {
+ dst_data[iic*K + ik] = GGML_FP32_TO_FP16(src_data[iil]);
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+// gemm: [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
+// src0: [OC, IC, K]
+// src1: [N, OL, IC * K]
+// result: [N, OC, OL]
+static void ggml_compute_forward_conv_1d_stage_1_f16(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F16);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+ int64_t t0 = ggml_perf_time_us();
+ UNUSED(t0);
+
+ if (params->type == GGML_TASK_INIT) {
+ return;
+ }
+
+ if (params->type == GGML_TASK_FINALIZE) {
+ return;
+ }
+
+ GGML_TENSOR_BINARY_OP_LOCALS;
+
+ GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb10 == sizeof(ggml_fp16_t));
+ GGML_ASSERT(nb0 == sizeof(float));
+
+ const int N = ne12;
+ const int OL = ne11;
+
+ const int OC = ne02;
+ const int IC = ne01;
+ const int K = ne00;
+
+ const int ith = params->ith;
+ const int nth = params->nth;
+
+ int64_t m = OC;
+ int64_t n = OL;
+ int64_t k = IC * K;
+
+ // [N, OC, OL] = [OC, IC * K] x [N*OL, IC * K]
+ for (int i = 0; i < N; i++) {
+ ggml_fp16_t * A = (ggml_fp16_t *)src0->data; // [m, k]
+ ggml_fp16_t * B = (ggml_fp16_t *)src1->data + i * m * k; // [n, k]
+ float * C = (float *)dst->data + i * m * n; // [m, n]
+
+ gemm_f16_out_f32(m, n, k, A, B, C, ith, nth);
+ }
+}
+
+static void ggml_compute_forward_conv_1d(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch(src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_conv_1d_s1_ph_f16_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_1d_f16_f32(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_conv_1d_s1_ph_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_1d_f32(params, src0, src1, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+static void ggml_compute_forward_conv_1d_stage_0(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch(src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_conv_1d_stage_0_f32(params, src0, src1, dst);
} break;
default:
{
@@ -13890,7 +14219,26 @@ static void ggml_compute_forward_conv_1d_s1_ph(
}
}
-static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
+static void ggml_compute_forward_conv_1d_stage_1(
+ const struct ggml_compute_params * params,
+ const struct ggml_tensor * src0,
+ const struct ggml_tensor * src1,
+ struct ggml_tensor * dst) {
+ switch(src0->type) {
+ case GGML_TYPE_F16:
+ {
+ ggml_compute_forward_conv_1d_stage_1_f16(params, src0, src1, dst);
+ } break;
+ default:
+ {
+ GGML_ASSERT(false);
+ } break;
+ }
+}
+
+// ggml_compute_forward_conv_transpose_1d
+
+static void ggml_compute_forward_conv_transpose_1d_f16_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
@@ -13907,43 +14255,38 @@ static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
const int ith = params->ith;
const int nth = params->nth;
- const int nk = ne00;
- const int nh = nk/2;
-
- const int ew0 = ggml_up32(ne01);
+ const int nk = ne00*ne01*ne02;
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(ggml_fp16_t));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
+ // permute kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
{
ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
for (int64_t i02 = 0; i02 < ne02; i02++) {
for (int64_t i01 = 0; i01 < ne01; i01++) {
const ggml_fp16_t * const src = (ggml_fp16_t *)((char *) src0->data + i02*nb02 + i01*nb01);
- ggml_fp16_t * dst_data = wdata + i02*ew0*ne00;
+ ggml_fp16_t * dst_data = wdata + i01*ne00*ne02;
for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
+ dst_data[i00*ne02 + i02] = src[i00];
}
}
}
}
- // prepare source data (src1)
+ // permute source data (src1) from (L x Cin) to (Cin x L)
{
- ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + ne02*ew0*ne00;
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + nk;
+ ggml_fp16_t * dst_data = wdata;
for (int64_t i11 = 0; i11 < ne11; i11++) {
const float * const src = (float *)((char *) src1->data + i11*nb11);
- ggml_fp16_t * dst_data = wdata;
for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = GGML_FP32_TO_FP16(src[i10]);
+ dst_data[i10*ne11 + i11] = GGML_FP32_TO_FP16(src[i10]);
}
}
}
@@ -13955,8 +14298,10 @@ static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
return;
}
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+
// total rows in dst
- const int nr = ne02;
+ const int nr = ne1;
// rows per thread
const int dr = (nr + nth - 1)/nth;
@@ -13965,23 +14310,26 @@ static void ggml_compute_forward_conv_1d_s2_ph_f16_f32(
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
+ ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
+ ggml_fp16_t * const wdata_src = wdata + nk;
+
for (int i1 = ir0; i1 < ir1; i1++) {
float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
- dst_data[i0/2] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f16(ew0, &v,
- (ggml_fp16_t *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (ggml_fp16_t *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0/2] += v;
+ ggml_fp16_t * wdata_kernel = wdata + i1*ne02*ne00;
+ for (int i10 = 0; i10 < ne10; i10++) {
+ const int i1n = i10*ne11;
+ for (int i00 = 0; i00 < ne00; i00++) {
+ float v = 0;
+ ggml_vec_dot_f16(ne02, &v,
+ (ggml_fp16_t *) wdata_src + i1n,
+ (ggml_fp16_t *) wdata_kernel + i00*ne02);
+ dst_data[i10*s0 + i00] += v;
}
}
}
}
-static void ggml_compute_forward_conv_1d_s2_ph_f32(
+static void ggml_compute_forward_conv_transpose_1d_f32(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
@@ -13998,29 +14346,24 @@ static void ggml_compute_forward_conv_1d_s2_ph_f32(
const int ith = params->ith;
const int nth = params->nth;
- const int nk = ne00;
- const int nh = nk/2;
-
- const int ew0 = ggml_up32(ne01);
+ const int nk = ne00*ne01*ne02;
- GGML_ASSERT(ne00 % 2 == 1); // TODO: support even kernel sizes
GGML_ASSERT(nb00 == sizeof(float));
GGML_ASSERT(nb10 == sizeof(float));
if (params->type == GGML_TASK_INIT) {
- // TODO: fix this memset (wsize is overestimated)
memset(params->wdata, 0, params->wsize);
- // prepare kernel data (src0)
+ // prepare kernel data (src0) from (K x Cout x Cin) to (Cin x K x Cout)
{
float * const wdata = (float *) params->wdata + 0;
for (int64_t i02 = 0; i02 < ne02; i02++) {
for (int64_t i01 = 0; i01 < ne01; i01++) {
const float * const src = (float *)((char *) src0->data + i02*nb02 + i01*nb01);
- float * dst_data = wdata + i02*ew0*ne00;
+ float * dst_data = wdata + i01*ne00*ne02;
for (int64_t i00 = 0; i00 < ne00; i00++) {
- dst_data[i00*ew0 + i01] = src[i00];
+ dst_data[i01*ne00*ne02 + i00*ne02 + i02] = src[i00];
}
}
}
@@ -14028,13 +14371,13 @@ static void ggml_compute_forward_conv_1d_s2_ph_f32(
// prepare source data (src1)
{
- float * const wdata = (float *) params->wdata + ne02*ew0*ne00;
+ float * const wdata = (float *) params->wdata + nk;
+ float * dst_data = wdata;
for (int64_t i11 = 0; i11 < ne11; i11++) {
const float * const src = (float *)((char *) src1->data + i11*nb11);
- float * dst_data = wdata;
for (int64_t i10 = 0; i10 < ne10; i10++) {
- dst_data[(i10 + nh)*ew0 + i11] = src[i10];
+ dst_data[i10*ne11 + i11] = src[i10];
}
}
}
@@ -14046,8 +14389,10 @@ static void ggml_compute_forward_conv_1d_s2_ph_f32(
return;
}
+ const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
+
// total rows in dst
- const int nr = ne02;
+ const int nr = ne1;
// rows per thread
const int dr = (nr + nth - 1)/nth;
@@ -14056,23 +14401,26 @@ static void ggml_compute_forward_conv_1d_s2_ph_f32(
const int ir0 = dr*ith;
const int ir1 = MIN(ir0 + dr, nr);
+ float * const wdata = (float *) params->wdata + 0;
+ float * const wdata_src = wdata + nk;
+
for (int i1 = ir0; i1 < ir1; i1++) {
float * dst_data = (float *)((char *) dst->data + i1*nb1);
- for (int64_t i0 = 0; i0 < ne10; i0 += 2) {
- dst_data[i0/2] = 0;
- for (int k = -nh; k <= nh; k++) {
- float v = 0.0f;
- ggml_vec_dot_f32(ew0, &v,
- (float *) params->wdata + i1*ew0*ne00 + (nh + k)*ew0,
- (float *) params->wdata + ne02*ew0*ne00 + (i0 + nh + k)*ew0);
-
- dst_data[i0/2] += v;
+ float * wdata_kernel = wdata + i1*ne02*ne00;
+ for (int i10 = 0; i10 < ne10; i10++) {
+ const int i1n = i10*ne11;
+ for (int i00 = 0; i00 < ne00; i00++) {
+ float v = 0;
+ ggml_vec_dot_f32(ne02, &v,
+ wdata_src + i1n,
+ wdata_kernel + i00*ne02);
+ dst_data[i10*s0 + i00] += v;
}
}
}
}
-static void ggml_compute_forward_conv_1d_s2_ph(
+static void ggml_compute_forward_conv_transpose_1d(
const struct ggml_compute_params * params,
const struct ggml_tensor * src0,
const struct ggml_tensor * src1,
@@ -14080,11 +14428,11 @@ static void ggml_compute_forward_conv_1d_s2_ph(
switch (src0->type) {
case GGML_TYPE_F16:
{
- ggml_compute_forward_conv_1d_s2_ph_f16_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_transpose_1d_f16_f32(params, src0, src1, dst);
} break;
case GGML_TYPE_F32:
{
- ggml_compute_forward_conv_1d_s2_ph_f32(params, src0, src1, dst);
+ ggml_compute_forward_conv_transpose_1d_f32(params, src0, src1, dst);
} break;
default:
{
@@ -14093,27 +14441,6 @@ static void ggml_compute_forward_conv_1d_s2_ph(
}
}
-// ggml_compute_forward_conv_1d
-
-static void ggml_compute_forward_conv_1d(
- const struct ggml_compute_params * params,
- const struct ggml_tensor * src0,
- const struct ggml_tensor * src1,
- struct ggml_tensor * dst) {
- const int32_t s0 = ((const int32_t*)(dst->op_params))[0];
- const int32_t p0 = ((const int32_t*)(dst->op_params))[1];
- const int32_t d0 = ((const int32_t*)(dst->op_params))[2];
- GGML_ASSERT(d0 == 1); // dilation not supported
- GGML_ASSERT(p0 == src0->ne[0]/2); // only half padding supported
- if (s0 == 1) {
- ggml_compute_forward_conv_1d_s1_ph(params, src0, src1, dst);
- } else if (s0 == 2) {
- ggml_compute_forward_conv_1d_s2_ph(params, src0, src1, dst);
- } else {
- GGML_ASSERT(false); // only stride 1 and 2 supported
- }
-}
-
// ggml_compute_forward_conv_2d
static void ggml_compute_forward_conv_2d_f16_f32(
@@ -14156,20 +14483,22 @@ static void ggml_compute_forward_conv_2d_f16_f32(
{
ggml_fp16_t * const wdata = (ggml_fp16_t *) params->wdata + 0;
- for (int i12 = 0; i12 < ne12; i12++) {
- const float * const src = (float *)((char *) src1->data + i12*nb12);
- ggml_fp16_t * dst_data = wdata;
-
- for (int i1 = 0; i1 < ne1; i1++) {
- for (int i0 = 0; i0 < ne0; i0++) {
- for (int ik1 = 0; ik1 < nk1; ik1++) {
- for (int ik0 = 0; ik0 < nk0; ik0++) {
- const int idx0 = i0*s0 + ik0*d0 - p0;
- const int idx1 = i1*s1 + ik1*d1 - p1;
-
- if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) {
- dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
- GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]);
+ for (int i13 = 0; i13 < ne13; i13++) {
+ for (int i12 = 0; i12 < ne12; i12++) {
+ const float * const src = (float *)((char *) src1->data + i13*nb13 + i12*nb12);
+ ggml_fp16_t * dst_data = wdata + i13*(ne1*ne0*ew0);
+
+ for (int i1 = 0; i1 < ne1; i1++) {
+ for (int i0 = 0; i0 < ne0; i0++) {
+ for (int ik1 = 0; ik1 < nk1; ik1++) {
+ for (int ik0 = 0; ik0 < nk0; ik0++) {
+ const int idx0 = i0*s0 + ik0*d0 - p0;
+ const int idx1 = i1*s1 + ik1*d1 - p1;
+
+ if (!(idx1 < 0 || idx1 >= ne11 || idx0 < 0 || idx0 >= ne10)) {
+ dst_data[(i1*ne0 + i0)*ew0 + i12*(nk0*nk1) + ik1*nk0 + ik0] =
+ GGML_FP32_TO_FP16(src[idx1*ne10 + idx0]);
+ }
}
}
}
@@ -16452,6 +16781,18 @@ static void ggml_compute_forward(struct ggml_compute_params * params, struct ggm
{
ggml_compute_forward_conv_1d(params, tensor->src[0], tensor->src[1], tensor);
} break;
+ case GGML_OP_CONV_1D_STAGE_0:
+ {
+ ggml_compute_forward_conv_1d_stage_0(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
+ case GGML_OP_CONV_1D_STAGE_1:
+ {
+ ggml_compute_forward_conv_1d_stage_1(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ ggml_compute_forward_conv_transpose_1d(params, tensor->src[0], tensor->src[1], tensor);
+ } break;
case GGML_OP_CONV_2D:
{
ggml_compute_forward_conv_2d(params, tensor->src[0], tensor->src[1], tensor);
@@ -17377,10 +17718,22 @@ static void ggml_compute_backward(struct ggml_context * ctx, struct ggml_tensor
{
GGML_ASSERT(false); // TODO: not implemented
} break;
+ case GGML_OP_CONV_1D_STAGE_0:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
+ case GGML_OP_CONV_1D_STAGE_1:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
case GGML_OP_CONV_2D:
{
GGML_ASSERT(false); // TODO: not implemented
} break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ GGML_ASSERT(false); // TODO: not implemented
+ } break;
case GGML_OP_CONV_TRANSPOSE_2D:
{
GGML_ASSERT(false); // TODO: not implemented
@@ -18222,21 +18575,68 @@ struct ggml_cplan ggml_graph_plan(struct ggml_cgraph * cgraph, int n_threads) {
GGML_ASSERT(node->src[1]->ne[2] == 1);
GGML_ASSERT(node->src[1]->ne[3] == 1);
+ const int64_t ne00 = node->src[0]->ne[0];
+ const int64_t ne01 = node->src[0]->ne[1];
+ const int64_t ne02 = node->src[0]->ne[2];
+
+ const int64_t ne10 = node->src[1]->ne[0];
+ const int64_t ne11 = node->src[1]->ne[1];
+
+ const int64_t ne0 = node->ne[0];
+ const int64_t ne1 = node->ne[1];
+ const int64_t nk = ne00;
+ const int64_t ew0 = nk * ne01;
+
+ UNUSED(ne02);
+ UNUSED(ne10);
+ UNUSED(ne11);
+
size_t cur = 0;
- const int nk = node->src[0]->ne[0];
if (node->src[0]->type == GGML_TYPE_F16 &&
- node->src[1]->type == GGML_TYPE_F32) {
- cur = sizeof(ggml_fp16_t)*(
- nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
- ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
- );
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur = sizeof(ggml_fp16_t)*(ne0*ne1*ew0);
+ } else if (node->src[0]->type == GGML_TYPE_F32 &&
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur = sizeof(float)*(ne0*ne1*ew0);
+ } else {
+ GGML_ASSERT(false);
+ }
+
+ work_size = MAX(work_size, cur);
+ } break;
+ case GGML_OP_CONV_1D_STAGE_0:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_1D_STAGE_1:
+ {
+ n_tasks = n_threads;
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ n_tasks = n_threads;
+
+ GGML_ASSERT(node->src[0]->ne[3] == 1);
+ GGML_ASSERT(node->src[1]->ne[2] == 1);
+ GGML_ASSERT(node->src[1]->ne[3] == 1);
+
+ const int64_t ne00 = node->src[0]->ne[0]; // K
+ const int64_t ne01 = node->src[0]->ne[1]; // Cout
+ const int64_t ne02 = node->src[0]->ne[2]; // Cin
+
+ const int64_t ne10 = node->src[1]->ne[0]; // L
+ const int64_t ne11 = node->src[1]->ne[1]; // Cin
+
+ size_t cur = 0;
+ if (node->src[0]->type == GGML_TYPE_F16 &&
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
+ cur += sizeof(ggml_fp16_t)*ne10*ne11;
} else if (node->src[0]->type == GGML_TYPE_F32 &&
- node->src[1]->type == GGML_TYPE_F32) {
- cur = sizeof(float)*(
- nk*ggml_up32(node->src[0]->ne[1])*node->src[0]->ne[2] +
- ( 2*(nk/2) + node->src[1]->ne[0])*node->src[1]->ne[1]
- );
+ node->src[1]->type == GGML_TYPE_F32) {
+ cur += sizeof(float)*ne00*ne01*ne02;
+ cur += sizeof(float)*ne10*ne11;
} else {
GGML_ASSERT(false);
}
@@ -19362,7 +19762,7 @@ static enum ggml_opt_result ggml_opt_adam(
if (callback) {
callback(callback_data, accum_step, &sched, &cancel);
if (cancel) {
- break;
+ return GGML_OPT_CANCEL;
}
}
// ggml_graph_reset (gf);
@@ -19371,9 +19771,6 @@ static enum ggml_opt_result ggml_opt_adam(
ggml_opt_acc_grad(np, ps, g, accum_norm);
fx += ggml_get_f32_1d(f, 0);
}
- if (cancel) {
- return GGML_OPT_DID_NOT_CONVERGE;
- }
fx *= accum_norm;
opt->adam.fx_prev = fx;
@@ -19399,9 +19796,6 @@ static enum ggml_opt_result ggml_opt_adam(
// run the optimizer
for (int t = 0; t < params.adam.n_iter; ++t) {
- if (cancel) {
- break;
- }
opt->iter = iter0 + t + 1;
GGML_PRINT_DEBUG ("=== iter %d ===\n", t);
@@ -19459,7 +19853,7 @@ static enum ggml_opt_result ggml_opt_adam(
if (callback) {
callback(callback_data, accum_step, &sched, &cancel);
if (cancel) {
- break;
+ return GGML_OPT_CANCEL;
}
}
// ggml_graph_reset (gf);
@@ -19468,9 +19862,6 @@ static enum ggml_opt_result ggml_opt_adam(
ggml_opt_acc_grad(np, ps, g, accum_norm);
fx += ggml_get_f32_1d(f, 0);
}
- if (cancel) {
- break;
- }
fx *= accum_norm;
opt->loss_after = fx;
@@ -19589,7 +19980,7 @@ static enum ggml_opt_result linesearch_backtracking(
finit = *fx;
dgtest = params->lbfgs.ftol*dginit;
- while (!*cancel) {
+ while (true) {
ggml_vec_cpy_f32(nx, x, xp);
ggml_vec_mad_f32(nx, x, d, *step);
@@ -19605,7 +19996,7 @@ static enum ggml_opt_result linesearch_backtracking(
float sched = 0;
callback(callback_data, accum_step, &sched, cancel);
if (*cancel) {
- break;
+ return GGML_OPT_CANCEL;
}
}
// ggml_graph_reset (gf);
@@ -19614,9 +20005,6 @@ static enum ggml_opt_result linesearch_backtracking(
ggml_opt_acc_grad(np, ps, g, accum_norm);
*fx += ggml_get_f32_1d(f, 0);
}
- if (*cancel) {
- break;
- }
*fx *= accum_norm;
}
@@ -19749,7 +20137,7 @@ static enum ggml_opt_result ggml_opt_lbfgs(
float sched = 0;
callback(callback_data, accum_step, &sched, &cancel);
if (cancel) {
- break;
+ return GGML_OPT_CANCEL;
}
}
// ggml_graph_reset (gf);
@@ -19758,9 +20146,6 @@ static enum ggml_opt_result ggml_opt_lbfgs(
ggml_opt_acc_grad(np, ps, g, accum_norm);
fx += ggml_get_f32_1d(f, 0);
}
- if (cancel) {
- return GGML_OPT_DID_NOT_CONVERGE;
- }
fx *= accum_norm;
opt->loss_before = fx;
@@ -19820,8 +20205,8 @@ static enum ggml_opt_result ggml_opt_lbfgs(
ggml_vec_cpy_f32(nx, gp, g);
ls = linesearch_backtracking(&params, nx, x, &fx, g, d, step, xp, f, gb, &cplan, np, ps, &cancel, callback, callback_data);
- if (!cancel) {
- break;
+ if (cancel) {
+ return GGML_OPT_CANCEL;
}
if (ls < 0) {
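Note: the new two-stage `ggml_conv_1d` lowers the convolution to an im2col gather (stage 0, producing `[N, OL, IC*K]` rows) followed by a GEMM against the flattened kernel (stage 1). A rough float-only, single-batch sketch of the same computation, with hypothetical names and a caller-provided scratch buffer (the real stage 0 writes F16 and splits work across threads):

    #include <string.h>

    /* stage 0: im2col -- gather each receptive field into one row of cols
     * stage 1: GEMM   -- out[oc][ol] = dot(kernel row oc, cols row ol)
     * kernel: [OC, IC, K]; signal: [IC, IL]; out: [OC, OL]; cols: [OL, IC*K] */
    static void conv1d_im2col_gemm(const float *kernel, const float *signal,
                                   float *out, float *cols,
                                   int OC, int IC, int IL, int K,
                                   int s0, int p0, int d0) {
        /* same formula as ggml_calc_conv_output_size */
        const int OL = (IL + 2*p0 - d0*(K - 1) - 1)/s0 + 1;
        memset(cols, 0, (size_t)OL*IC*K*sizeof(float));

        for (int ol = 0; ol < OL; ol++) {
            for (int ic = 0; ic < IC; ic++) {
                for (int ik = 0; ik < K; ik++) {
                    const int il = ol*s0 + ik*d0 - p0;
                    if (il >= 0 && il < IL) {
                        cols[(ol*IC + ic)*K + ik] = signal[ic*IL + il];
                    }
                }
            }
        }

        for (int oc = 0; oc < OC; oc++) {
            for (int ol = 0; ol < OL; ol++) {
                float v = 0.0f;
                for (int i = 0; i < IC*K; i++) {
                    v += kernel[oc*IC*K + i] * cols[ol*IC*K + i];
                }
                out[oc*OL + ol] = v;
            }
        }
    }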
diff --git a/ggml.h b/ggml.h
index 460857fa..a9d4e33d 100644
--- a/ggml.h
+++ b/ggml.h
@@ -401,10 +401,14 @@ extern "C" {
GGML_OP_CLAMP,
GGML_OP_CONV_1D,
GGML_OP_CONV_2D,
+ GGML_OP_CONV_TRANSPOSE_1D,
GGML_OP_CONV_TRANSPOSE_2D,
GGML_OP_POOL_1D,
GGML_OP_POOL_2D,
+ GGML_OP_CONV_1D_STAGE_0, // internal
+ GGML_OP_CONV_1D_STAGE_1, // internal
+
GGML_OP_UPSCALE, // nearest interpolate
GGML_OP_FLASH_ATTN,
@@ -1386,6 +1390,14 @@ extern "C" {
int s,
int d);
+ GGML_API struct ggml_tensor * ggml_conv_transpose_1d(
+ struct ggml_context * ctx,
+ struct ggml_tensor * a,
+ struct ggml_tensor * b,
+ int s0,
+ int p0,
+ int d0);
+
GGML_API struct ggml_tensor * ggml_conv_2d(
struct ggml_context * ctx,
struct ggml_tensor * a,
@@ -1759,6 +1771,7 @@ extern "C" {
GGML_OPT_NO_CONTEXT,
GGML_OPT_INVALID_WOLFE,
GGML_OPT_FAIL,
+ GGML_OPT_CANCEL,
GGML_LINESEARCH_FAIL = -128,
GGML_LINESEARCH_MINIMUM_STEP,
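Note: a hypothetical usage sketch for the new `ggml_conv_transpose_1d` API (tensor sizes are made up; the implementation currently asserts `p0 == 0` and `d0 == 1`):

    #include "ggml.h"

    /* kernel a: [K=4, Cout=8, Cin=3]; signal b: [L=32, Cin=3] */
    static struct ggml_tensor * example(struct ggml_context * ctx) {
        struct ggml_tensor * a = ggml_new_tensor_3d(ctx, GGML_TYPE_F16, 4, 8, 3);
        struct ggml_tensor * b = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, 32, 3);

        /* output length: (32 - 1)*2 - 2*0 + 1*(4 - 1) + 1 = 66,
         * so the result has shape [66, 8] */
        return ggml_conv_transpose_1d(ctx, a, b, /*s0=*/2, /*p0=*/0, /*d0=*/1);
    }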
diff --git a/k_quants.c b/k_quants.c
index a1e687dd..558f5fda 100644
--- a/k_quants.c
+++ b/k_quants.c
@@ -69,7 +69,6 @@ inline static int32_t vaddvq_s32(int32x4_t v) {
// 2-6 bit quantization in super-blocks
//
-
//
// ===================== Helper functions
//
@@ -348,7 +347,6 @@ void quantize_row_q2_K_reference(const float * restrict x, block_q2_K * restrict
const float q4scale = 15.f;
for (int i = 0; i < nb; i++) {
-
float max_scale = 0; // as we are deducting the min, scales are always positive
float max_min = 0;
for (int j = 0; j < QK_K/16; ++j) {
diff --git a/tests/test-grad0.cpp b/tests/test-grad0.cpp
index c3cd73bc..0a559b27 100644
--- a/tests/test-grad0.cpp
+++ b/tests/test-grad0.cpp
@@ -208,26 +208,6 @@ static struct ggml_tensor * get_random_tensor_i32(
return result;
}
-static void print_elements(const char* label, const struct ggml_tensor * t) {
- if (!t) {
- printf("%s: %s = null\n", __func__, label);
- return;
- }
- const int nelements = ggml_nelements(t);
- printf("%s: %s = [", __func__, label);
- for (int k = 0; k < nelements; ++k) {
- if (k > 0) { printf(", "); }
- printf("%.5f", ggml_get_f32_1d(t, k));
- }
- printf("] shape: [");
- for (int k = 0; k < t->n_dims; ++k) {
- if (k > 0) { printf(", "); }
- printf("%d", (int)t->ne[k]);
- }
- printf("]\n");
-
-}
-
static bool check_gradient(
const char * op_name,
struct ggml_context * ctx0,
diff --git a/tests/test-opt.cpp b/tests/test-opt.cpp
index fb4e0be9..bb8af596 100644
--- a/tests/test-opt.cpp
+++ b/tests/test-opt.cpp
@@ -40,27 +40,6 @@ static float frand(void) {
return (float)rand()/(float)RAND_MAX;
}
-static int irand(int n) {
- return rand()%n;
-}
-
-static void get_random_dims(int64_t * dims, int ndims) {
- dims[0] = dims[1] = dims[2] = dims[3] = 1;
-
- for (int i = 0; i < ndims; i++) {
- dims[i] = 1 + irand(4);
- }
-}
-
-static void get_random_dims_minmax(int64_t * dims, int ndims, int min, int max) {
- dims[0] = dims[1] = dims[2] = dims[3] = 1;
-
- for (int i = 0; i < ndims; i++) {
- dims[i] = min + irand(max-min);
- }
-}
-
-
static struct ggml_tensor * get_random_tensor(
struct ggml_context * ctx0, int ndims, int64_t ne[], float fmin, float fmax
) {
@@ -106,14 +85,6 @@ static struct ggml_tensor * get_random_tensor(
return result;
}
-static float get_element(const struct ggml_tensor * t, int idx) {
- return ((float *)t->data)[idx];
-}
-
-static void set_element(struct ggml_tensor * t, int idx, float value) {
- ((float *)t->data)[idx] = value;
-}
-
int main(void) {
struct ggml_init_params params = {
/* .mem_size = */ 1024*1024*1024,
diff --git a/tests/test-quantize-perf.cpp b/tests/test-quantize-perf.cpp
index 01aa6987..88fac0e2 100644
--- a/tests/test-quantize-perf.cpp
+++ b/tests/test-quantize-perf.cpp
@@ -76,22 +76,21 @@ static void * align_with_offset(void * ptr, int offset) {
return (char *) std::align(MAX_ALIGNMENT, MAX_ALIGNMENT, ptr, dummy_size) + offset;
}
-static void benchmark_function(size_t size, size_t q_size, int64_t iterations, const std::function<size_t(void)> & function) {
+static void benchmark_function(size_t size, size_t q_size, int64_t iterations, const std::function<float(void)> & func) {
int64_t min_time_us = INT64_MAX;
int64_t total_time_us = 0;
int64_t min_time_cycles = INT64_MAX;
int64_t total_time_cycles = 0;
for (int i = 0; i < WARMUP; i++) {
- function();
+ func();
}
-
for (int i = 0; i < iterations; i++) {
const int64_t start_time = ggml_time_us();
const int64_t start_cycles = cpu_cycles();
- function();
+ func();
const int64_t end_cycles = cpu_cycles();
const int64_t end_time = ggml_time_us();
@@ -245,15 +244,15 @@ int main(int argc, char * argv[]) {
std::vector<uint8_t> test_data1_v(largest*4 + MAX_ALIGNMENT*2);
std::vector<uint8_t> test_data2_v(largest*4 + MAX_ALIGNMENT*2);
- std::vector<uint8_t> test_q1_v(largest*4 + MAX_ALIGNMENT*2);
- std::vector<uint8_t> test_q2_v(largest*4 + MAX_ALIGNMENT*2);
- std::vector<uint8_t> test_out_v(largest*4 + MAX_ALIGNMENT*2);
+ std::vector<uint8_t> test_q1_v (largest*4 + MAX_ALIGNMENT*2);
+ std::vector<uint8_t> test_q2_v (largest*4 + MAX_ALIGNMENT*2);
+ std::vector<uint8_t> test_out_v (largest*4 + MAX_ALIGNMENT*2);
float * test_data1 = (float *) align_with_offset(test_data1_v.data(), params.alignment_offset);
float * test_data2 = (float *) align_with_offset(test_data2_v.data(), params.alignment_offset);
- float * test_q1 = (float *) align_with_offset(test_q1_v.data(), params.alignment_offset);
- float * test_q2 = (float *) align_with_offset(test_q2_v.data(), params.alignment_offset);
- float * test_out = (float *) align_with_offset(test_out_v.data(), params.alignment_offset);
+ float * test_q1 = (float *) align_with_offset(test_q1_v.data(), params.alignment_offset);
+ float * test_q2 = (float *) align_with_offset(test_q2_v.data(), params.alignment_offset);
+ float * test_out = (float *) align_with_offset(test_out_v.data(), params.alignment_offset);
generate_data(0, largest, test_data1);
generate_data(1, largest, test_data2);
@@ -283,7 +282,7 @@ int main(int argc, char * argv[]) {
printf(" quantize_row_q_reference\n");
for (size_t size : params.test_sizes) {
printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
- auto quantize_fn = [&](void ) {
+ auto quantize_fn = [&](void) -> float {
qfns.from_float_reference(test_data1, test_q1, size);
return test_q1[0];
};
@@ -297,7 +296,7 @@ int main(int argc, char * argv[]) {
printf(" quantize_row_q\n");
for (size_t size : params.test_sizes) {
printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
- auto quantize_fn = [&](void ) {
+ auto quantize_fn = [&](void) -> float {
qfns.from_float(test_data1, test_q1, size);
return test_q1[0];
};
@@ -312,7 +311,7 @@ int main(int argc, char * argv[]) {
qfns.from_float(test_data1, test_q1, largest);
for (size_t size : params.test_sizes) {
printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
- auto quantize_fn = [&](void ) {
+ auto quantize_fn = [&](void) -> float {
qfns.to_float(test_q1, test_out, size);
return test_out[0];
};
@@ -326,7 +325,7 @@ int main(int argc, char * argv[]) {
printf(" quantize_row_q_dot\n");
for (size_t size : params.test_sizes) {
printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
- auto quantize_fn = [&](void ) {
+ auto quantize_fn = [&](void) -> float {
auto vdot = ggml_internal_get_type_traits(qfns.vec_dot_type);
vdot.from_float(test_data1, test_q1, size);
return test_q1[0];
@@ -343,7 +342,7 @@ int main(int argc, char * argv[]) {
qfns.from_float(test_data2, test_q2, largest);
for (size_t size : params.test_sizes) {
printf(" %zu values (%.2f MB)\n", size, 4*size/(float)(1024*1024));
- auto quantize_fn = [&](void ) {
+ auto quantize_fn = [&](void) -> float {
float result;
qfns.vec_dot(size, &result, test_q1, test_q2);
return result;
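Note: the `std::function<float(void)>` signature change is what fixes the UB in test-quantize-perf: the lambdas deduce a float return type, and wrapping them in the old `std::function<size_t(void)>` made every call convert that float to size_t. Scratch bytes reinterpreted as float can be negative (or NaN), and such a conversion is undefined when the truncated value is out of range. A minimal sketch of the conversion class being avoided (the value is an assumption for illustration):

    #include <stddef.h>

    static size_t demo(void) {
        float v = -1.5f;      /* e.g. quantized scratch bytes read back as float */
        return (size_t) v;    /* UB: the truncated value -1 is not representable */
    }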