From f439e506e8ae8b01df2ae2156380f8156d7553e3 Mon Sep 17 00:00:00 2001
From: Herman Semenov
Date: Fri, 20 Oct 2023 10:02:12 +0000
Subject: ggml : fix rope + llama minor optimizations (#3560)

* Minor fixes and fixed memleak

* Using const auto references in range-based loop C++17
---
 ggml.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'ggml.c')

diff --git a/ggml.c b/ggml.c
index 630deb49..ed157aab 100644
--- a/ggml.c
+++ b/ggml.c
@@ -13537,7 +13537,7 @@ static void ggml_compute_forward_rope_f16(
                         dst_data[n_dims]     = GGML_FP32_TO_FP16(x2*cos_block_theta - x3*sin_block_theta);
                         dst_data[n_dims/2*3] = GGML_FP32_TO_FP16(x2*sin_block_theta + x3*cos_block_theta);
                     }
-                } if (!is_neox) {
+                } else if (!is_neox) {
                     for (int64_t i0 = 0; i0 < ne0; i0 += 2) {
                         const float cos_theta = cosf(theta);
                         const float sin_theta = sinf(theta);
@@ -19170,6 +19170,7 @@ void ggml_graph_export(const struct ggml_cgraph * cgraph, const char * fname) {
 
                         if (idx == -1) {
                             fprintf(stderr, "%s: failed to find tensor, arg = %d, node = %d\n", __func__, j, i);
+                            fclose(fout);
                             return;
                         }
 
-- 
cgit v1.2.3
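
About the first hunk: `} if (!is_neox) {` closes the `is_glm` block and then opens a second, independent `if` statement, so two branches could run for the same row instead of exactly one; `} else if (!is_neox) {` makes the three rotation paths mutually exclusive again. A minimal sketch of the control-flow difference, in C with hypothetical flags and stub bodies (not the ggml implementation):

    #include <stdio.h>
    #include <stdbool.h>

    /* Three rotation modes that must be mutually exclusive, mirroring the
     * is_glm / !is_neox / neox branching in ggml_compute_forward_rope_f16.
     * Names and bodies here are stand-ins, not the ggml code. */
    static void rope_row(bool is_glm, bool is_neox) {
        if (is_glm) {
            puts("glm rotation");
        } else if (!is_neox) {  /* the fix: with a bare `if`, this second
                                   chain runs even after the glm branch */
            puts("default rotation");
        } else {
            puts("neox rotation");
        }
    }

    int main(void) {
        rope_row(true,  false);  /* prints one line; pre-fix it printed two */
        rope_row(false, false);
        rope_row(false, true);
        return 0;
    }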
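
About the second hunk: `ggml_graph_export` opens `fout` with `fopen` and previously bailed out of the `idx == -1` error path with a bare `return`, leaking the open `FILE` handle (the "memleak" named in the commit message); the added `fclose(fout)` releases it on that early return. A minimal sketch of the pattern, using a hypothetical exporter rather than the real ggml signature:

    #include <stdio.h>

    /* Hypothetical exporter: every early-return path taken after a
     * successful fopen() must release the FILE handle, as the patch
     * now does for fout. */
    static int export_data(const char * fname, const int * data, int n) {
        FILE * fout = fopen(fname, "wb");
        if (!fout) {
            return -1;                  /* nothing opened, nothing to close */
        }
        for (int i = 0; i < n; ++i) {
            if (data[i] < 0) {
                fprintf(stderr, "invalid value at %d\n", i);
                fclose(fout);           /* the fix: close before bailing out */
                return -1;
            }
            fwrite(&data[i], sizeof data[i], 1, fout);
        }
        fclose(fout);
        return 0;
    }

    int main(void) {
        const int data[] = { 1, 2, 3 };
        return export_data("out.bin", data, 3);
    }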