summaryrefslogtreecommitdiff
path: root/llama.cpp
diff options
context:
space:
mode:
authorGeorgi Gerganov <ggerganov@gmail.com>2023-03-25 16:09:54 +0200
committerGeorgi Gerganov <ggerganov@gmail.com>2023-03-25 16:10:14 +0200
commit2a2e63ce0503d9bf3e55283e40a052c78c1cc3a8 (patch)
tree54cdb3b01713d6be5ac9cb907b8eb19d785d0a01 /llama.cpp
parente899bf54b291e8c84173a0e534a2c262f3f63229 (diff)
Fix nasty bug in ggml_compute_forward_mul_mat_f32() and reenable BLAS
Diffstat (limited to 'llama.cpp')
-rw-r--r--llama.cpp5
1 file changed, 2 insertions, 3 deletions
diff --git a/llama.cpp b/llama.cpp
index 14de611a..bb7bdead 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -917,8 +917,7 @@ static bool llama_eval_internal(
struct ggml_tensor * KQ_scaled =
ggml_scale(ctx0,
KQ,
- ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head))
- );
+ ggml_new_f32(ctx0, 1.0f/sqrt(float(n_embd)/n_head)));
// KQ_masked = mask_past(KQ_scaled)
struct ggml_tensor * KQ_masked = ggml_diag_mask_inf(ctx0, KQ_scaled, n_past);
@@ -934,7 +933,7 @@ static bool llama_eval_internal(
ggml_view_1d(ctx0, kv_self.v, (n_past + N)*n_embd, il*n_ctx*ggml_element_size(kv_self.v)*n_embd),
n_embd/n_head, n_head, n_past + N),
1, 2, 0, 3),
- ggml_new_tensor_3d(ctx0, GGML_TYPE_F32, n_past + N, n_embd/n_head, n_head));
+ ggml_new_tensor_3d(ctx0, kv_self.v->type, n_past + N, n_embd/n_head, n_head));
// KQV = transpose(V) * KQ_soft_max
struct ggml_tensor * KQV = ggml_mul_mat(ctx0, V_trans, KQ_soft_max);