author:    John <78893154+cmp-nct@users.noreply.github.com>  2024-01-18 23:12:15 +0100
committer: GitHub <noreply@github.com>  2024-01-19 00:12:15 +0200
commit:    57e2a7a52a819883f40dada8a2edc24ecf48186b
tree:      52663a84fbfcb73b36231fff3c6b4a9ba0a1773e /llama.cpp
parent:    9b6ea4263ab45e02ff905bf7a29dc143ca1facc3
llama : fix falcon arch for tied output embeddings (#4978)
* falcon arch fix for tied output embeddings
* Update llama.cpp
* Update llama.cpp
* Update llama.cpp
* Update llama.cpp

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'llama.cpp')
 llama.cpp | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index f1d00a96..47b4384a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -3438,7 +3438,12 @@ static bool llm_load_tensors(
                 {
                     model.output_norm   = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
                     model.output_norm_b = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "bias"),   {n_embd});
-                    model.output        = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                    if (gguf_find_tensor(ml.ctx_gguf, tn(LLM_TENSOR_OUTPUT, "weight").c_str()) >= 0) {
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+                    } else {
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}); // needs to be on GPU
+                        ml.n_created--; // artificial tensor
+                    }
                 }
 
                 for (int i = 0; i < n_layer; ++i) {
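
For context, below is a minimal standalone sketch of the weight-tying fallback this patch implements: when a Falcon GGUF ships no dedicated output.weight tensor (because its LM head is tied to the input embeddings), the loader reuses token_embd.weight as the output projection instead of failing the lookup. The TensorStore type, has_tensor(), and resolve_output() here are hypothetical stand-ins for llama.cpp's llama_model_loader and GGUF tensor lookup, not the real API.

#include <cstdio>
#include <map>
#include <string>
#include <vector>

// Hypothetical stand-in for a GGUF file's tensor table.
using TensorStore = std::map<std::string, std::vector<float>>;

// Plays the role of gguf_find_tensor(): does the file contain this tensor?
static bool has_tensor(const TensorStore & store, const std::string & name) {
    return store.find(name) != store.end();
}

// Resolve the output projection, falling back to the (tied) token embedding.
static const std::vector<float> & resolve_output(const TensorStore & store) {
    if (has_tensor(store, "output.weight")) {
        return store.at("output.weight"); // dedicated LM head present
    }
    // Tied embeddings: reuse the input embedding matrix as the LM head.
    // In the patch above, the real loader additionally keeps this tensor on
    // the GPU and decrements its n_created bookkeeping counter, since no new
    // tensor was actually created ("artificial tensor").
    return store.at("token_embd.weight");
}

int main() {
    // A model file that ships only the token embedding, no output.weight.
    TensorStore model = { { "token_embd.weight", { 0.1f, 0.2f } } };
    const auto & out = resolve_output(model);
    std::printf("output projection has %zu elements (tied to token_embd)\n", out.size());
    return 0;
}

Falcon checkpoints converted with tied embeddings carry only token_embd.weight, so before this patch the unconditional create_tensor() call on LLM_TENSOR_OUTPUT failed at load time; the gguf_find_tensor() branch in the diff is exactly this fallback.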