author | Ren Xuancheng <jklj077@users.noreply.github.com> | 2024-04-18 19:38:04 +0800
committer | GitHub <noreply@github.com> | 2024-04-18 14:38:04 +0300
commit | e11b2e6e1e18522ca7cf129600875a0f6fb9307d (patch)
tree | eb7e3ad0a777f6e8ea599b3be07a6ea9da386fbb
parent | c71bfd736ee99a56e697697b39240f2ee06ed26d (diff)
Qwen2 : assume tied weights if lm_head/output weights are missing (#6738)
-rw-r--r-- | llama.cpp | 8
1 file changed, 7 insertions, 1 deletion
@@ -5184,7 +5184,13 @@ static bool llm_load_tensors(
         // output
         {
             model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
-            model.output      = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab});
+            model.output      = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, false);
+            // if output is NULL, init from the input tok embed
+            if (model.output == NULL) {
+                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab});
+                ml.n_created--; // artificial tensor
+                ml.size_data += ggml_nbytes(model.output);
+            }
         }
 
         for (int i = 0; i < n_layer; ++i) {
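Note: "tied weights" here means the checkpoint ships no separate lm_head/output tensor and the output projection reuses the token-embedding matrix. The change above creates the output tensor as optional (the added `false` argument, which in this loader marks the tensor as not required) and, when it comes back NULL, points `model.output` at the token embeddings instead; the `n_created--` and `size_data` adjustments compensate for that "artificial" extra tensor, as the diff's own comments say. The following is a minimal standalone sketch of what weight tying means numerically; it is not llama.cpp code, and all names and values (`tok_embd`, `hidden`, `n_vocab`, `n_embd`) are illustrative assumptions.

```cpp
// Minimal sketch (not llama.cpp code): with tied weights there is no separate
// lm_head, so logits are computed against the token-embedding matrix itself.
#include <cstdio>
#include <vector>

int main() {
    const int n_vocab = 4;
    const int n_embd  = 3;

    // token-embedding matrix: one row of n_embd values per vocabulary entry
    std::vector<float> tok_embd = {
        0.1f, 0.2f, 0.3f,
        0.0f, 1.0f, 0.0f,
        0.5f, 0.5f, 0.5f,
        1.0f, 0.0f, 1.0f,
    };

    // tied weights: the output projection aliases the embeddings instead of
    // being a separately stored tensor
    const std::vector<float> &output = tok_embd;

    // hidden state of the last position, after the final norm
    std::vector<float> hidden = {0.2f, 0.4f, 0.6f};

    // logits[v] = dot(hidden, row v of the (tied) output matrix)
    for (int v = 0; v < n_vocab; ++v) {
        float logit = 0.0f;
        for (int e = 0; e < n_embd; ++e) {
            logit += hidden[e] * output[(size_t)v * n_embd + e];
        }
        std::printf("logit[%d] = %f\n", v, logit);
    }
    return 0;
}
```

Because the two matrices are the same object, a Qwen2 export that omits `output.weight` loses no information; the loader just has to know to fall back to `token_embd.weight`, which is exactly what this commit adds.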