author     zhangkaihuo <zhangkaihuo@gmail.com>   2024-06-03 15:49:30 +0800
committer  GitHub <noreply@github.com>           2024-06-03 10:49:30 +0300
commit     6f28a333c1e3fdfdc7b4f9d0367f2b41a9b7e9d4 (patch)
tree       44e54d11d4fca4edda050f8206157065a06ac60d /llama.cpp
parent     549279d8049d78620a2b081e26edb654f83c3bbd (diff)
llama : MiniCPM support tied embeddings (#7664)
* support lm_head: build MiniCPM's output projection from model.output instead of hard-coding model.tok_embd
* remove the MiniCPM-specific code block in llm_load_tensors, relying on the generic tied-embeddings fallback
---------
Co-authored-by: zhangkaihuo <zhangkaihuo@modelbest.cn>
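For context: "tied embeddings" means the model reuses its token-embedding matrix as the lm_head (output projection), so logits are computed against the same weights that embed the input tokens. A minimal standalone C++ sketch of the idea (not code from this patch; the function name and the row-major [n_vocab, n_embd] layout are illustrative assumptions):

#include <cstddef>
#include <vector>

// Tied embeddings: the lm_head reuses the token-embedding matrix E
// instead of a separate output weight. E is assumed row-major with
// shape [n_vocab, n_embd]; h is a single hidden state of size n_embd.
std::vector<float> lm_head_tied(const std::vector<float> & E,
                                const std::vector<float> & h,
                                size_t n_vocab, size_t n_embd) {
    std::vector<float> logits(n_vocab, 0.0f);
    for (size_t i = 0; i < n_vocab; ++i) {
        for (size_t j = 0; j < n_embd; ++j) {
            logits[i] += E[i*n_embd + j] * h[j]; // dot(row i of E, h)
        }
    }
    return logits;
}

Because the weights are shared, the loader does not need a separate output tensor for such models; it only needs to point model.output at the token embeddings, which is exactly what the generic fallback in the diff below does.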
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp | 12 +++++-------
1 file changed, 5 insertions(+), 7 deletions(-)
@@ -5124,12 +5124,10 @@ static bool llm_load_tensors(
                     // output
                     {
                         model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
-                        if (model.arch != LLM_ARCH_MINICPM){
-                            model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
-                            // if output is NULL, init from the input tok embed
-                            if (model.output == NULL) {
-                                model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
-                            }
+                        model.output = ml.create_tensor(ctx_output_split, tn(LLM_TENSOR_OUTPUT, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_NOT_REQUIRED);
+                        // if output is NULL, init from the input tok embed
+                        if (model.output == NULL) {
+                            model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED);
                         }
                     }
 
@@ -10212,7 +10210,7 @@ struct llm_build_context {
         cb(cur, "lmhead_scaling", -1);
 
         // lm_head
-        cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
+        cur = ggml_mul_mat(ctx0, model.output, cur);
         cb(cur, "result_output", -1);
 
         ggml_build_forward_expand(gf, cur);
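The first hunk drops the MiniCPM special case: the generic path already creates model.output as optional (TENSOR_NOT_REQUIRED) and, when the GGUF file carries no separate output tensor, duplicates the token embeddings into it (TENSOR_DUPLICATED). The second hunk then makes build_minicpm use model.output rather than hard-coding model.tok_embd. A standalone ggml sketch of that matmul, assuming ggml's convention that ggml_mul_mat contracts over dimension ne[0] (all sizes and fill values here are made up):

#include "ggml.h"

int main(void) {
    const int n_embd  = 4;
    const int n_vocab = 8;
    const int n_tok   = 2;

    struct ggml_init_params params = {
        /*.mem_size   =*/ 16*1024*1024,
        /*.mem_buffer =*/ NULL,
        /*.no_alloc   =*/ false,
    };
    struct ggml_context * ctx = ggml_init(params);

    // weight with the same {n_embd, n_vocab} shape the loader creates;
    // with tied embeddings its data would be a copy of tok_embd
    struct ggml_tensor * output = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_vocab);
    struct ggml_tensor * cur    = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, n_tok);
    ggml_set_f32(output, 0.5f);
    ggml_set_f32(cur,    1.0f);

    // same call shape as the patched line: logits come out as {n_vocab, n_tok}
    struct ggml_tensor * logits = ggml_mul_mat(ctx, output, cur);

    struct ggml_cgraph * gf = ggml_new_graph(ctx);
    ggml_build_forward_expand(gf, logits);
    ggml_graph_compute_with_ctx(ctx, gf, /*n_threads=*/1);

    ggml_free(ctx);
    return 0;
}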