 llama.cpp | 22 ++++------------------
 1 file changed, 4 insertions(+), 18 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index dba3b1ce..169f7d68 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5355,22 +5355,7 @@ static bool llm_load_tensors(
bool use_mmap_buffer = true;
// there is very little benefit to offloading the input layer, so always keep it on the CPU
- //model.buft_input = llama_default_buffer_type_cpu(true);
- //
- // Well, this is not really true when the model uses the same tensor for token embeddings and for output
- // (e.g., Bitnet, Gemma). If we use the above, then the matrix multiplication with the output tensor runs
- // on the CPU, which can have quite a significant impact on performance. For instance, for 3B-Bitnet, I get
- // TG-128 = ~240 t/s on an RTX-4080 with the above, and TG-128 = 320 t/s with the version below.
- // The issue with just generically putting token embeddings on the GPU is that CUDA supports the GET_ROWS
- // operation only for F16 and legacy quants, and this leads to a massive drop in performance when token embeddings
- // are quantized with a k- or i-quant (which is almost always true). The back-end related stuff and offloading
- // to the GPU has become quite opaque and hard to understand, so for now we fix this just for Bitnet
- // (where token_embeddings is quantized with Q8_0).
- if (model.arch == LLM_ARCH_BITNET) {
- model.buft_input = llama_default_buffer_type_offload(model, main_gpu);
- } else {
- model.buft_input = llama_default_buffer_type_cpu(true);
- }
+ model.buft_input = llama_default_buffer_type_cpu(true);
model.buft_layer.resize(n_layer);
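
The comment removed above explains why token embeddings were not generically placed on the GPU: CUDA's GET_ROWS operation only covers F16 and the legacy quant types, so offloading k- or i-quantized embeddings costs more than it saves. A minimal sketch of that rule, using a hypothetical helper name (token_embd_offload_ok does not exist in the codebase) and only the types named in the removed comment:

// Hedged sketch of the rule described in the removed comment: offload the token
// embeddings only if their type is one that CUDA GET_ROWS handles (F16 or a
// legacy quant); anything else (k-/i-quants) should stay on the CPU.
static bool token_embd_offload_ok(enum ggml_type type) {
    switch (type) {
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:
            return true;   // GET_ROWS has a CUDA kernel for these
        default:
            return false;  // k-/i-quants: keep the embeddings on the CPU
    }
}
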
@@ -6729,7 +6714,8 @@ static bool llm_load_tensors(
// output
{
- model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+ model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+ model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
}
const uint32_t n_ff = hparams.n_ff;
@@ -12055,7 +12041,7 @@ struct llm_build_context {
cb(cur, "result_norm", -1);
// lm_head
- cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
+ cur = ggml_mul_mat(ctx0, model.output, cur);
cb(cur, "result_output", -1);
ggml_build_forward_expand(gf, cur);
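
Taken together, the change replaces the Bitnet-only special case with a general mechanism: the token-embedding tensor stays on the CPU, where GET_ROWS works for any quant type, while a TENSOR_DUPLICATED copy of the same weights, model.output, can be offloaded and used for the lm_head. A rough sketch of the two roles the same weights now play (simplified, not the actual loader or graph-build code):

// Simplified sketch: the same weight matrix is used twice. tok_embd lives in a
// CPU buffer and is only read via GET_ROWS to look up the embeddings of the
// input tokens; output is the duplicated copy that may sit in a GPU buffer and
// is used for the final projection to vocabulary logits.
struct ggml_tensor * embed_and_project(
        struct ggml_context * ctx0,
        const llama_model  & model,
        struct ggml_tensor * inp_tokens,   // I32 token ids, {n_tokens}
        struct ggml_tensor * hidden) {     // {n_embd, n_tokens} after the final norm
    // input side: row lookup on the CPU-resident token embeddings
    struct ggml_tensor * inpL = ggml_get_rows(ctx0, model.tok_embd, inp_tokens);
    (void) inpL; // the transformer layers in between are omitted in this sketch

    // output side: lm_head matmul on the duplicated, offloadable copy
    return ggml_mul_mat(ctx0, model.output, hidden); // {n_vocab, n_tokens} logits
}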