author | Kawrakow <48489457+ikawrakow@users.noreply.github.com> | 2024-07-26 12:57:23 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2024-07-26 12:57:23 +0200 |
commit | 0684c3e9c70d49323b4fc517128cbe222cab7f96 (patch) | |
tree | a193b03f1f02a4e0eba858e29b8c15de45604153 /llama.cpp | |
parent | 94b5916319cf1f00c0215dfcee9b531896476c5f (diff) | |
Offload Bitnet token embeddings to the GPU - the right way (#2)
OK, I should have checked how it was done for Gemma and done
the same for Bitnet. But better late than never.
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 22 |
1 file changed, 4 insertions, 18 deletions
```diff
@@ -5355,22 +5355,7 @@ static bool llm_load_tensors(
     bool use_mmap_buffer = true;
 
     // there is very little benefit to offloading the input layer, so always keep it on the CPU
-    //model.buft_input = llama_default_buffer_type_cpu(true);
-    //
-    // Well, this is not really true when the model uses the same tensor for token embeddings and for output
-    // (e.g., Bitnet, Gemma). If we use the above, then the matrix multiplication with the output tensor runs
-    // on the CPU, which can have quite a significant impact on performance. For instance, for 3B-Bitnet, I get
-    // TG-128 = ~240 t/s on an RTX-4080 with the above, and TG-128 = 320 t/s with the version below.
-    // The issue with just generically putting token embeddings on the GPU is that CUDA supports the GET_ROWS
-    // operation only for F16 and legacy quants, and this leads to a massive drop in performance when token embeddings
-    // are quantized with a k- or i-quant (which is almost always true). The back-end related stuff and offloading
-    // to the GPU has become quite opaque and hard to understand, so for now we fix this just for Bitnet
-    // (where token_embeddings is quantized with Q8_0).
-    if (model.arch == LLM_ARCH_BITNET) {
-        model.buft_input = llama_default_buffer_type_offload(model, main_gpu);
-    } else {
-        model.buft_input = llama_default_buffer_type_cpu(true);
-    }
+    model.buft_input = llama_default_buffer_type_cpu(true);
 
     model.buft_layer.resize(n_layer);
 
@@ -6729,7 +6714,8 @@ static bool llm_load_tensors(
 
                 // output
                 {
-                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                    model.output_norm = ml.create_tensor(ctx_output, tn(LLM_TENSOR_OUTPUT_NORM, "weight"), {n_embd});
+                    model.output = ml.create_tensor(ctx_output, tn(LLM_TENSOR_TOKEN_EMBD, "weight"), {n_embd, n_vocab}, llama_model_loader::TENSOR_DUPLICATED); // same as tok_embd, duplicated to allow offloading
                 }
 
                 const uint32_t n_ff = hparams.n_ff;
@@ -12055,7 +12041,7 @@ struct llm_build_context {
         cb(cur, "result_norm", -1);
 
         // lm_head
-        cur = ggml_mul_mat(ctx0, model.tok_embd, cur);
+        cur = ggml_mul_mat(ctx0, model.output, cur);
         cb(cur, "result_output", -1);
 
         ggml_build_forward_expand(gf, cur);
```
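Why the placement of this one tensor matters: Bitnet ties its weights, so a single `[n_embd, n_vocab]` matrix serves both the token-embedding lookup at the start of the graph (the GET_ROWS-style gather) and the lm_head projection at the end (`ggml_mul_mat(ctx0, model.output, cur)`). The snippet below is not ik_llama.cpp code, just a minimal standalone C++ sketch with toy sizes and made-up values illustrating the two uses of the shared matrix; wherever that tensor's buffer lives is where the per-token lm_head matmul runs.

```cpp
// Standalone sketch of weight tying: one [n_vocab x n_embd] matrix is used
// both for the input embedding lookup and for the output (lm_head) projection.
#include <cstdio>
#include <vector>

int main() {
    const int n_vocab = 8;  // toy sizes, for illustration only
    const int n_embd  = 4;

    // Tied weight: row v holds the embedding of token id v.
    std::vector<float> tok_embd(n_vocab * n_embd);
    for (int v = 0; v < n_vocab; ++v)
        for (int e = 0; e < n_embd; ++e)
            tok_embd[v * n_embd + e] = 0.01f * float((v + 1) * (e + 1));

    // 1) Input side: the embedding lookup (a GET_ROWS-style gather) copies
    //    the row for the current token into the hidden state.
    const int token_id = 3;
    std::vector<float> hidden(tok_embd.begin() + token_id * n_embd,
                              tok_embd.begin() + (token_id + 1) * n_embd);

    // ... the transformer layers would transform `hidden` here ...

    // 2) Output side: the lm_head is a matmul against the *same* matrix,
    //    logits[v] = dot(row v of tok_embd, hidden).
    std::vector<float> logits(n_vocab, 0.0f);
    for (int v = 0; v < n_vocab; ++v)
        for (int e = 0; e < n_embd; ++e)
            logits[v] += tok_embd[v * n_embd + e] * hidden[e];

    for (int v = 0; v < n_vocab; ++v)
        printf("logit[%d] = %.4f\n", v, logits[v]);
    return 0;
}
```

Per the in-diff comment ("same as tok_embd, duplicated to allow offloading"), the `llama_model_loader::TENSOR_DUPLICATED` flag lets the loader register the token-embedding data a second time as `model.output`, so the copy used by the lm_head can be placed in an offload buffer while the input-side lookup stays with `buft_input` on the CPU.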