author     Kawrakow <48489457+ikawrakow@users.noreply.github.com>    2024-07-26 09:41:04 +0200
committer  GitHub <noreply@github.com>                               2024-07-26 09:41:04 +0200
commit     94b5916319cf1f00c0215dfcee9b531896476c5f (patch)
tree       0eaae2c122860b90e368e95f19fdd44ceb1396fd /llama.cpp
parent     c2158c15d9d3d916e564411f19afd8138dc8317c (diff)
Offload Bitnet token embeddings to the GPU (#1)
* bitnet: put token embeddings on the GPU

* Update README with the new CUDA/Metal performance

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  18
1 file changed, 16 insertions(+), 2 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 52c9d571..dba3b1ce 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5355,8 +5355,22 @@ static bool llm_load_tensors(
bool use_mmap_buffer = true;
// there is very little benefit to offloading the input layer, so always keep it on the CPU
- model.buft_input = llama_default_buffer_type_cpu(true);
- //model.buft_input = llama_default_buffer_type_offload(main_gpu);
+ //model.buft_input = llama_default_buffer_type_cpu(true);
+ //
+ // Well, this is not really true when the model uses the same tensor for token embeddings and for output
+ // (e.g., Bitnet, Gemma). If we use the above, then the matrix multiplication with the output tensor runs
+ // on the CPU, which can have quite a significant impact on performance. For instance, for 3B-Bitnet, I get
+ // TG-128 = ~240 t/s on an RTX-4080 with the above, and TG-128 = 320 t/s with the version below.
+ // The issue with just generically putting token embeddings on the GPU is that CUDA supports the GET_ROWS
+ // operation only for F16 and legacy quants, and this leads to a massive drop in performance when token embeddings
+ // are quantized with a k- or i-quant (which is almost always true). The back-end related machinery and offloading
+ // to the GPU have become quite opaque and hard to understand, so for now we fix this just for Bitnet
+ // (where token_embeddings is quantized with Q8_0).
+ if (model.arch == LLM_ARCH_BITNET) {
+ model.buft_input = llama_default_buffer_type_offload(model, main_gpu);
+ } else {
+ model.buft_input = llama_default_buffer_type_cpu(true);
+ }
model.buft_layer.resize(n_layer);
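
For readers skimming the patch, here is a minimal sketch (not part of the commit) of the type check the comment above alludes to: CUDA's GET_ROWS path covers F16/F32 and the legacy quants, so an offload decision could in principle be gated on the token-embedding tensor's type rather than on the architecture. The helper name and the exact type list below are assumptions for illustration, not llama.cpp API.

#include "ggml.h"

// Hypothetical helper: returns true when the token-embedding type is one that
// CUDA's GET_ROWS kernel handles well (per the comment in the patch above).
static bool token_embd_offload_ok(enum ggml_type type) {
    switch (type) {
        case GGML_TYPE_F32:
        case GGML_TYPE_F16:
        case GGML_TYPE_Q4_0:
        case GGML_TYPE_Q4_1:
        case GGML_TYPE_Q5_0:
        case GGML_TYPE_Q5_1:
        case GGML_TYPE_Q8_0:   // Bitnet's token embeddings are Q8_0, so they would pass this check
            return true;
        default:
            return false;      // k- and i-quants would hit a much slower GET_ROWS fallback
    }
}

The commit instead keys on model.arch == LLM_ARCH_BITNET, which achieves the same effect for the one architecture known to share the (Q8_0-quantized) token-embedding tensor with the output layer.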