author     slaren <slarengh@gmail.com>      2023-10-17 22:24:50 +0200
committer  GitHub <noreply@github.com>      2023-10-17 22:24:50 +0200
commit     cb33f43a2a9f5a5a5f8d290dd97c625d9ba97a2f
tree       1e0972e21a9c5d891c746415f5ae2f5dc9f4277e
parent     e1675d133c31e1c8de2f06be7164e12c0ba6cf2c
fix embeddings when using CUDA (#3657)
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp | 19
1 file changed, 13 insertions(+), 6 deletions(-)
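In short: ggml-alloc may assign a graph output to a GPU backend when it reuses an offloaded parent's buffer (see the HACK comment in the hunk below), which breaks reading the embeddings back on the host when CUDA is enabled. The patch moves the lookup of the two output tensors to just after graph allocation and, in the cuBLAS path, pins them to the CPU backend. A minimal sketch of the resulting code path, assembled from the hunks below rather than quoted verbatim:

struct ggml_tensor * res        = gf->nodes[gf->n_nodes - 1]; // logits
struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2]; // output of the final norm

GGML_ASSERT(strcmp(res->name,        "result_output") == 0);
GGML_ASSERT(strcmp(embeddings->name, "result_norm")   == 0);

#ifdef GGML_USE_CUBLAS
// ggml-alloc may have moved the outputs to the GPU when reusing a parent tensor;
// force them back to the CPU backend so they can be read from host memory
if (!lctx.embedding.empty()) {
    embeddings->backend = GGML_BACKEND_CPU;
}
res->backend = GGML_BACKEND_CPU;
#endif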
diff --git a/llama.cpp b/llama.cpp
index 37df8877..04a779e0 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5903,6 +5903,13 @@ static int llama_decode_internal(
ggml_allocr_alloc_graph(lctx.alloc, gf);
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
+
+ GGML_ASSERT(strcmp(res->name, "result_output") == 0);
+ GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
+
+
#ifdef GGML_USE_CUBLAS
for (int i = 0; i < gf->n_leafs; i++) {
ggml_tensor * node = gf->leafs[i];
@@ -5920,6 +5927,12 @@ static int llama_decode_internal(
}
ggml_cuda_set_mul_mat_q(cparams.mul_mat_q);
+
+ // HACK: ggml-alloc may change the tensor backend when reusing a parent, so force output to be on the CPU here if needed
+ if (!lctx.embedding.empty()) {
+ embeddings->backend = GGML_BACKEND_CPU;
+ }
+ res->backend = GGML_BACKEND_CPU;
#endif
// LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
@@ -5944,12 +5957,6 @@ static int llama_decode_internal(
n_threads = 1;
}
- struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
- struct ggml_tensor * embeddings = gf->nodes[gf->n_nodes - 2];
-
- GGML_ASSERT(strcmp(res->name, "result_output") == 0);
- GGML_ASSERT(strcmp(embeddings->name, "result_norm") == 0);
-
#if GGML_USE_MPI
const int64_t n_layer = hparams.n_layer;
ggml_mpi_graph_compute_pre(lctx.ctx_mpi, gf, n_layer);
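Note that both before and after this patch the outputs are located by position (the last two graph nodes) and only verified by name. A name-based lookup would not depend on node order; a hypothetical helper along these lines (find_output_node is illustrative, not part of llama.cpp) could serve the same purpose, and ggml exposes a similar search as ggml_graph_get_tensor() if it is present in this revision:

// Hypothetical helper: linear scan of the graph's nodes for a tensor with the given name.
// Assumes <cstring> (strcmp) is available, as it already is in llama.cpp.
static struct ggml_tensor * find_output_node(struct ggml_cgraph * gf, const char * name) {
    for (int i = 0; i < gf->n_nodes; ++i) {
        if (strcmp(gf->nodes[i]->name, name) == 0) {
            return gf->nodes[i];
        }
    }
    return NULL;
}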