author    agray3 <agray3@users.noreply.github.com>  2024-10-20 07:36:16 +0100
committer GitHub <noreply@github.com>               2024-10-20 08:36:16 +0200
commit    f2d315b46f7aacc7df4b86bd8acba387b30e11ca (patch)
tree      05df4e87b268b03a68f18907c9ee7492f9159518
parent    afbf2ef3e263e5bee3150cf9c422039aa406f10d (diff)
Avoid rebuild of GGML graph for each token (#98)
Introduces caching of the GGML graph to avoid an unnecessary full rebuild for each token. KV cache parameters, which change with each token, are updated directly in the cached GGML graph. Caching can be disabled with the GGML_DISABLE_GRAPH_CACHING environment variable.
-rw-r--r--  ggml/include/ggml-backend.h |   6
-rw-r--r--  ggml/include/ggml.h         |   7
-rw-r--r--  ggml/src/ggml-backend.c     |  45
-rw-r--r--  src/llama.cpp               | 116
4 files changed, 161 insertions, 13 deletions
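The patch adds a scheduler-level toggle (ggml_use_cached_graph / ggml_set_cached_graph) so that the caller can build and allocate the compute graph once and then reuse it for subsequent tokens. The following is a minimal driver-loop sketch of that pattern, not code from the patch itself: the graph-building helper build_graph() and the persistent cached_gf pointer are assumptions made for illustration.

```c
#include "ggml.h"
#include "ggml-backend.h"

// Hypothetical helper that builds the model's compute graph (assumption).
struct ggml_cgraph * build_graph(void);

static enum ggml_status decode_one_token(ggml_backend_sched_t sched,
                                         struct ggml_cgraph ** cached_gf) {
    struct ggml_cgraph * gf;
    if (!ggml_use_cached_graph(sched)) {
        // First token (or caching disabled): full rebuild and allocation.
        gf = build_graph();
        if (!ggml_backend_sched_alloc_graph(sched, gf)) {
            return GGML_STATUS_ALLOC_FAILED;
        }
        *cached_gf = gf;
        ggml_set_cached_graph(sched, true);   // reuse the graph from now on
    } else {
        // Later tokens: skip rebuild/realloc; only per-token parameters
        // (e.g. KV-cache view offsets) are patched directly in the graph.
        gf = *cached_gf;
    }
    return ggml_backend_sched_graph_compute_async(sched, gf);
}
```

In the actual llama.cpp change below, caching is additionally abandoned when kv_self.n changes, when GGML_DISABLE_GRAPH_CACHING is set, when more than one device is in use, when the split count is not 2, or when the batch size is greater than 1.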
diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h
index 5f3f1e28..621620bc 100644
--- a/ggml/include/ggml-backend.h
+++ b/ggml/include/ggml-backend.h
@@ -232,6 +232,12 @@ extern "C" {
GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
GGML_API void ggml_backend_view_init(struct ggml_tensor * tensor);
+ // Utility to query whether cached GGML graph is in use
+ GGML_API bool ggml_use_cached_graph(ggml_backend_sched_t sched);
+
+ // Set whether or not to use GGML graph caching
+ GGML_API void ggml_set_cached_graph(ggml_backend_sched_t sched, bool set_value);
+
#ifdef __cplusplus
}
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index a467c297..a99dc6b5 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -597,6 +597,13 @@ extern "C" {
GGML_TENSOR_FLAG_PARAM = 4,
};
+ // Flag (used on GGML_OP_CPY nodes) indicating whether the node is associated with the K or V cache
+ enum ggml_kv_cache_flag {
+ GGML_KV_CACHE_FLAG_NONE = 0,
+ GGML_KV_CACHE_FLAG_K = 1,
+ GGML_KV_CACHE_FLAG_V = 2
+ };
+
// ggml object
struct ggml_object {
size_t offs;
diff --git a/ggml/src/ggml-backend.c b/ggml/src/ggml-backend.c
index e1651cc6..76d37f74 100644
--- a/ggml/src/ggml-backend.c
+++ b/ggml/src/ggml-backend.c
@@ -1040,6 +1040,13 @@ struct ggml_backend_sched_split {
struct ggml_cgraph graph;
};
+// Object to facilitate GGML graph caching
+struct ggml_cached_graph {
+ bool is_active;
+ ggml_backend_t input_backend;
+ struct ggml_tensor * input_cpy[GGML_SCHED_MAX_SPLIT_INPUTS];
+};
+
struct ggml_backend_sched {
bool is_reset; // true if the scheduler has been reset since the last graph split
bool is_alloc;
@@ -1085,6 +1092,8 @@ struct ggml_backend_sched {
size_t context_buffer_size;
bool debug;
+
+ struct ggml_cached_graph cached_graph;
};
#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
@@ -1762,6 +1771,14 @@ static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t s
struct ggml_tensor * input = split->inputs[j];
struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);
+ if (!sched->cached_graph.is_active) {
+ sched->cached_graph.input_backend = input_backend;
+ sched->cached_graph.input_cpy[j] = input_cpy;
+ } else {
+ input_backend = sched->cached_graph.input_backend;
+ input_cpy = sched->cached_graph.input_cpy[j];
+ }
+
if (input->flags & GGML_TENSOR_FLAG_INPUT) {
// inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
@@ -1893,6 +1910,8 @@ ggml_backend_sched_t ggml_backend_sched_new(
ggml_backend_sched_reset(sched);
+ sched->cached_graph.is_active = false;
+
return sched;
}
@@ -1969,16 +1988,16 @@ enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, st
}
enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
- if (!sched->is_reset && !sched->is_alloc) {
- ggml_backend_sched_reset(sched);
- }
-
- if (!sched->is_alloc) {
- if (!ggml_backend_sched_alloc_graph(sched, graph)) {
- return GGML_STATUS_ALLOC_FAILED;
+ if(!sched->cached_graph.is_active) {
+ if (!sched->is_reset && !sched->is_alloc) {
+ ggml_backend_sched_reset(sched);
+ }
+ if (!sched->is_alloc) {
+ if (!ggml_backend_sched_alloc_graph(sched, graph)) {
+ return GGML_STATUS_ALLOC_FAILED;
+ }
}
}
-
return ggml_backend_sched_compute_splits(sched);
}
@@ -2243,3 +2262,13 @@ bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t
return true;
}
+
+bool ggml_use_cached_graph(ggml_backend_sched_t sched) {
+ return sched->cached_graph.is_active;
+}
+
+void ggml_set_cached_graph(ggml_backend_sched_t sched, bool set_value) {
+ sched->cached_graph.is_active = set_value;
+}
+
+
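The llama.cpp hunk below is where the cached graph is kept up to date between tokens: the KV-cache write nodes are located by their "k_cache_view-" / "v_cache_view-" name prefixes and their destination pointers are re-based to the current head position. A condensed sketch of that per-node update for the K cache follows; the per-layer tensors kv_k_l, the per-layer widths n_embd_k_gqa, and kv_head stand in for llama.cpp's kv_self/hparams state and are assumptions for illustration.

```c
#include <stdlib.h>
#include <string.h>
#include "ggml.h"
#include "ggml-impl.h"   // struct ggml_cgraph internals (gf->nodes), as the patch includes in llama.cpp

// Re-base the copy destinations of the cached graph's K-cache views to the
// slot for the current kv head; mirrors the llama.cpp change below rather
// than adding new behaviour.
static void patch_k_cache_views(struct ggml_cgraph * gf,
                                struct ggml_tensor ** kv_k_l,
                                const int64_t * n_embd_k_gqa,
                                int64_t kv_head) {
    const char * k_prefix = "k_cache_view-";
    for (int i = 0; i < gf->n_nodes; i++) {
        struct ggml_tensor * node = gf->nodes[i];
        if (node->op != GGML_OP_CPY ||
            strncmp(node->src[1]->name, k_prefix, strlen(k_prefix)) != 0) {
            continue;
        }
        int il = atoi(node->src[1]->name + strlen(k_prefix)); // layer index from name
        size_t offset = ggml_row_size(kv_k_l[il]->type, n_embd_k_gqa[il]) * kv_head;
        node->src[1]->data = (char *) kv_k_l[il]->data + offset;
    }
}
```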
diff --git a/src/llama.cpp b/src/llama.cpp
index c950a46d..c5df16e3 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -8,6 +8,7 @@
#include "ggml.h"
#include "ggml-alloc.h"
#include "ggml-backend.h"
+#include "../ggml/src/ggml-impl.h"
#ifdef GGML_USE_RPC
# include "ggml-rpc.h"
@@ -2659,6 +2660,17 @@ struct llama_model {
}
};
+// Object used to allow caching of GGML graph between tokens where possible.
+struct ggml_cached_graph {
+ bool is_active = false;
+ ggml_cgraph * gf;
+ size_t n;
+ ggml_backend_t backend_res;
+ ggml_backend_t backend_embd;
+ struct ggml_tensor * res;
+ struct ggml_tensor * embd;
+};
+
struct llama_context {
llama_context(const llama_model & model)
: model(model)
@@ -2759,6 +2771,8 @@ struct llama_context {
struct ggml_tensor * inp_pos_bucket; // I32 [n_batch|n_kv, n_batch]
struct ggml_tensor * inp_embd_enc; // F32 [n_embd, n_outputs_enc]
struct ggml_tensor * inp_KQ_mask_cross; // F32 [n_outputs_enc, n_batch]
+
+ struct ggml_cached_graph cached_graph;
};
struct llama_lora_weight {
@@ -14877,11 +14891,44 @@ static int llama_decode_internal(
ggml_backend_sched_reset(lctx.sched);
ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
- ggml_cgraph * gf = llama_build_graph(lctx, u_batch, false);
+ ggml_cgraph * gf;
+ // the output is always the last tensor in the graph
+ struct ggml_tensor * res;
+ struct ggml_tensor * embd;
+
+ bool n_has_changed_since_last_token = false;
+ if(lctx.cached_graph.n != kv_self.n) n_has_changed_since_last_token = true;
+ lctx.cached_graph.n = kv_self.n;
+
+ // Re-build graph only if graph caching is not possible
+ if(!ggml_use_cached_graph(lctx.sched) || n_has_changed_since_last_token) {
+
+ gf = llama_build_graph(lctx, u_batch, false);
+
+ // Set whether GGML graph caching is in use within GGML module, based on
+ // whether caching was activated here during the previous token
+ ggml_set_cached_graph(lctx.sched,lctx.cached_graph.is_active);
+
+ // Disable future graph caching in presence of env var,
+ // if there are multiple devices, if batch size is greater than 1,
+ // or if nsplits is not 2.
+ // TO DO enable graph caching for these cases
+ bool disable_cached_ggml_graph = (getenv("GGML_DISABLE_GRAPH_CACHING") != nullptr)
+ || (llama_get_device_count(model) > 1)
+ || (ggml_backend_sched_get_n_splits(lctx.sched) != 2);
+ for (int i = 0 ; i < gf->n_nodes; i++) {
+ if (gf->nodes[i]->op == GGML_OP_ADD && gf->nodes[i]->src[1] && gf->nodes[i]->src[1]->ne[1] > 1) {
+ disable_cached_ggml_graph = true;
+ break;
+ }
+ }
+
+ // Set whether graph caching should be used for future tokens
+ lctx.cached_graph.is_active=!disable_cached_ggml_graph;
// the output is always the last tensor in the graph
- struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
- struct ggml_tensor * embd = gf->nodes[gf->n_nodes - 2];
+ res = gf->nodes[gf->n_nodes - 1];
+ embd = gf->nodes[gf->n_nodes - 2];
if (lctx.n_outputs == 0) {
// no output
@@ -14901,9 +14948,58 @@ static int llama_decode_internal(
embd = nullptr; // do not extract embeddings when not needed
GGML_ASSERT(strcmp(res->name, "result_output") == 0 && "missing result_output tensor");
}
+ lctx.cached_graph.res = res;
+ lctx.cached_graph.embd = embd;
// LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
ggml_backend_sched_alloc_graph(lctx.sched, gf);
+ }
+ else {
+ gf = lctx.cached_graph.gf;
+ res = lctx.cached_graph.res;
+ embd = lctx.cached_graph.embd;
+ }
+ lctx.cached_graph.gf = gf;
+
+ // Update K and V cache parameters in cached graph.
+ if(gf != nullptr && gf->nodes != nullptr && ggml_use_cached_graph(lctx.sched)) {
+
+ const struct llama_hparams & hparams = model.hparams;
+ const int64_t kv_head = kv_self.head;
+
+ for (int i = 0; i < gf->n_nodes; i++) {
+ ggml_tensor * node = gf->nodes[i];
+ if (node->op == GGML_OP_CPY) {
+
+ // K cache
+ const char* k_prefix = "k_cache_view-";
+ if (strncmp(node->src[1]->name, k_prefix, strlen(k_prefix)) == 0) {
+ int il = atoi(node->src[1]->name + strlen(k_prefix)); // Layer index from name
+ const int64_t n_embd_k_gqa = hparams.n_embd_k_gqa(il);
+ ggml_tensor * tmp_tensor = kv_self.k_l[il];
+ size_t tmp_offset = (ggml_row_size(kv_self.k_l[il]->type, n_embd_k_gqa))*kv_head;
+ node->src[1]->data = static_cast<char*>(tmp_tensor->data) + tmp_offset;
+ }
+
+ // V cache
+ const char* v_prefix = "v_cache_view-";
+ if (strncmp(node->src[1]->name, v_prefix, strlen(v_prefix)) == 0) {
+ int il = atoi(node->src[1]->name + strlen(v_prefix)); // Layer index from name
+ const int64_t n_embd_v_gqa = hparams.n_embd_v_gqa(il);
+ ggml_tensor * tmp_tensor = kv_self.v_l[il];
+ size_t tmp_offset;
+ if (cparams.flash_attn) {
+ tmp_offset = (kv_head)*ggml_row_size(kv_self.v_l[il]->type, n_embd_v_gqa);
+ } else {
+ tmp_offset = (kv_head)*ggml_element_size(kv_self.v_l[il]);
+ }
+ node->src[1]->data = static_cast<char*>(tmp_tensor->data) + tmp_offset;
+ }
+
+ }
+ }
+
+ }
llama_set_inputs(lctx, u_batch);
@@ -14927,12 +15023,18 @@ static int llama_decode_internal(
// extract logits
if (res) {
ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(lctx.sched, res);
- GGML_ASSERT(backend_res != nullptr);
- GGML_ASSERT(lctx.logits != nullptr);
float * logits_out = lctx.logits + n_outputs_prev*n_vocab;
const int32_t n_outputs_new = lctx.n_outputs;
+ if(!ggml_use_cached_graph(lctx.sched))
+ lctx.cached_graph.backend_res = backend_res;
+ else
+ backend_res = lctx.cached_graph.backend_res;
+
+ GGML_ASSERT(backend_res != nullptr);
+ GGML_ASSERT(lctx.logits != nullptr);
+
if (n_outputs_new) {
GGML_ASSERT( n_outputs_prev + n_outputs_new <= n_outputs);
GGML_ASSERT((n_outputs_prev + n_outputs_new)*n_vocab <= (int64_t) lctx.logits_size);
@@ -14943,6 +15045,10 @@ static int llama_decode_internal(
// extract embeddings
if (embd) {
ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched, embd);
+ if(!ggml_use_cached_graph(lctx.sched))
+ lctx.cached_graph.backend_embd = backend_embd;
+ else
+ backend_embd = lctx.cached_graph.backend_embd;
GGML_ASSERT(backend_embd != nullptr);
switch (cparams.pooling_type) {