Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  1131
1 file changed, 608 insertions(+), 523 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index ad7b7b7d..38e7036a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -979,21 +979,6 @@ static std::string gguf_kv_to_str(const struct gguf_context * ctx_gguf, int i) {
}
//
-// ggml helpers
-//
-
-static void ggml_graph_compute_helper(std::vector<uint8_t> & buf, ggml_cgraph * graph, int n_threads) {
- struct ggml_cplan plan = ggml_graph_plan(graph, n_threads);
-
- if (plan.work_size > 0) {
- buf.resize(plan.work_size);
- plan.work_data = buf.data();
- }
-
- ggml_graph_compute(graph, &plan);
-}
-
-//
// llama helpers
//
@@ -1728,6 +1713,7 @@ struct llama_hparams {
struct llama_cparams {
uint32_t n_ctx; // context size used during inference
uint32_t n_batch;
+ uint32_t n_ubatch;
uint32_t n_threads; // number of threads to use for generation
uint32_t n_threads_batch; // number of threads to use for batch processing
@@ -2024,8 +2010,7 @@ struct llama_context {
ggml_vk_free_cpu_assist();
#endif
- ggml_backend_buffer_free(buf_input);
- ggml_free(ctx_input);
+ ggml_backend_buffer_free(buf_output);
}
llama_cparams cparams;
@@ -2051,12 +2036,20 @@ struct llama_context {
int64_t t_p_eval_us = 0;
int64_t t_eval_us = 0;
+ int64_t t_compute_start_us = 0;
+ int64_t n_queued_tokens = 0;
+
int32_t n_sample = 0; // number of tokens sampled
int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
int32_t n_eval = 0; // number of eval calls
- // logits output (2-dimensional array: [n_tokens][n_vocab])
- std::vector<float> logits;
+ // host buffer for the model output (logits and embeddings)
+ ggml_backend_buffer_t buf_output = nullptr;
+
+ // decode output (2-dimensional array: [n_tokens][n_vocab])
+ size_t logits_size = 0;
+ float * logits = nullptr;
+
#ifndef NDEBUG
// guard against access to unset logits
std::vector<bool> logits_valid;
@@ -2065,7 +2058,8 @@ struct llama_context {
// embeddings output (2-dimensional array: [n_tokens][n_embd])
// populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
- std::vector<float> embd;
+ size_t embd_size = 0;
+ float * embd = nullptr;
// sequence embeddings output (map of [n_embd] vectors)
// populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
@@ -2079,8 +2073,6 @@ struct llama_context {
void * abort_callback_data = nullptr;
// input tensors
- ggml_backend_buffer_t buf_input = nullptr;
- ggml_context * ctx_input = nullptr;
struct ggml_tensor * inp_tokens; // I32 [n_batch]
struct ggml_tensor * inp_embd; // F32 [n_embd, n_batch]
struct ggml_tensor * inp_pos; // I32 [n_batch]
@@ -2090,7 +2082,7 @@ struct llama_context {
struct ggml_tensor * inp_mean; // F32 [n_batch, n_batch]
struct ggml_tensor * inp_cls; // I32 [n_batch]
struct ggml_tensor * inp_s_copy; // I32 [kv_size]
- struct ggml_tensor * inp_s_mask; // F32 [kv_size]
+ struct ggml_tensor * inp_s_mask; // F32 [1, kv_size]
struct ggml_tensor * inp_s_seq; // I32 [kv_size, n_batch]
#ifdef GGML_USE_MPI
@@ -4005,6 +3997,7 @@ static bool llm_load_tensors(
// there is very little benefit to offloading the input layer, so always keep it on the CPU
model.buft_input = llama_default_buffer_type_cpu(true);
+ //model.buft_input = llama_default_buffer_type_offload(main_gpu);
model.buft_layer.resize(n_layer);
@@ -5094,29 +5087,32 @@ enum llm_norm_type {
static struct ggml_tensor * llm_build_inp_embd(
struct ggml_context * ctx,
+ struct llama_context & lctx,
const llama_hparams & hparams,
const llama_batch & batch,
struct ggml_tensor * tok_embd,
- struct ggml_tensor * inp_tokens,
- struct ggml_tensor * inp_embd,
const llm_build_cb & cb) {
const int64_t n_embd = hparams.n_embd;
struct ggml_tensor * inpL;
if (batch.token) {
- struct ggml_tensor * inp_tokens_v = ggml_view_1d(ctx, inp_tokens, batch.n_tokens, 0);
- cb(inp_tokens, "inp_tokens", -1);
+ lctx.inp_tokens = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, batch.n_tokens);
+ cb(lctx.inp_tokens, "inp_tokens", -1);
+ ggml_set_input(lctx.inp_tokens);
- inpL = ggml_get_rows(ctx, tok_embd, inp_tokens_v);
+ inpL = ggml_get_rows(ctx, tok_embd, lctx.inp_tokens);
} else {
#ifdef GGML_USE_MPI
GGML_ASSERT(false && "not implemented");
#endif
-
- inpL = ggml_view_2d(ctx, inp_embd, n_embd, batch.n_tokens, inp_embd->nb[1], 0);
+ lctx.inp_embd = ggml_new_tensor_2d(ctx, GGML_TYPE_F32, n_embd, batch.n_tokens);
+ inpL = lctx.inp_embd;
+ ggml_set_input(lctx.inp_embd);
}
+ cb(inpL, "inp_embd", -1);
+
return inpL;
}
@@ -5420,7 +5416,7 @@ static struct ggml_tensor * llm_build_kv(
struct llm_build_context {
const llama_model & model;
- const llama_context & lctx;
+ llama_context & lctx;
const llama_hparams & hparams;
const llama_cparams & cparams;
const llama_batch & batch;
@@ -5513,6 +5509,18 @@ struct llm_build_context {
};
ctx0 = ggml_init(params);
+
+ lctx.inp_tokens = nullptr;
+ lctx.inp_embd = nullptr;
+ lctx.inp_pos = nullptr;
+ lctx.inp_KQ_mask = nullptr;
+ lctx.inp_KQ_pos = nullptr;
+ lctx.inp_K_shift = nullptr;
+ lctx.inp_mean = nullptr;
+ lctx.inp_cls = nullptr;
+ lctx.inp_s_copy = nullptr;
+ lctx.inp_s_mask = nullptr;
+ lctx.inp_s_seq = nullptr;
}
void free() {
@@ -5527,6 +5535,10 @@ struct llm_build_context {
GGML_ASSERT(kv_self.size == n_ctx);
+ lctx.inp_K_shift = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_ctx);
+ cb(lctx.inp_K_shift, "K_shift", -1);
+ ggml_set_input(lctx.inp_K_shift);
+
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * tmp =
// we rotate only the first n_rot dimensions
@@ -5550,12 +5562,14 @@ struct llm_build_context {
GGML_ASSERT(kv_self.recurrent);
+ struct ggml_tensor * state_copy = build_inp_s_copy();
+
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * conv_states = ggml_reshape_2d(ctx0, kv_self.k_l[il], hparams.n_embd_k_s(), kv_self.size);
struct ggml_tensor * ssm_states = ggml_reshape_2d(ctx0, kv_self.v_l[il], hparams.n_embd_v_s(), kv_self.size);
- conv_states = ggml_get_rows(ctx0, conv_states, lctx.inp_s_copy);
- ssm_states = ggml_get_rows(ctx0, ssm_states, lctx.inp_s_copy);
+ conv_states = ggml_get_rows(ctx0, conv_states, state_copy);
+ ssm_states = ggml_get_rows(ctx0, ssm_states, state_copy);
// TODO: name the intermediate tensors with cb()
@@ -5615,6 +5629,66 @@ struct llm_build_context {
return gf;
}
+ struct ggml_tensor * build_inp_pos() {
+ lctx.inp_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(lctx.inp_pos, "inp_pos", -1);
+ ggml_set_input(lctx.inp_pos);
+ return lctx.inp_pos;
+ }
+
+ struct ggml_tensor * build_inp_KQ_mask(bool causal = true) {
+ if (causal) {
+ lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_kv, n_tokens);
+ } else {
+ lctx.inp_KQ_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
+ }
+ cb(lctx.inp_KQ_mask, "KQ_mask", -1);
+ ggml_set_input(lctx.inp_KQ_mask);
+ return lctx.inp_KQ_mask;
+ }
+
+ struct ggml_tensor * build_inp_KQ_pos() {
+ lctx.inp_KQ_pos = ggml_new_tensor_1d(ctx0, GGML_TYPE_F32, n_kv);
+ cb(lctx.inp_KQ_pos, "KQ_pos", -1);
+ ggml_set_input(lctx.inp_KQ_pos);
+ return lctx.inp_KQ_pos;
+ }
+
+ struct ggml_tensor * build_inp_mean() {
+ lctx.inp_mean = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_tokens, n_tokens);
+ cb(lctx.inp_mean, "inp_mean", -1);
+ ggml_set_input(lctx.inp_mean);
+ return lctx.inp_mean;
+ }
+
+ struct ggml_tensor * build_inp_cls() {
+ lctx.inp_cls = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_tokens);
+ cb(lctx.inp_cls, "inp_cls", -1);
+ ggml_set_input(lctx.inp_cls);
+ return lctx.inp_cls;
+ }
+
+ struct ggml_tensor * build_inp_s_copy() {
+ lctx.inp_s_copy = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, kv_self.size);
+ cb(lctx.inp_s_copy, "inp_s_copy", -1);
+ ggml_set_input(lctx.inp_s_copy);
+ return lctx.inp_s_copy;
+ }
+
+ struct ggml_tensor * build_inp_s_mask() {
+ lctx.inp_s_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, 1, n_kv);
+ cb(lctx.inp_s_mask, "inp_s_mask", -1);
+ ggml_set_input(lctx.inp_s_mask);
+ return lctx.inp_s_mask;
+ }
+
+ struct ggml_tensor * build_inp_s_seq() {
+ lctx.inp_s_seq = ggml_new_tensor_2d(ctx0, GGML_TYPE_I32, n_kv, n_tokens);
+ cb(lctx.inp_s_seq, "inp_s_seq", -1);
+ ggml_set_input(lctx.inp_s_seq);
+ return lctx.inp_s_seq;
+ }
+
struct ggml_cgraph * build_llama() {
struct ggml_cgraph * gf = ggml_new_graph_custom(ctx0, LLAMA_MAX_NODES, false);
@@ -5625,16 +5699,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -5686,7 +5757,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -5804,20 +5874,16 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
// positions of the tokens in the KV cache
- struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
- cb(KQ_pos, "KQ_pos", -1);
+ struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -5865,7 +5931,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -5921,16 +5986,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * attn_norm;
@@ -5984,7 +6046,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = cur;
@@ -6035,21 +6096,17 @@ struct llm_build_context {
GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
struct ggml_tensor * cur;
- struct ggml_tensor * pos;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
- pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
+ struct ggml_tensor * pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
cb(pos, "pos_embd", -1);
inpL = ggml_add(ctx0, inpL, pos);
@@ -6083,7 +6140,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
// add the input
@@ -6135,16 +6191,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * residual = inpL;
@@ -6284,7 +6337,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Q, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, residual, cur);
@@ -6338,16 +6390,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
// positions of the tokens in the KV cache
- struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
- cb(KQ_pos, "KQ_pos", -1);
+ struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -6377,7 +6426,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -6433,15 +6481,12 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- // get input vectors with right size
- const size_t stride1 = n_tokens * ggml_type_size(lctx.inp_tokens->type);
-
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- struct ggml_tensor * inp_mean = ggml_view_2d(ctx0, lctx.inp_mean, n_tokens, n_tokens, stride1, 0);
- struct ggml_tensor * inp_cls = ggml_view_1d(ctx0, lctx.inp_cls, n_tokens, 0);
+ struct ggml_tensor * inp_pos = build_inp_pos();
+ struct ggml_tensor * inp_mean = build_inp_mean();
+ struct ggml_tensor * inp_cls = build_inp_cls();
// construct input embeddings (token, type, position)
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// token types are hardcoded to zero ("Sentence A")
struct ggml_tensor * type_row0 = ggml_view_1d(ctx0, model.type_embd, n_embd, 0);
@@ -6456,8 +6501,7 @@ struct llm_build_context {
cb(inpL, "inp_norm", -1);
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_cont(ctx0, ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_tokens, n_tokens, n_tokens*ggml_type_size(lctx.inp_KQ_mask->type), 0));
- cb(KQ_mask, "KQ_mask", -1); // [n_tokens, n_tokens]
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask(false);
// iterate layers
for (int il = 0; il < n_layer; ++il) {
@@ -6619,16 +6663,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
// positions of the tokens in the KV cache
- struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
- cb(KQ_pos, "KQ_pos", -1);
+ struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
inpL = llm_build_norm(ctx0, inpL, hparams,
model.tok_norm,
@@ -6664,7 +6705,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
// Add the input
@@ -6716,16 +6756,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
// positions of the tokens in the KV cache
- struct ggml_tensor * KQ_pos = ggml_view_1d(ctx0, lctx.inp_KQ_pos, n_kv, 0);
- cb(KQ_pos, "KQ_pos", -1);
+ struct ggml_tensor * KQ_pos = build_inp_KQ_pos();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * attn_norm;
@@ -6766,7 +6803,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, KQ_pos, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
// Add the input
@@ -6821,16 +6857,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -6883,7 +6916,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -6939,16 +6971,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -6993,7 +7022,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -7048,16 +7076,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -7109,7 +7134,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -7164,16 +7188,13 @@ struct llm_build_context {
struct ggml_tensor * ffn_output;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
attn_norm_output = llm_build_norm(ctx0, inpL, hparams,
@@ -7231,7 +7252,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f, cb, il);
- cb(cur, "kqv_out", il);
}
// FF
@@ -7281,16 +7301,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
@@ -7329,7 +7346,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * sa_out = cur;
@@ -7383,16 +7399,13 @@ struct llm_build_context {
struct ggml_tensor * pos;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
pos = ggml_get_rows(ctx0, model.pos_embd, inp_pos);
cb(pos, "pos_embd", -1);
@@ -7428,7 +7441,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
// add the input
@@ -7481,16 +7493,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
cur = llm_build_norm(ctx0, inpL, hparams,
@@ -7532,7 +7541,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
// add the input
@@ -7584,16 +7592,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -7645,7 +7650,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -7698,16 +7702,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -7759,7 +7760,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
@@ -7821,20 +7821,17 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// scale the input embeddings
inpL = ggml_scale(ctx0, inpL, scale_embd);
cb(inpL, "inp_scaled", -1);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -7886,7 +7883,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, model.layers[il].bo,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f/sqrtf(float(n_embd_head)), cb, il);
- cb(cur, "kqv_out", il);
}
// scale_res - scale the hidden states for residual connection
@@ -7953,22 +7949,18 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
inpL = ggml_scale(ctx0, inpL, sqrtf(n_embd));
cb(inpL, "inp_scaled", -1);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
-
// norm
cur = llm_build_norm(ctx0, inpL, hparams,
model.layers[il].attn_norm, NULL,
@@ -8005,7 +7997,6 @@ struct llm_build_context {
cur = llm_build_kv(ctx0, model, hparams, kv_self, gf,
model.layers[il].wo, NULL,
Kcur, Vcur, Qcur, KQ_mask, nullptr, n_ctx, n_tokens, kv_head, n_kv, 1.0f, cb, il);
- cb(cur, "kqv_out", il);
}
struct ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
@@ -8060,16 +8051,13 @@ struct llm_build_context {
struct ggml_tensor * cur;
struct ggml_tensor * inpL;
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
// inp_pos - contains the positions
- struct ggml_tensor * inp_pos = ggml_view_1d(ctx0, lctx.inp_pos, n_tokens, 0);
- cb(inp_pos, "inp_pos", -1);
+ struct ggml_tensor * inp_pos = build_inp_pos();
// KQ_mask (mask for 1 head, it will be broadcasted to all heads)
- struct ggml_tensor * KQ_mask = ggml_view_2d(ctx0, lctx.inp_KQ_mask, n_kv, n_tokens, n_kv*ggml_type_size(lctx.inp_KQ_mask->type), 0);
- cb(KQ_mask, "KQ_mask", -1);
+ struct ggml_tensor * KQ_mask = build_inp_KQ_mask();
for (int il = 0; il < n_layer; ++il) {
struct ggml_tensor * inpSA = inpL;
@@ -8178,11 +8166,10 @@ struct llm_build_context {
struct ggml_tensor * inpL;
// {n_embd, n_tokens}
- inpL = llm_build_inp_embd(ctx0, hparams, batch, model.tok_embd, lctx.inp_tokens, lctx.inp_embd, cb);
- cb(inpL, "inp_embd", -1);
+ inpL = llm_build_inp_embd(ctx0, lctx, hparams, batch, model.tok_embd, cb);
- struct ggml_tensor * state_mask = ggml_view_2d(ctx0, lctx.inp_s_mask, 1, n_kv, lctx.inp_s_mask->nb[0], 0);
- struct ggml_tensor * state_seq = ggml_view_2d(ctx0, lctx.inp_s_seq, n_kv, n_tokens, n_kv*ggml_element_size(lctx.inp_s_seq), 0);
+ struct ggml_tensor * state_mask = build_inp_s_mask();
+ struct ggml_tensor * state_seq = build_inp_s_seq();
for (int il = 0; il < n_layer; ++il) {
// (ab)using the KV cache to store the states
@@ -8234,7 +8221,7 @@ struct llm_build_context {
ggml_build_forward_expand(gf,
ggml_cpy(ctx0,
ggml_view_2d(ctx0, x_conv, d_conv - 1, d_inner*n_kv, d_conv*ggml_element_size(x_conv), (1+d_inner*n_tokens)*ggml_element_size(x_conv)),
- ggml_view_1d(ctx0, kv_self.k_l[il], (d_conv - 1)*(d_inner)*(n_kv), kv_self.head*(d_conv - 1)*(d_inner)*ggml_element_size(x_conv))));
+ ggml_view_1d(ctx0, kv_self.k_l[il], (d_conv - 1)*(d_inner)*(n_kv), kv_head*(d_conv - 1)*(d_inner)*ggml_element_size(x_conv))));
// extract x from x_conv
x = ggml_view_2d(ctx0, x_conv, d_inner, n_tokens, d_inner*ggml_element_size(x_conv), 0);
@@ -8268,7 +8255,7 @@ struct llm_build_context {
ggml_build_forward_expand(gf,
ggml_cpy(ctx0,
ggml_view_1d(ctx0, y_ssm_states, d_state*d_inner*n_kv, d_inner*n_tokens*ggml_element_size(y_ssm_states)),
- ggml_view_1d(ctx0, kv_self.v_l[il], d_state*d_inner*n_kv, kv_self.head*d_state*d_inner*ggml_element_size(ssm_states))));
+ ggml_view_1d(ctx0, kv_self.v_l[il], d_state*d_inner*n_kv, kv_head*d_state*d_inner*ggml_element_size(ssm_states))));
struct ggml_tensor * y = ggml_view_2d(ctx0, y_ssm_states, d_inner, n_tokens, d_inner*ggml_element_size(y_ssm_states), 0);
@@ -8372,7 +8359,18 @@ static struct ggml_cgraph * llama_build_graph(
if (!lctx.cparams.offload_kqv) {
if (strcmp(name, "kqv_merged_cont") == 0) {
// all nodes between the KV store and the attention output are run on the CPU
- ggml_backend_sched_set_node_backend(lctx.sched, cur, lctx.backend_cpu);
+ ggml_backend_sched_set_tensor_backend(lctx.sched, cur, lctx.backend_cpu);
+ }
+ }
+
+ // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends
+ // to fix this, we assign the norm layer manually to the backend of its layer
+ if (il != -1 && strcmp(name, "norm") == 0) {
+ for (auto * backend : lctx.backends) {
+ if (ggml_backend_buft_supports_backend(lctx.model.buft_layer[il].buft, backend)) {
+ ggml_backend_sched_set_tensor_backend(lctx.sched, cur, backend);
+ break;
+ }
}
}
};
@@ -8528,7 +8526,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
ggml_backend_tensor_set(lctx.inp_embd, batch.embd, 0, n_tokens*n_embd*ggml_element_size(lctx.inp_embd));
}
- if (batch.pos) {
+ if (batch.pos && lctx.inp_pos) {
const int64_t n_tokens = batch.n_tokens;
ggml_backend_tensor_set(lctx.inp_pos, batch.pos, 0, n_tokens*ggml_element_size(lctx.inp_pos));
@@ -8539,61 +8537,63 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
"non-causal attention with generative models is not supported"
);
- // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
- if (cparams.causal_attn) {
- const int64_t n_kv = kv_self.n;
- const int64_t n_tokens = batch.n_tokens;
+ if (lctx.inp_KQ_mask) {
+ // NOTE: hparams.causal_attn indicates the model is capable of generation and uses the kv cache.
+ if (cparams.causal_attn) {
+ const int64_t n_kv = kv_self.n;
+ const int64_t n_tokens = batch.n_tokens;
- assert(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
+ GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
- float * data = (float *) lctx.inp_KQ_mask->data;
+ float * data = (float *) lctx.inp_KQ_mask->data;
- // For causal attention, use only the previous KV cells
- // of the correct sequence for each token of the batch.
- // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
- for (int h = 0; h < 1; ++h) {
- for (int j = 0; j < n_tokens; ++j) {
- const llama_pos pos = batch.pos[j];
- const llama_seq_id seq_id = batch.seq_id[j][0];
+ // For causal attention, use only the previous KV cells
+ // of the correct sequence for each token of the batch.
+ // It's assumed that if a token in the batch has multiple sequences, they are equivalent.
+ for (int h = 0; h < 1; ++h) {
+ for (int j = 0; j < n_tokens; ++j) {
+ const llama_pos pos = batch.pos[j];
+ const llama_seq_id seq_id = batch.seq_id[j][0];
- for (int i = 0; i < n_kv; ++i) {
- float f;
- if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
- f = -INFINITY;
- } else {
- f = 0.0f;
+ for (int i = 0; i < n_kv; ++i) {
+ float f;
+ if (!lctx.kv_self.cells[i].has_seq_id(seq_id) || lctx.kv_self.cells[i].pos > pos) {
+ f = -INFINITY;
+ } else {
+ f = 0.0f;
+ }
+ data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
}
- data[h*(n_kv*n_tokens) + j*n_kv + i] = f;
}
}
- }
- } else {
- // when using kv cache, the mask needs to match the kv cache size
- const int64_t n_tokens = batch.n_tokens;
- const int64_t n_stride = hparams.causal_attn ? kv_self.n : n_tokens;
+ } else {
+ // when using kv cache, the mask needs to match the kv cache size
+ const int64_t n_tokens = batch.n_tokens;
+ const int64_t n_stride = hparams.causal_attn ? kv_self.n : n_tokens;
- assert(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
+ GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_mask->buffer));
- float * data = (float *) lctx.inp_KQ_mask->data;
+ float * data = (float *) lctx.inp_KQ_mask->data;
- for (int h = 0; h < 1; ++h) {
- for (int j = 0; j < n_tokens; ++j) {
- const llama_seq_id seq_id = batch.seq_id[j][0];
+ for (int h = 0; h < 1; ++h) {
+ for (int j = 0; j < n_tokens; ++j) {
+ const llama_seq_id seq_id = batch.seq_id[j][0];
- for (int i = 0; i < n_tokens; ++i) {
- float f = -INFINITY;
- for (int s = 0; s < batch.n_seq_id[i]; ++s) {
- if (batch.seq_id[i][s] == seq_id) {
- f = 0.0f;
- break;
+ for (int i = 0; i < n_tokens; ++i) {
+ float f = -INFINITY;
+ for (int s = 0; s < batch.n_seq_id[i]; ++s) {
+ if (batch.seq_id[i][s] == seq_id) {
+ f = 0.0f;
+ break;
+ }
}
- }
- data[h*(n_tokens*n_tokens) + j*n_stride + i] = f;
- }
+ data[h*(n_tokens*n_tokens) + j*n_stride + i] = f;
+ }
- for (int i = n_tokens; i < n_stride; ++i) {
- data[h*(n_tokens*n_tokens) + j*n_stride + i] = -INFINITY;
+ for (int i = n_tokens; i < n_stride; ++i) {
+ data[h*(n_tokens*n_tokens) + j*n_stride + i] = -INFINITY;
+ }
}
}
}
@@ -8602,7 +8602,8 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
if (hparams.need_kq_pos) {
const int64_t n_kv = kv_self.n;
- assert(ggml_backend_buffer_is_host(lctx.inp_KQ_pos->buffer));
+ GGML_ASSERT(lctx.inp_KQ_pos);
+ GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_KQ_pos->buffer));
float * data = (float *) lctx.inp_KQ_pos->data;
@@ -8614,6 +8615,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
if (cparams.pooling_type == LLAMA_POOLING_TYPE_MEAN) {
const int64_t n_tokens = batch.n_tokens;
+ GGML_ASSERT(lctx.inp_mean);
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_mean->buffer));
float * data = (float *) lctx.inp_mean->data;
@@ -8645,6 +8647,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
if (cparams.pooling_type == LLAMA_POOLING_TYPE_CLS) {
const int64_t n_tokens = batch.n_tokens;
+ GGML_ASSERT(lctx.inp_cls);
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_cls->buffer));
uint32_t * data = (uint32_t *) lctx.inp_cls->data;
@@ -8665,7 +8668,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
if (kv_self.recurrent) {
const int64_t n_kv = kv_self.n;
- {
+ if (lctx.inp_s_mask) {
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_mask->buffer));
float * data = (float *) lctx.inp_s_mask->data;
@@ -8687,7 +8690,7 @@ static void llama_set_inputs(llama_context & lctx, const llama_batch & batch) {
// update the correct state(s)/sequence(s) for each token of the batch.
// Like with the KQ_mask, if a token in the batch has multiple sequences,
// they are assumed to be equivalent (not here, but in ggml_ssm_scan and ggml_ssm_conv).
- {
+ if (lctx.inp_s_seq) {
const int64_t n_tokens = batch.n_tokens;
GGML_ASSERT(ggml_backend_buffer_is_host(lctx.inp_s_seq->buffer));
@@ -8730,7 +8733,7 @@ static void llama_graph_compute(
ggml_backend_cpu_set_abort_callback(lctx.backend_cpu, lctx.abort_callback, lctx.abort_callback_data);
}
- ggml_backend_sched_graph_compute(lctx.sched, gf);
+ ggml_backend_sched_graph_compute_async(lctx.sched, gf);
// fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(lctx.sched));
@@ -8750,10 +8753,11 @@ static void llama_graph_compute(
//
static int llama_decode_internal(
llama_context & lctx,
- llama_batch batch) {
- const uint32_t n_tokens = batch.n_tokens;
+ llama_batch batch_all) { // TODO: rename back to batch
+
+ const uint32_t n_tokens_all = batch_all.n_tokens;
- if (n_tokens == 0) {
+ if (n_tokens_all == 0) {
LLAMA_LOG_ERROR("%s: n_tokens == 0", __func__);
return -1;
}
@@ -8762,14 +8766,16 @@ static int llama_decode_internal(
const auto & hparams = model.hparams;
const auto & cparams = lctx.cparams;
- const auto n_batch = cparams.n_batch;
+ GGML_ASSERT((!batch_all.token && batch_all.embd) || (batch_all.token && !batch_all.embd)); // NOLINT
- GGML_ASSERT(n_tokens <= n_batch);
- GGML_ASSERT((!batch.token && batch.embd) || (batch.token && !batch.embd)); // NOLINT
+ GGML_ASSERT(n_tokens_all <= cparams.n_batch);
- int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+ GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens");
- const int64_t t_start_us = ggml_time_us();
+ if (lctx.t_compute_start_us == 0) {
+ lctx.t_compute_start_us = ggml_time_us();
+ }
+ lctx.n_queued_tokens += n_tokens_all;
#ifdef GGML_USE_MPI
// TODO: needs fix after #3228
@@ -8777,272 +8783,274 @@ static int llama_decode_internal(
//ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
#endif
- GGML_ASSERT(n_threads > 0);
-
auto & kv_self = lctx.kv_self;
const int64_t n_embd = hparams.n_embd;
const int64_t n_vocab = hparams.n_vocab;
- // helpers for smoother batch API transition
- // after deprecating the llama_eval calls, these will be removed
- std::vector<llama_pos> pos;
- std::vector<int32_t> n_seq_id;
- std::vector<llama_seq_id *> seq_id_arr;
- std::vector<std::vector<llama_seq_id>> seq_id;
+ auto * logits_out = lctx.logits;
- if (batch.pos == nullptr) {
- pos.resize(n_tokens);
- for (uint32_t i = 0; i < n_tokens; i++) {
- pos[i] = batch.all_pos_0 + i*batch.all_pos_1;
- }
+#ifndef NDEBUG
+ auto & logits_valid = lctx.logits_valid;
+ logits_valid.clear();
+ logits_valid.resize(n_tokens_all);
- batch.pos = pos.data();
- }
+ memset(logits_out, 0, lctx.logits_size*sizeof(float));
+#endif
- if (batch.seq_id == nullptr) {
- n_seq_id.resize(n_tokens);
- seq_id.resize(n_tokens);
- seq_id_arr.resize(n_tokens);
- for (uint32_t i = 0; i < n_tokens; i++) {
- n_seq_id[i] = 1;
- seq_id[i].resize(1);
- seq_id[i][0] = batch.all_seq_id;
- seq_id_arr[i] = seq_id[i].data();
- }
+ const auto n_ubatch = cparams.n_ubatch;
- batch.n_seq_id = n_seq_id.data();
- batch.seq_id = seq_id_arr.data();
- }
+ std::vector<llama_pos> pos;
+ std::vector<int32_t> n_seq_id;
+ std::vector<llama_seq_id *> seq_id_arr;
+ std::vector<std::vector<llama_seq_id>> seq_id;
- // non-causal masks do not use the KV cache
- if (hparams.causal_attn) {
- llama_kv_cache_update(&lctx);
+ for (uint32_t cur_token = 0; cur_token < n_tokens_all; cur_token += n_ubatch) {
+ const uint32_t n_tokens = std::min(n_ubatch, n_tokens_all - cur_token);
+ llama_batch u_batch = {
+ /* .n_tokens = */ (int32_t) n_tokens,
+ /* .token = */ batch_all.token ? batch_all.token + cur_token : nullptr,
+ /* .embd = */ batch_all.embd ? batch_all.embd + cur_token*n_embd : nullptr,
+ /* .pos = */ batch_all.pos ? batch_all.pos + cur_token : nullptr,
+ /* .n_seq_id = */ batch_all.n_seq_id ? batch_all.n_seq_id + cur_token : nullptr,
+ /* .seq_id = */ batch_all.seq_id ? batch_all.seq_id + cur_token : nullptr,
+ /* .logits = */ batch_all.logits ? batch_all.logits + cur_token : nullptr,
+ /* .all_pos_0 = */ batch_all.all_pos_0 + (llama_pos) cur_token*batch_all.all_pos_1,
+ /* .all_pos_1 = */ batch_all.all_pos_1,
+ /* .all_seq_id = */ batch_all.all_seq_id,
+ };
- // if we have enough unused cells before the current head ->
- // better to start searching from the beginning of the cache, hoping to fill it
- if (kv_self.head > kv_self.used + 2*n_tokens) {
- kv_self.head = 0;
- }
+ int n_threads = n_tokens == 1 ? cparams.n_threads : cparams.n_threads_batch;
+ GGML_ASSERT(n_threads > 0);
- if (!llama_kv_cache_find_slot(kv_self, batch)) {
- return 1;
- }
+ // helpers for smoother batch API transition
+ // after deprecating the llama_eval calls, these will be removed
+ if (u_batch.pos == nullptr) {
+ pos.resize(n_tokens);
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ pos[i] = u_batch.all_pos_0 + i*u_batch.all_pos_1;
+ }
- if (!kv_self.recurrent) {
- // a heuristic, to avoid attending the full cache if it is not yet utilized
- // after enough generations, the benefit from this heuristic disappears
- // if we start defragmenting the cache, the benefit from this will be more important
- kv_self.n = std::min(kv_self.size, std::max(32u, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)));
- //kv_self.n = llama_kv_cache_cell_max(kv_self);
+ u_batch.pos = pos.data();
}
- }
-
- //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
- ggml_backend_sched_reset(lctx.sched);
- ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
+ if (u_batch.seq_id == nullptr) {
+ n_seq_id.resize(n_tokens);
+ seq_id.resize(n_tokens);
+ seq_id_arr.resize(n_tokens);
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ n_seq_id[i] = 1;
+ seq_id[i].resize(1);
+ seq_id[i][0] = u_batch.all_seq_id;
+ seq_id_arr[i] = seq_id[i].data();
+ }
- ggml_cgraph * gf = llama_build_graph(lctx, batch, false);
+ u_batch.n_seq_id = n_seq_id.data();
+ u_batch.seq_id = seq_id_arr.data();
+ }
- // the output is always the last tensor in the graph
- struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
- struct ggml_tensor * embd = gf->nodes[gf->n_nodes - 2];
+ // non-causal masks do not use the KV cache
+ if (hparams.causal_attn) {
+ llama_kv_cache_update(&lctx);
- if (!hparams.causal_attn) {
- res = nullptr; // do not extract logits for embedding models such as BERT
+ // if we have enough unused cells before the current head ->
+ // better to start searching from the beginning of the cache, hoping to fill it
+ if (kv_self.head > kv_self.used + 2*n_tokens) {
+ kv_self.head = 0;
+ }
- // token or sequence embeddings
- embd = gf->nodes[gf->n_nodes - 1];
+ if (!llama_kv_cache_find_slot(kv_self, u_batch)) {
+ return 1;
+ }
- GGML_ASSERT(strcmp(embd->name, "result_embd") == 0 || strcmp(embd->name, "result_embd_pooled") == 0);
- } else {
- if (strcmp(res->name, "result_output") == 0) {
- // the token embeddings could be the second to last tensor, or the third to last tensor
- if (strcmp(embd->name, "result_norm") != 0) {
- embd = gf->nodes[gf->n_nodes - 3];
- GGML_ASSERT(strcmp(embd->name, "result_norm") == 0);
+ if (!kv_self.recurrent) {
+ // a heuristic, to avoid attending the full cache if it is not yet utilized
+ // after enough generations, the benefit from this heuristic disappears
+ // if we start defragmenting the cache, the benefit from this will be more important
+ kv_self.n = std::min(kv_self.size, std::max(32u, GGML_PAD(llama_kv_cache_cell_max(kv_self), 32)));
+ //kv_self.n = llama_kv_cache_cell_max(kv_self);
}
- } else {
- GGML_ASSERT(false && "missing result_output tensor");
}
- }
- // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
+ //printf("kv_self.n = %5d, kv_self.used = %5d, kv_self.head = %5d\n", kv_self.n, kv_self.used, kv_self.head);
- // for big prompts, if BLAS is enabled, it is better to use only one thread
- // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
- // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
- // we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
- // with the BLAS calls. need a better solution
- // MoE Special Case: This logic applies when hparams.n_expert == 0, i.e. the model is NOT an MoE model. When an MoE is
- // being processed then Accelerate/BLAS will not be involved, so capping would limit performance.
- if (n_tokens >= 32 && hparams.n_expert == 0 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
- n_threads = std::min(4, n_threads);
- }
+ ggml_backend_sched_reset(lctx.sched);
+ ggml_backend_sched_set_eval_callback(lctx.sched, lctx.cparams.cb_eval, lctx.cparams.cb_eval_user_data);
- llama_set_inputs(lctx, batch);
+ ggml_cgraph * gf = llama_build_graph(lctx, u_batch, false);
- llama_graph_compute(lctx, gf, n_threads);
+ // the output is always the last tensor in the graph
+ struct ggml_tensor * res = gf->nodes[gf->n_nodes - 1];
+ struct ggml_tensor * embd = gf->nodes[gf->n_nodes - 2];
- // update the kv ring buffer
- {
- kv_self.head += n_tokens;
-
- // Ensure kv cache head points to a valid index.
- if (kv_self.head >= kv_self.size) {
- kv_self.head = 0;
- }
- }
+ if (!hparams.causal_attn) {
+ res = nullptr; // do not extract logits for embedding models such as BERT
- // decide if we need to defrag the kv cache
- if (cparams.defrag_thold >= 0.0f) {
- const float fragmentation = kv_self.n >= 128 ? 1.0f - float(kv_self.used + n_tokens)/float(kv_self.n) : 0.0f;
+ // token or sequence embeddings
+ embd = gf->nodes[gf->n_nodes - 1];
- // queue defragmentation for next llama_kv_cache_update
- if (fragmentation > cparams.defrag_thold) {
- //LLAMA_LOG_INFO("fragmentation: %.2f\n", fragmentation);
+ GGML_ASSERT(strcmp(embd->name, "result_embd") == 0 || strcmp(embd->name, "result_embd_pooled") == 0);
+ } else {
+ if (strcmp(res->name, "result_output") == 0) {
+ // the token embeddings could be the second to last tensor, or the third to last tensor
+ if (strcmp(embd->name, "result_norm") != 0) {
+ embd = gf->nodes[gf->n_nodes - 3];
+ GGML_ASSERT(strcmp(embd->name, "result_norm") == 0);
+ }
+ } else {
+ GGML_ASSERT(false && "missing result_output tensor");
+ }
+ }
+ // LLAMA_LOG_INFO("graph build time: %.3f ms (%d nodes, %d leafs)\n", (ggml_time_us() - t_start_us)/1000.0, gf->n_nodes, gf->n_leafs);
- llama_kv_cache_defrag(kv_self);
+ // for big prompts, if BLAS is enabled, it is better to use only one thread
+ // otherwise, the threads are spin-lock waiting for the BLAS calls and are degrading the performance
+ // TODO: this is mostly important for Apple Silicon where CBLAS is still performing very well
+ // we still need some threads to process all non-mul_mat ops, but not too much to avoid interfering
+ // with the BLAS calls. need a better solution
+ // MoE Special Case: This logic applies when hparams.n_expert == 0, i.e. the model is NOT an MoE model. When an MoE is
+ // being processed then Accelerate/BLAS will not be involved, so capping would limit performance.
+ if (n_tokens >= 32 && hparams.n_expert == 0 && ggml_cpu_has_blas() && !ggml_cpu_has_gpublas()) {
+ n_threads = std::min(4, n_threads);
}
- }
-#ifdef GGML_PERF
- // print timing information per ggml operation (for debugging purposes)
- // requires GGML_PERF to be defined
- ggml_graph_print(gf);
-#endif
+ ggml_backend_sched_alloc_graph(lctx.sched, gf);
- // plot the computation graph in dot format (for debugging purposes)
- //if (n_past%100 == 0) {
- // ggml_graph_dump_dot(gf, NULL, "llama.dot");
- //}
+ llama_set_inputs(lctx, u_batch);
- // extract logits
- // TODO: do not compute and extract logits if only embeddings are needed
- // need to update the graphs to skip "result_output"
- if (res) {
- auto & logits_out = lctx.logits;
+ llama_graph_compute(lctx, gf, n_threads);
-#ifndef NDEBUG
- auto & logits_valid = lctx.logits_valid;
- logits_valid.clear();
- logits_valid.resize(n_tokens);
+ // update the kv ring buffer
+ {
+ kv_self.head += n_tokens;
- logits_out.clear();
-#endif
+ // Ensure kv cache head points to a valid index.
+ if (kv_self.head >= kv_self.size) {
+ kv_self.head = 0;
+ }
+ }
- ggml_backend_t backend_res = ggml_backend_sched_get_node_backend(lctx.sched, res);
- GGML_ASSERT(backend_res != nullptr);
+#ifdef GGML_PERF
+ // print timing information per ggml operation (for debugging purposes)
+ // requires GGML_PERF to be defined
+ ggml_graph_print(gf);
+#endif
- if (batch.logits) {
- logits_out.resize(n_vocab * n_tokens);
- int32_t i_first = -1;
- for (uint32_t i = 0; i < n_tokens; i++) {
- if (batch.logits[i] && i_first == -1) {
- i_first = (int32_t) i;
- }
- if (batch.logits[i] == 0 || i == n_tokens - 1) {
- if (i_first != -1) {
- int i_last = batch.logits[i] == 0 ? i : i + 1;
- // extract logits for the range [i_first, i_last)
- // group the requests to minimize the number of calls to the backend
- ggml_backend_tensor_get_async(backend_res, res,
- logits_out.data() + (n_vocab*i_first),
- (n_vocab*i_first)*sizeof(float),
- (i_last - i_first)*n_vocab*sizeof(float));
- i_first = -1;
+ // plot the computation graph in dot format (for debugging purposes)
+ //if (n_past%100 == 0) {
+ // ggml_graph_dump_dot(gf, NULL, "llama.dot");
+ //}
+
+ // extract logits
+ // TODO: do not compute and extract logits if only embeddings are needed
+ // update the graphs to skip "result_output" if logits are not needed
+ if (res) {
+ ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(lctx.sched, res);
+ GGML_ASSERT(backend_res != nullptr);
+ if (u_batch.logits) {
+ int32_t i_first = -1;
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ if (u_batch.logits[i] && i_first == -1) {
+ i_first = (int32_t) i;
+ }
+ if (u_batch.logits[i] == 0 || i == n_tokens - 1) {
+ if (i_first != -1) {
+ int i_last = u_batch.logits[i] == 0 ? i : i + 1;
+ // extract logits for the range [i_first, i_last)
+ // group the requests to minimize the number of calls to the backend
+ ggml_backend_tensor_get_async(backend_res, res,
+ logits_out + n_vocab*(cur_token + i_first),
+ i_first*n_vocab*sizeof(float),
+ (i_last - i_first)*n_vocab*sizeof(float));
+ i_first = -1;
+ }
}
- }
#ifndef NDEBUG
- logits_valid[i] = batch.logits[i] != 0;
+ logits_valid[cur_token + i] = u_batch.logits[i] != 0;
#endif
- }
- } else if (lctx.logits_all) {
- logits_out.resize(n_vocab*n_tokens);
- ggml_backend_tensor_get_async(backend_res, res, logits_out.data(), 0, n_vocab*n_tokens*sizeof(float));
+ }
+ } else if (lctx.logits_all) {
+ ggml_backend_tensor_get_async(backend_res, res, logits_out + n_vocab*cur_token, 0, n_vocab*n_tokens*sizeof(float));
#ifndef NDEBUG
- std::fill(logits_valid.begin(), logits_valid.end(), true);
+ std::fill(logits_valid.begin() + cur_token, logits_valid.begin() + cur_token + n_tokens, true);
#endif
- } else {
- logits_out.resize(n_vocab);
- ggml_backend_tensor_get_async(backend_res, res, logits_out.data(), (n_vocab*(n_tokens - 1))*sizeof(float), n_vocab*sizeof(float));
+ } else {
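+                // only the last token's logits are needed; they come from the micro-batch that contains the final token of the batch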
+ if (cur_token + n_tokens >= n_tokens_all) {
+ ggml_backend_tensor_get_async(backend_res, res, logits_out, n_vocab*(n_tokens - 1)*sizeof(float), n_vocab*sizeof(float));
#ifndef NDEBUG
- logits_valid[0] = true;
+ logits_valid[0] = true;
#endif
+ }
+ }
}
- ggml_backend_synchronize(backend_res);
- }
- // extract embeddings
- if (cparams.embeddings && embd) {
- ggml_backend_t backend_embd = ggml_backend_sched_get_node_backend(lctx.sched, embd);
- GGML_ASSERT(backend_embd != nullptr);
-
- switch (cparams.pooling_type) {
- case LLAMA_POOLING_TYPE_NONE:
- {
- // extract token embeddings
- auto & embd_out = lctx.embd;
+ // extract embeddings
+ if (cparams.embeddings && embd) {
+ ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(lctx.sched, embd);
+ GGML_ASSERT(backend_embd != nullptr);
- if (batch.logits) {
- embd_out.resize(n_embd * n_tokens);
- for (uint32_t i = 0; i < n_tokens; i++) {
- if (batch.logits[i] == 0) {
- continue;
+ switch (cparams.pooling_type) {
+ case LLAMA_POOLING_TYPE_NONE:
+ {
+ // extract token embeddings
+ auto & embd_out = lctx.embd;
+
+ if (u_batch.logits) {
+ //embd_out.resize(n_embd * n_tokens);
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ if (u_batch.logits[i] == 0) {
+ continue;
+ }
+ ggml_backend_tensor_get_async(backend_embd, embd, embd_out + n_embd*(i + cur_token), (n_embd*i)*sizeof(float), n_embd*sizeof(float));
}
-
- ggml_backend_tensor_get_async(backend_embd, embd, embd_out.data() + (n_embd*i), (n_embd*i)*sizeof(float), n_embd*sizeof(float));
}
- }
- } break;
- case LLAMA_POOLING_TYPE_CLS:
- case LLAMA_POOLING_TYPE_MEAN:
- {
- GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0);
+ } break;
+ case LLAMA_POOLING_TYPE_CLS:
+ case LLAMA_POOLING_TYPE_MEAN:
+ {
+ GGML_ASSERT(strcmp(embd->name, "result_embd_pooled") == 0);
- // extract sequence embeddings
- auto & embd_seq_out = lctx.embd_seq;
- embd_seq_out.clear();
+ // extract sequence embeddings
+ auto & embd_seq_out = lctx.embd_seq;
+ embd_seq_out.clear();
- for (uint32_t i = 0; i < n_tokens; i++) {
- const llama_seq_id seq_id = batch.seq_id[i][0];
- if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
- continue;
+ for (uint32_t i = 0; i < n_tokens; i++) {
+ const llama_seq_id seq_id = u_batch.seq_id[i][0];
+ if (embd_seq_out.find(seq_id) != embd_seq_out.end()) {
+ continue;
+ }
+ embd_seq_out[seq_id].resize(n_embd);
+ ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
}
- embd_seq_out[seq_id].resize(n_embd);
- ggml_backend_tensor_get_async(backend_embd, embd, embd_seq_out[seq_id].data(), (n_embd*seq_id)*sizeof(float), n_embd*sizeof(float));
- }
- } break;
- case LLAMA_POOLING_TYPE_UNSPECIFIED:
- {
- GGML_ASSERT(false && "unknown pooling type");
- } break;
+ } break;
+ case LLAMA_POOLING_TYPE_UNSPECIFIED:
+ {
+ GGML_ASSERT(false && "unknown pooling type");
+ } break;
+ }
}
- ggml_backend_synchronize(backend_embd);
}
- // measure the performance only for the single-token evals
- if (n_tokens == 1) {
- lctx.t_eval_us += ggml_time_us() - t_start_us;
- lctx.n_eval++;
- }
- else if (n_tokens > 1) {
- lctx.t_p_eval_us += ggml_time_us() - t_start_us;
- lctx.n_p_eval += n_tokens;
- }
+ // wait for the computation to finish (automatically done when obtaining the model output)
+ //llama_synchronize(&lctx);
- // get a more accurate load time, upon first eval
- // TODO: fix this
- if (!lctx.has_evaluated_once) {
- lctx.t_load_us = ggml_time_us() - lctx.t_start_us;
- lctx.has_evaluated_once = true;
+ // decide if we need to defrag the kv cache
+ if (cparams.defrag_thold >= 0.0f) {
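+        // the fragmentation is the fraction of the first kv_self.n cells left unoccupied,
+        // e.g. 1.0f - 768.0f/1024.0f = 0.25f; caches with n < 128 are never defragmented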
+ const float fragmentation = kv_self.n >= 128 ? 1.0f - float(kv_self.used + n_tokens_all)/float(kv_self.n) : 0.0f;
+
+ // queue defragmentation for next llama_kv_cache_update
+ if (fragmentation > cparams.defrag_thold) {
+ //LLAMA_LOG_INFO("fragmentation: %.2f\n", fragmentation);
+
+ llama_kv_cache_defrag(kv_self);
+ }
}
return 0;
}
+
// find holes from the beginning of the KV cache and fill them by moving data from the end of the cache
static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
auto & kv_self = lctx.kv_self;
@@ -9242,6 +9250,8 @@ static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
#else
// ggml_graph defrag
+ ggml_backend_sched_reset(lctx.sched);
+
ggml_cgraph * gf = llama_build_graph_defrag(lctx, ids);
llama_graph_compute(lctx, gf, lctx.cparams.n_threads);
@@ -9253,14 +9263,22 @@ static void llama_kv_cache_defrag_internal(struct llama_context & lctx) {
}
static void llama_kv_cache_update_internal(struct llama_context & lctx) {
+ bool need_reserve = false;
+
// apply K-shift if needed
if (lctx.model.hparams.rope_type != LLAMA_ROPE_TYPE_NONE && lctx.kv_self.has_shift) {
- llama_set_k_shift(lctx);
-
{
+ ggml_backend_sched_reset(lctx.sched);
+
ggml_cgraph * gf = llama_build_graph_k_shift(lctx);
+ ggml_backend_sched_alloc_graph(lctx.sched, gf);
+
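+            // the K-shift input tensor is allocated by the scheduler together with the graph above,
+            // so it must exist before llama_set_k_shift can write its data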
+ llama_set_k_shift(lctx);
+
llama_graph_compute(lctx, gf, lctx.cparams.n_threads);
+
+ need_reserve = true;
}
{
@@ -9275,12 +9293,18 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
}
if (lctx.kv_self.recurrent && lctx.kv_self.do_copy) {
- llama_set_s_copy(lctx);
-
{
+ ggml_backend_sched_reset(lctx.sched);
+
ggml_cgraph * gf = llama_build_graph_s_copy(lctx);
+ ggml_backend_sched_alloc_graph(lctx.sched, gf);
+
+ llama_set_s_copy(lctx);
+
llama_graph_compute(lctx, gf, lctx.cparams.n_threads);
+
+ need_reserve = true;
}
{
@@ -9298,8 +9322,26 @@ static void llama_kv_cache_update_internal(struct llama_context & lctx) {
if (lctx.kv_self.do_defrag) {
llama_kv_cache_defrag_internal(lctx);
+ need_reserve = true;
+
lctx.kv_self.do_defrag = false;
}
+
+ // reserve a worst case graph again
+ if (need_reserve) {
+ // TODO: extract to a function
+ // build worst-case graph
+ int n_tokens = (int)std::min(lctx.cparams.n_ctx, lctx.cparams.n_ubatch);
+ int n_past = lctx.cparams.n_ctx - n_tokens;
+ llama_token token = llama_token_bos(&lctx.model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
+ ggml_cgraph * gf = llama_build_graph(lctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);
+
+ // initialize scheduler with the worst-case graph
+ ggml_backend_sched_reset(lctx.sched);
+ if (!ggml_backend_sched_reserve(lctx.sched, gf)) {
+ LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__);
+ }
+ }
}
//
@@ -12537,7 +12579,8 @@ struct llama_context_params llama_context_default_params() {
struct llama_context_params result = {
/*.seed =*/ LLAMA_DEFAULT_SEED,
/*.n_ctx =*/ 512,
- /*.n_batch =*/ 512,
+ /*.n_batch =*/ 2048,
+ /*.n_ubatch =*/ 512,
/*.n_seq_max =*/ 1,
/*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default
/*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS,
@@ -12691,6 +12734,17 @@ struct llama_context * llama_new_context_with_model(
struct llama_context_params params) {
if (!model) {
+ LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__);
+ return nullptr;
+ }
+
+ if (params.n_batch == 0 && params.n_ubatch == 0) {
+ LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__);
+ return nullptr;
+ }
+
+ if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) {
+ LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__);
return nullptr;
}
@@ -12699,7 +12753,6 @@ struct llama_context * llama_new_context_with_model(
const auto & hparams = model->hparams;
auto & cparams = ctx->cparams;
- cparams.n_batch = params.n_batch;
// TODO: maybe add n_seq_max here too
cparams.n_threads = params.n_threads;
cparams.n_threads_batch = params.n_threads_batch;
@@ -12716,6 +12769,11 @@ struct llama_context * llama_new_context_with_model(
cparams.rope_freq_base = params.rope_freq_base == 0.0f ? hparams.rope_freq_base_train : params.rope_freq_base;
cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;
+ // with causal attention, the batch size is limited by the context size
+ cparams.n_batch = hparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;
+ cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);
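+    // note: n_ubatch is the micro-batch size used for a single graph evaluation; 0 defaults to n_batch, and it never exceeds n_batch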
+
cparams.n_yarn_orig_ctx = params.yarn_orig_ctx != 0 ? params.yarn_orig_ctx :
hparams.n_yarn_orig_ctx != 0 ? hparams.n_yarn_orig_ctx :
hparams.n_ctx_train;
@@ -12751,6 +12809,8 @@ struct llama_context * llama_new_context_with_model(
}
LLAMA_LOG_INFO("%s: n_ctx = %u\n", __func__, cparams.n_ctx);
+ LLAMA_LOG_INFO("%s: n_batch = %u\n", __func__, cparams.n_batch);
+ LLAMA_LOG_INFO("%s: n_ubatch = %u\n", __func__, cparams.n_ubatch);
LLAMA_LOG_INFO("%s: freq_base = %.1f\n", __func__, cparams.rope_freq_base);
LLAMA_LOG_INFO("%s: freq_scale = %g\n", __func__, cparams.rope_freq_scale);
@@ -12895,54 +12955,31 @@ struct llama_context * llama_new_context_with_model(
ggml_type_name(type_v), (float)memory_size_v / (1024.0f * 1024.0f));
}
- // resized during inference, reserve maximum
- ctx->logits.reserve(hparams.n_vocab*cparams.n_batch);
+ // graph outputs buffer
+ {
+ // resized during inference, reserve maximum
+ ctx->logits_size = hparams.n_vocab*cparams.n_batch;
+ ctx->embd_size = params.embeddings ? hparams.n_embd*cparams.n_batch : 0;
- if (params.embeddings) {
- ctx->embd.reserve(hparams.n_embd*cparams.n_batch);
- }
+ const size_t buf_output_size = (ctx->logits_size + ctx->embd_size)*sizeof(float);
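+        // layout of the output buffer: logits first, followed by the embeddings (when enabled)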
- // graph inputs
- {
- ggml_init_params init_params = {
- /* .mem_size */ ggml_tensor_overhead()*(8 + 3*(ctx->kv_self.recurrent)),
- /* .mem_buffer */ nullptr,
- /* .no_alloc */ true,
- };
- ctx->ctx_input = ggml_init(init_params);
-
- ctx->inp_tokens = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
- ctx->inp_embd = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, hparams.n_embd, cparams.n_batch);
- ctx->inp_pos = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
- ctx->inp_KQ_mask = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, kv_size, cparams.n_batch);
- ctx->inp_KQ_pos = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_F32, kv_size);
- ctx->inp_K_shift = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, kv_size);
- ctx->inp_mean = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_F32, cparams.n_batch, cparams.n_batch);
- ctx->inp_cls = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, cparams.n_batch);
- if (ctx->kv_self.recurrent) {
- ctx->inp_s_copy = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_I32, kv_size);
- ctx->inp_s_mask = ggml_new_tensor_1d(ctx->ctx_input, GGML_TYPE_F32, kv_size);
- ctx->inp_s_seq = ggml_new_tensor_2d(ctx->ctx_input, GGML_TYPE_I32, kv_size, cparams.n_batch);
- }
-
- ggml_set_name(ctx->inp_tokens, "inp_tokens");
- ggml_set_name(ctx->inp_embd, "inp_embd");
- ggml_set_name(ctx->inp_pos, "inp_pos");
- ggml_set_name(ctx->inp_KQ_mask, "inp_KQ_mask");
- ggml_set_name(ctx->inp_KQ_pos, "inp_KQ_pos");
- ggml_set_name(ctx->inp_K_shift, "inp_K_shift");
- ggml_set_name(ctx->inp_mean, "inp_mean");
- ggml_set_name(ctx->inp_cls, "inp_cls");
- if (ctx->kv_self.recurrent) {
- ggml_set_name(ctx->inp_s_copy, "inp_s_copy");
- ggml_set_name(ctx->inp_s_mask, "inp_s_mask");
- ggml_set_name(ctx->inp_s_seq, "inp_s_seq");
- }
-
- ctx->buf_input = ggml_backend_alloc_ctx_tensors_from_buft(ctx->ctx_input, llama_default_buffer_type_cpu(true));
- LLAMA_LOG_INFO("%s: %10s input buffer size = %8.2f MiB\n", __func__,
- ggml_backend_buffer_name(ctx->buf_input),
- ggml_backend_buffer_get_size(ctx->buf_input) / 1024.0 / 1024.0);
+ ctx->buf_output = ggml_backend_buft_alloc_buffer(llama_default_buffer_type_cpu(true), buf_output_size);
+ if (ctx->buf_output == nullptr) {
+            LLAMA_LOG_ERROR("%s: failed to allocate output buffer\n", __func__);
+ llama_free(ctx);
+ return nullptr;
+ }
+ ggml_backend_buffer_clear(ctx->buf_output, 0);
+
+ ctx->logits = (float *) ggml_backend_buffer_get_base(ctx->buf_output);
+ if (params.embeddings) {
+ ctx->embd = ctx->logits + ctx->logits_size;
+ }
+
+ LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__,
+ ggml_backend_buffer_name(ctx->buf_output),
+ ggml_backend_buffer_get_size(ctx->buf_output) / 1024.0 / 1024.0);
}
// scheduler and compute buffers
@@ -12961,10 +12998,21 @@ struct llama_context * llama_new_context_with_model(
// buffer used to store the computation graph and the tensor meta data
ctx->buf_compute_meta.resize(ggml_tensor_overhead()*LLAMA_MAX_NODES + ggml_graph_overhead_custom(LLAMA_MAX_NODES, false));
- ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), LLAMA_MAX_NODES);
+ // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
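+        // it is enabled only when the model is fully offloaded and split by layer across more than one device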
+ bool pipeline_parallel = llama_get_device_count() > 1 && model->n_gpu_layers > (int)model->hparams.n_layer && model->split_mode == LLAMA_SPLIT_MODE_LAYER;
+#ifndef GGML_USE_CUBLAS
+ // pipeline parallelism requires support for async compute and events
+ // currently this is only implemented in the CUDA backend
+ pipeline_parallel = false;
+#endif
+ ctx->sched = ggml_backend_sched_new(ctx->backends.data(), backend_buft.data(), ctx->backends.size(), LLAMA_MAX_NODES, pipeline_parallel);
+
+ if (pipeline_parallel) {
+ LLAMA_LOG_INFO("%s: pipeline parallelism enabled (n_copies=%d)\n", __func__, ggml_backend_sched_get_n_copies(ctx->sched));
+ }
// build worst-case graph
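+        // the worst case is a single micro-batch of n_ubatch tokens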
- int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_batch);
+ int n_tokens = (int)std::min(cparams.n_ctx, cparams.n_ubatch);
int n_past = cparams.n_ctx - n_tokens;
llama_token token = llama_token_bos(&ctx->model); // not actually used by llama_build_graph, but required to choose between token and embedding inputs graph
ggml_cgraph * gf = llama_build_graph(*ctx, llama_batch_get_one(&token, n_tokens, n_past, 0), true);
@@ -12987,7 +13035,7 @@ struct llama_context * llama_new_context_with_model(
// note: the number of splits during measure is higher than during inference due to the kv shift
int n_splits = ggml_backend_sched_get_n_splits(ctx->sched);
- LLAMA_LOG_INFO("%s: graph splits (measure): %d\n", __func__, n_splits);
+ LLAMA_LOG_INFO("%s: graph splits: %d\n", __func__, n_splits);
}
}
@@ -13024,6 +13072,10 @@ uint32_t llama_n_batch(const struct llama_context * ctx) {
return ctx->cparams.n_batch;
}
+uint32_t llama_n_ubatch(const struct llama_context * ctx) {
+ return ctx->cparams.n_ubatch;
+}
+
uint32_t llama_n_seq_max(const struct llama_context * ctx) {
return ctx->kv_self.size;
}
@@ -13347,9 +13399,9 @@ size_t llama_get_state_size(const struct llama_context * ctx) {
const size_t s_rng = LLAMA_MAX_RNG_STATE;
const size_t s_logits_size = sizeof(size_t);
// assume worst case for logits although only currently set ones are serialized
- const size_t s_logits = ctx->logits.capacity() * sizeof(float);
+ const size_t s_logits = ctx->logits_size * sizeof(float);
const size_t s_embedding_size = sizeof(size_t);
- const size_t s_embedding = ctx->embd.capacity() * sizeof(float);
+ const size_t s_embedding = ctx->embd_size * sizeof(float);
const size_t s_kv_buf_size = sizeof(size_t);
const size_t s_kv_head = sizeof(uint32_t);
const size_t s_kv_size = sizeof(uint32_t);
@@ -13447,23 +13499,23 @@ static void llama_copy_state_data_internal(struct llama_context * ctx, llama_dat
// copy logits
{
- const size_t logits_size = ctx->logits.size();
+ const size_t logits_size = ctx->logits_size;
data_ctx->write(&logits_size, sizeof(logits_size));
if (logits_size) {
- data_ctx->write(ctx->logits.data(), logits_size * sizeof(float));
+ data_ctx->write(ctx->logits, logits_size * sizeof(float));
}
}
// copy embeddings
{
- const size_t embeddings_size = ctx->embd.size();
+ const size_t embeddings_size = ctx->embd_size;
data_ctx->write(&embeddings_size, sizeof(embeddings_size));
if (embeddings_size) {
- data_ctx->write(ctx->embd.data(), embeddings_size * sizeof(float));
+ data_ctx->write(ctx->embd, embeddings_size * sizeof(float));
}
}
@@ -13566,12 +13618,10 @@ size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
memcpy(&logits_size, inp, sizeof(logits_size)); inp += sizeof(logits_size);
- GGML_ASSERT(ctx->logits.capacity() >= logits_size);
+ GGML_ASSERT(ctx->logits_size >= logits_size);
if (logits_size) {
- ctx->logits.resize(logits_size);
-
- memcpy(ctx->logits.data(), inp, logits_size * sizeof(float));
+ memcpy(ctx->logits, inp, logits_size * sizeof(float));
inp += logits_size * sizeof(float);
}
}
@@ -13582,12 +13632,10 @@ size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src) {
memcpy(&embeddings_size, inp, sizeof(embeddings_size)); inp += sizeof(embeddings_size);
- GGML_ASSERT(ctx->embd.capacity() == embeddings_size);
+ GGML_ASSERT(ctx->embd_size == embeddings_size);
if (embeddings_size) {
- ctx->embd.resize(embeddings_size);
-
- memcpy(ctx->embd.data(), inp, embeddings_size * sizeof(float));
+ memcpy(ctx->embd, inp, embeddings_size * sizeof(float));
inp += embeddings_size * sizeof(float);
}
}
@@ -13842,24 +13890,61 @@ int32_t llama_decode(
return ret;
}
+void llama_synchronize(struct llama_context * ctx) {
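+    // block until every backend managed by the scheduler has finished its pending async work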
+ ggml_backend_sched_synchronize(ctx->sched);
+
+ // FIXME: if multiple single tokens are evaluated without a synchronization,
+ // the stats will be added to the prompt evaluation stats
+    //        this should only happen when a larger batch is evaluated one token at a time (batch size 1)
+
+ // add the evaluation to the stats
+ if (ctx->n_queued_tokens == 1) {
+ ctx->t_eval_us += ggml_time_us() - ctx->t_compute_start_us;
+ ctx->n_eval++;
+ } else if (ctx->n_queued_tokens > 1) {
+ ctx->t_p_eval_us += ggml_time_us() - ctx->t_compute_start_us;
+ ctx->n_p_eval += ctx->n_queued_tokens;
+ }
+
+ // get a more accurate load time, upon first eval
+ if (ctx->n_queued_tokens > 0 && !ctx->has_evaluated_once) {
+ ctx->t_load_us = ggml_time_us() - ctx->t_start_us;
+ ctx->has_evaluated_once = true;
+ }
+
+ ctx->n_queued_tokens = 0;
+ ctx->t_compute_start_us = 0;
+}
+
float * llama_get_logits(struct llama_context * ctx) {
- return ctx->logits.data();
+ llama_synchronize(ctx);
+
+ return ctx->logits;
}
float * llama_get_logits_ith(struct llama_context * ctx, int32_t i) {
assert(ctx->logits_valid.at(i));
- return ctx->logits.data() + i*ctx->model.hparams.n_vocab;
+
+ llama_synchronize(ctx);
+
+ return ctx->logits + i*ctx->model.hparams.n_vocab;
}
float * llama_get_embeddings(struct llama_context * ctx) {
- return ctx->embd.data();
+ llama_synchronize(ctx);
+
+ return ctx->embd;
}
float * llama_get_embeddings_ith(struct llama_context * ctx, int32_t i) {
- return ctx->embd.data() + i*ctx->model.hparams.n_embd;
+ llama_synchronize(ctx);
+
+ return ctx->embd + i*ctx->model.hparams.n_embd;
}
float * llama_get_embeddings_seq(struct llama_context * ctx, llama_seq_id seq_id) {
+ llama_synchronize(ctx);
+
auto it = ctx->embd_seq.find(seq_id);
if (it == ctx->embd_seq.end()) {
return nullptr;