author    Kawrakow <iwankawrakow@gmail.com>    2025-05-12 07:50:26 +0300
committer GitHub <noreply@github.com>          2025-05-12 07:50:26 +0300
commit    1d2da7feaee3e4dd1b78fb4108988c977b47e266 (patch)
tree      4449c9c892dde93d5b8cae1389454a0d099c581e /examples
parent    f27cd405422307e02dffa8949ac30bc56b4d2900 (diff)
Add batch warmup to sweep-bench (#375)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'examples')
-rw-r--r--    examples/sweep-bench/sweep-bench.cpp | 18
1 file changed, 17 insertions(+), 1 deletion(-)
diff --git a/examples/sweep-bench/sweep-bench.cpp b/examples/sweep-bench/sweep-bench.cpp
index 27510687..31dd3ce0 100644
--- a/examples/sweep-bench/sweep-bench.cpp
+++ b/examples/sweep-bench/sweep-bench.cpp
@@ -107,7 +107,7 @@ int main(int argc, char ** argv) {
     llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
 
     // warm up
-    {
+    if (params.warmup) {
         llama_batch_add(batch, bos, 0, { 0 }, false);
 
         if (!decode_helper(ctx, batch, ctx_params.n_batch)) {
@@ -115,6 +115,22 @@ int main(int argc, char ** argv) {
             return 1;
         }
     }
+    if (params.batch_warmup) {
+        // clean up KV cache after generation
+        llama_kv_cache_seq_rm(ctx, 0, params.n_ubatch, -1);
+
+        // prepare batch of pp size for prompt processing performance measurement
+        llama_batch_clear(batch);
+
+        for (unsigned int i = 0; i < params.n_ubatch; ++i) {
+            llama_batch_add(batch, std::rand() % n_vocab, i, { 0 }, false);
+        }
+
+        if (!decode_helper(ctx, batch, ctx_params.n_ubatch)) {
+            LOG_TEE("%s: llama_decode() failed\n", __func__);
+            return 1;
+        }
+    }
 
     llama_batch_clear(batch);
     llama_kv_cache_clear(ctx);
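
Note (not part of the commit): the added params.batch_warmup block decodes one full micro-batch of random tokens before any timed pass, so one-time costs of the large-batch prompt-processing path (buffer allocation, kernel/graph setup) do not land in the first measured run; the single-token warmup above it only exercises the generation path. A minimal standalone sketch of that idea is below, assuming the llama.cpp C API plus the common.h helpers llama_batch_clear / llama_batch_add; the function name batch_warmup_once is hypothetical, and it calls llama_decode directly instead of sweep-bench's local decode_helper wrapper.

    // Sketch of a batch warmup pass: decode n_ubatch random tokens once,
    // then drop them so measured runs start from an empty KV cache.
    #include <cstdlib>

    #include "llama.h"
    #include "common.h"

    static bool batch_warmup_once(llama_context * ctx, llama_batch & batch,
                                  uint32_t n_ubatch, int32_t n_vocab) {
        // fill one micro-batch with random tokens so the large-batch
        // (prompt processing) code path runs once before timing starts
        llama_batch_clear(batch);
        for (uint32_t i = 0; i < n_ubatch; ++i) {
            llama_batch_add(batch, std::rand() % n_vocab, (llama_pos) i, { 0 }, false);
        }
        if (llama_decode(ctx, batch) != 0) {
            return false;
        }
        // discard the warmup tokens from the KV cache and reuse the batch
        llama_kv_cache_clear(ctx);
        llama_batch_clear(batch);
        return true;
    }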