From a366a3d17d8f2de0eb8c3d9eddc7b5840fb5761a Mon Sep 17 00:00:00 2001
From: saood06
Date: Mon, 10 Feb 2025 09:40:38 -0600
Subject: Load all MoE experts during warmup and make warmup 1 token (#198)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Load all MoE experts during warmup

Co-authored-by: Stanisław Szymczyk

* Unify warmup to one token

---------

Co-authored-by: Stanisław Szymczyk
---
 examples/llama-bench/llama-bench.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'examples/llama-bench/llama-bench.cpp')

diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 41b93df5..95df06dc 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -1586,7 +1586,7 @@ int main(int argc, char ** argv) {
         if (params.warmup) {
             if (t.n_prompt > 0) {
                 //test_prompt(ctx, std::min(t.n_batch, std::min(t.n_prompt, 32)), 0, t.n_batch, t.n_threads);
-                test_prompt(ctx, t.n_prompt, 0, t.n_batch, t.n_threads);
+                test_prompt(ctx, 1, 0, t.n_batch, t.n_threads);
             }
             if (t.n_gen > 0) {
                 test_gen(ctx, 1, 0, t.n_threads);
-- 
cgit v1.2.3
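
For context, test_prompt(ctx, n_prompt, n_past, n_batch, n_threads) feeds n_prompt
dummy tokens through the model in chunks of at most n_batch, so before this patch
the warmup decoded the full benchmark prompt length; afterwards it decodes a single
token. Below is a minimal sketch of that decode loop, assuming the four-argument
llama_batch_get_one(tokens, n_tokens, pos0, seq_id) signature this codebase uses;
the function name test_prompt_sketch is hypothetical and the body is simplified,
not the verbatim source of test_prompt.

    #include <algorithm>
    #include <vector>
    #include "llama.h"

    // Sketch of the batched decode performed by test_prompt() in llama-bench.cpp:
    // push n_prompt dummy tokens through the model in chunks of at most n_batch.
    static void test_prompt_sketch(llama_context * ctx, int n_prompt, int n_past,
                                   int n_batch, int n_threads) {
        llama_set_n_threads(ctx, n_threads, n_threads);
        std::vector<llama_token> tokens(n_batch, 0); // dummy ids; real code uses valid vocab tokens
        int n_processed = 0;
        while (n_processed < n_prompt) {
            const int n_tokens = std::min(n_prompt - n_processed, n_batch);
            // single-sequence batch starting at position n_past + n_processed
            llama_decode(ctx, llama_batch_get_one(tokens.data(), n_tokens,
                                                  n_past + n_processed, 0));
            n_processed += n_tokens;
        }
        llama_synchronize(ctx); // wait for the backend before timing starts
    }

With the patched call test_prompt(ctx, 1, 0, t.n_batch, t.n_threads), this loop runs
exactly one decode of one token. Per the commit subject, that single pass is enough
because the companion change (outside this file; the cgit view above is limited to
llama-bench.cpp) makes warmup activate all MoE experts, so expert weights still get
loaded without the warmup cost scaling with the prompt length.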