From a366a3d17d8f2de0eb8c3d9eddc7b5840fb5761a Mon Sep 17 00:00:00 2001
From: saood06
Date: Mon, 10 Feb 2025 09:40:38 -0600
Subject: Load all MoE experts during warmup and make warmup 1 token (#198)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Load all MoE experts during warmup

Co-authored-by: Stanisław Szymczyk

* Unify warmup to one token

---------

Co-authored-by: Stanisław Szymczyk
---
 common/common.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

(limited to 'common')

diff --git a/common/common.cpp b/common/common.cpp
index 6219f0ce..44678d7a 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2169,8 +2169,10 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (bos != -1) {
             tmp.push_back(bos);
         }
-        tmp.push_back(eos);
-
+        else
+        {
+            tmp.push_back(eos);
+        }
         if (llama_model_has_encoder(model)) {
             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
             llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
--
cgit v1.2.3
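
Note: a minimal standalone sketch of the warmup-token selection as it behaves
after this patch, extracted from the llama_init_from_gpt_params() context for
clarity. The helper name pick_warmup_tokens is hypothetical and not part of
the commit; it assumes `bos` and `eos` were obtained earlier via
llama_token_bos()/llama_token_eos(), with `bos == -1` meaning the model has no
BOS token, as the diff context suggests.

    // Illustrative only: mirrors the post-patch warmup token selection.
    #include <cstdint>
    #include <vector>

    using llama_token = int32_t;  // matches the typedef in llama.h

    // Hypothetical helper, not in the codebase: returns the single warmup token.
    std::vector<llama_token> pick_warmup_tokens(llama_token bos, llama_token eos) {
        std::vector<llama_token> tmp;
        if (bos != -1) {
            tmp.push_back(bos);   // models with a BOS token warm up on BOS
        } else {
            tmp.push_back(eos);   // models without one fall back to EOS
        }
        return tmp;               // always exactly one token now
    }

Before the patch, models with a BOS token queued both BOS and EOS, so warmup
decoded two tokens; the else-branch makes EOS a fallback rather than an
unconditional second token.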