author    saood06 <saood05@gmail.com>       2025-02-10 09:40:38 -0600
committer GitHub <noreply@github.com>       2025-02-10 17:40:38 +0200
commit    a366a3d17d8f2de0eb8c3d9eddc7b5840fb5761a (patch)
tree      ae0cb943fb4b83cb9e24d1a51d15550d5d7f0903 /common/common.cpp
parent    c12f73ba6153d162f36434cb48e36dd3649b7701 (diff)
Load all MoE experts during warmup and make warmup 1 token (#198)
* Load all MoE experts during warmup

Co-authored-by: Stanisław Szymczyk <sszymczy@gmail.com>

* Unify warmup to one token

---------

Co-authored-by: Stanisław Szymczyk <sszymczy@gmail.com>
Diffstat (limited to 'common/common.cpp')
 common/common.cpp | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 6219f0ce..44678d7a 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -2169,8 +2169,10 @@ struct llama_init_result llama_init_from_gpt_params(gpt_params & params) {
         if (bos != -1) {
             tmp.push_back(bos);
         }
-        tmp.push_back(eos);
-
+        else
+        {
+            tmp.push_back(eos);
+        }
         if (llama_model_has_encoder(model)) {
             llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
             llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
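
For readability, here is a minimal sketch of the warmup path after this patch. It is not the verbatim file contents; it only reassembles the hunk above with its surrounding context, assuming the llama.cpp-style API visible in the hunk itself (llama_token_bos/llama_token_eos taking a model pointer, and the four-argument llama_batch_get_one):

    // Warm up with exactly one token: prefer BOS, and fall back to EOS
    // only for models (e.g. T5-style) that do not define a BOS token.
    std::vector<llama_token> tmp;
    llama_token bos = llama_token_bos(model);
    llama_token eos = llama_token_eos(model);
    if (bos != -1) {
        tmp.push_back(bos);
    } else {
        tmp.push_back(eos);
    }
    if (llama_model_has_encoder(model)) {
        // Encoder-decoder models warm up the encoder with the same token.
        llama_encode(lctx, llama_batch_get_one(tmp.data(), tmp.size(), 0, 0));
    }

Previously the warmup batch could contain both BOS and EOS; making it a single token keeps the warmup pass cheap while still touching the weights that need to be paged in (including, per the companion change in this PR, all MoE experts).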