From a89adaa78f505675be7be6180f419b4b0158c15a Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Sun, 2 Mar 2025 13:47:38 +0200
Subject: SER - Smart Expert Reduction (#239)

* A better way to measure the cost of ggml_barrier

* Smart expert selection

* Add ser option to llama-bench

---------

Co-authored-by: Iwan Kawrakow
---
 common/common.h | 2 ++
 1 file changed, 2 insertions(+)

(limited to 'common/common.h')

diff --git a/common/common.h b/common/common.h
index f35f3558..f6a55885 100644
--- a/common/common.h
+++ b/common/common.h
@@ -178,6 +178,8 @@ struct gpt_params {
     int mla_attn = 0; // MLA 0: standard attention, 1: MLA with K and transposed V cache, 2: MLA with just K cache
     int attn_max_batch = 0; // Max batch size to use when computing attention (only applicable if flash_attn = false)
     bool fused_moe_up_gate = false; // fused up*unary(gate) op for MoE models
+    int min_experts = -1;
+    float thresh_experts = 0;
     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
     bool ignore_eos = false; // ignore generated EOS tokens
--
cgit v1.2.3
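
The patch only adds the two `gpt_params` fields; the expert-selection logic itself lives elsewhere in the commit. As a rough illustration of how such parameters could drive "smart expert reduction" in a MoE router, here is a minimal C++ sketch: it assumes (not confirmed by this diff) that `thresh_experts` prunes low-weight experts relative to the top routing probability and that `min_experts` bounds how far the reduction can go, with `min_experts < 0` disabling the feature. The function name and signature are hypothetical.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Hypothetical sketch: pick the usual top n_expert_used experts, then drop
// trailing experts whose routing probability falls below
// thresh_experts * p_max, never going under min_experts.
// min_experts < 0 means SER is disabled (standard top-k routing).
std::vector<int> select_experts(const std::vector<float> & probs,
                                int n_expert_used, int min_experts, float thresh_experts) {
    std::vector<int> idx(probs.size());
    for (size_t i = 0; i < idx.size(); ++i) idx[i] = (int)i;

    // sort expert indices by descending routing probability
    std::sort(idx.begin(), idx.end(), [&](int a, int b) { return probs[a] > probs[b]; });
    idx.resize(std::min(idx.size(), (size_t)n_expert_used));

    if (min_experts < 0 || idx.empty()) {
        return idx; // SER disabled -> keep the full top-k set
    }

    const float cutoff = thresh_experts * probs[idx[0]];
    while ((int)idx.size() > min_experts && probs[idx.back()] < cutoff) {
        idx.pop_back(); // discard low-weight experts to save compute
    }
    return idx;
}
```

Under this reading, the defaults in the diff (`min_experts = -1`, `thresh_experts = 0`) leave model behavior unchanged unless the user opts in, which matches the commit adding a `ser` option to llama-bench rather than changing defaults.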