author | Kawrakow <iwankawrakow@gmail.com> | 2025-03-02 13:47:38 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2025-03-02 13:47:38 +0200 |
commit | a89adaa78f505675be7be6180f419b4b0158c15a (patch) | |
tree | ad82fa3ad44f66f37885bdf0d0d025166eff9535 /common/common.h | |
parent | ef9a3d17b52bb5f6d55f7ef7e05e41e22f2ad81d (diff) | |
SER - Smart Expert Reduction (#239)
* A better way to measure the cost of ggml_barrier
* Smart expert selection
* Add ser option to llama-bench
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'common/common.h')
-rw-r--r-- | common/common.h | 2 |
1 file changed, 2 insertions, 0 deletions
```diff
diff --git a/common/common.h b/common/common.h
index f35f3558..f6a55885 100644
--- a/common/common.h
+++ b/common/common.h
@@ -178,6 +178,8 @@ struct gpt_params {
     int mla_attn = 0; // MLA 0: standard attention, 1: MLA with K and transposed V cache, 2: MLA with just K cache
     int attn_max_batch = 0; // Max batch size to use when computing attention (only applicable if flash_attn = false)
     bool fused_moe_up_gate = false; // fused up*unary(gate) op for MoE models
+    int min_experts = -1;
+    float thresh_experts = 0;
     bool input_prefix_bos = false; // prefix BOS to user inputs, preceding input_prefix
     bool ignore_eos = false; // ignore generated EOS tokens
```
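The two fields added here, `min_experts` and `thresh_experts`, carry the Smart Expert Reduction settings (the defaults `-1` and `0` leave the feature off). This diff does not show how they are consumed, so the following is only a minimal C++ sketch of the kind of thresholded expert selection the commit title suggests: take the usual top-k routed experts, then drop the ones whose routing probability falls below `thresh_experts` times the strongest expert's probability, never keeping fewer than `min_experts`. The helper `select_experts`, the example probabilities, and the exact drop rule are illustrative assumptions, not the code from this PR.

```cpp
// Sketch (not the actual ik_llama.cpp code) of how a min_experts / thresh_experts
// pair could prune MoE experts per token: low-confidence experts are dropped,
// but at least min_experts are always kept.
#include <algorithm>
#include <cstdio>
#include <vector>

// Hypothetical helper; in the real code the selection happens in the MoE graph build.
static std::vector<int> select_experts(const std::vector<float> & probs,
                                       int   n_expert_used,
                                       int   min_experts,     // -1 disables the reduction
                                       float thresh_experts) {
    // Standard top-k routing: indices of the n_expert_used highest-probability experts.
    std::vector<int> idx(probs.size());
    for (size_t i = 0; i < idx.size(); ++i) idx[i] = (int) i;
    std::partial_sort(idx.begin(), idx.begin() + n_expert_used, idx.end(),
                      [&](int a, int b) { return probs[a] > probs[b]; });
    idx.resize(n_expert_used);

    if (min_experts < 0) {
        return idx; // feature disabled: keep the usual number of experts
    }

    // Drop experts far below the top expert's probability, but never go below min_experts.
    const float cutoff = thresh_experts * probs[idx[0]];
    std::vector<int> kept;
    for (int e : idx) {
        if ((int) kept.size() < min_experts || probs[e] >= cutoff) {
            kept.push_back(e);
        }
    }
    return kept;
}

int main() {
    // Router probabilities for 8 experts; the model normally uses the top 4.
    const std::vector<float> probs = {0.42f, 0.30f, 0.12f, 0.06f, 0.04f, 0.03f, 0.02f, 0.01f};
    // With min_experts = 2 and thresh_experts = 0.2, expert 3 (p = 0.06 < 0.2 * 0.42) is dropped.
    for (int e : select_experts(probs, /*n_expert_used=*/4, /*min_experts=*/2, /*thresh_experts=*/0.2f)) {
        std::printf("expert %d (p=%.2f)\n", e, probs[e]);
    }
    return 0;
}
```

Under such a rule, confident routings use fewer experts (and therefore less compute), while uncertain routings keep the full set, which fits the "smart expert reduction" intent of the commit while leaving behaviour unchanged when the feature is disabled.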