From 3ab8b3a92ede46df88bc5a2dfca3777de4a2b2b6 Mon Sep 17 00:00:00 2001
From: Pierrick Hymbert
Date: Fri, 1 Mar 2024 12:39:06 +0100
Subject: llama : cleanup unused mmq flags (#5772)

* cleanup unused --no-mul-mat-q,-nommq, -mmq, --mul-mat-q, mul_mat_q

* remove: mul_mat_q in compare llama bench and usage

* update llama-bench

---------

Co-authored-by: slaren
---
 common/common.cpp | 2 --
 1 file changed, 2 deletions(-)

diff --git a/common/common.cpp b/common/common.cpp
index 18289755..bf1ed8a6 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -1281,7 +1281,6 @@ struct llama_context_params llama_context_params_from_gpt_params(const gpt_param
     cparams.n_batch         = params.n_batch;
     cparams.n_threads       = params.n_threads;
     cparams.n_threads_batch = params.n_threads_batch == -1 ? params.n_threads : params.n_threads_batch;
-    cparams.mul_mat_q       = params.mul_mat_q;
     cparams.seed            = params.seed;
     cparams.logits_all      = params.logits_all;
     cparams.embedding       = params.embedding;
@@ -1725,7 +1724,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
     fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);
     fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", sparams.n_probs);
     fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
-    fprintf(stream, "no_mul_mat_q: %s # default: false\n", !params.mul_mat_q ? "true" : "false");
     fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false");
     fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
    fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
--
cgit v1.2.3
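
For illustration, below is a minimal, self-contained sketch of the YAML dump shape after this patch: the `no_mul_mat_q` key is no longer emitted. The `gpt_params_stub`/`sampling_params_stub` structs and the `dump_yaml_excerpt` helper are hypothetical stand-ins, not the real types from common.h; only the printed keys and format strings are taken from the diff context above.

#include <cstdio>

// Hypothetical stand-ins for the real structs in common.h; only the
// fields touched by the lines shown in the diff are reproduced here.
struct sampling_params_stub { int n_probs = 0; bool penalize_nl = true; };
struct gpt_params_stub {
    int  n_predict       = -1;
    bool use_mmap        = true;
    int  ppl_output_type = 0;
    int  ppl_stride      = 0;
    sampling_params_stub sparams;
};

// Mirrors the surviving fprintf calls from dump_non_result_info_yaml:
// after this patch, no "no_mul_mat_q" key is written anymore.
static void dump_yaml_excerpt(FILE * stream, const gpt_params_stub & params) {
    const sampling_params_stub & sparams = params.sparams;
    fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);
    fprintf(stream, "n_probs: %d # only used by server binary, default: 0\n", sparams.n_probs);
    fprintf(stream, "no_mmap: %s # default: false\n", !params.use_mmap ? "true" : "false");
    fprintf(stream, "no_penalize_nl: %s # default: false\n", !sparams.penalize_nl ? "true" : "false");
    fprintf(stream, "ppl_output_type: %d # default: 0\n", params.ppl_output_type);
    fprintf(stream, "ppl_stride: %d # default: 0\n", params.ppl_stride);
}

int main() {
    gpt_params_stub params;
    dump_yaml_excerpt(stdout, params);
    return 0;
}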