From 2d770505a89a99ce78a5950cf14fc06d3176ffa4 Mon Sep 17 00:00:00 2001
From: Roland <14355895+rbur0425@users.noreply.github.com>
Date: Fri, 15 Sep 2023 03:28:45 -0400
Subject: llama : remove mtest (#3177)

* Remove mtest

* remove from common/common.h and examples/main/main.cpp
---
 examples/main/main.cpp | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index baec6ba1..a8179f1b 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -198,23 +198,6 @@ int main(int argc, char ** argv) {
             params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
     }
 
-    // determine the maximum memory usage needed to do inference for the given n_batch and n_ctx parameters
-    // uncomment the "used_mem" line in llama.cpp to see the results
-    if (params.mem_test) {
-        {
-            LOG_TEE("%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx);
-
-            const std::vector<llama_token> tmp(params.n_batch, llama_token_bos(ctx));
-            llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads);
-        }
-
-        llama_print_timings(ctx);
-        llama_free(ctx);
-        llama_free_model(model);
-
-        return 0;
-    }
-
     // export the cgraph and exit
     if (params.export_cgraph) {
         llama_eval_export(ctx, "llama.ggml");