author     Roland <14355895+rbur0425@users.noreply.github.com>   2023-09-15 03:28:45 -0400
committer  GitHub <noreply@github.com>                           2023-09-15 10:28:45 +0300
commit     2d770505a89a99ce78a5950cf14fc06d3176ffa4 (patch)
tree       4a45ba87ef9d45a5c11322c30bb98e1690c69f6b /examples/main/main.cpp
parent     98311c427739e3b06527c3ce6b5c021ab6692740 (diff)
llama : remove mtest (#3177)
* Remove mtest
* remove from common/common.h and examples/main/main.cpp
Diffstat (limited to 'examples/main/main.cpp')
-rw-r--r--  examples/main/main.cpp  17
1 file changed, 0 insertions, 17 deletions
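For context, the mem_test path removed below evaluated one full batch of BOS tokens so the worst-case memory use for the chosen n_batch and n_ctx could be observed (with the "used_mem" line in llama.cpp uncommented). A minimal standalone sketch of that pattern, assuming the llama.cpp C API of this era as used in the diff (llama_eval taking a token pointer, count, n_past, and thread count; llama_token_bos taking a context); the run_mem_test helper itself is hypothetical:

// Hypothetical sketch of the removed mem_test path; not part of this commit.
// Assumes the pre-removal llama.cpp API where llama_eval(ctx, tokens, n, n_past, n_threads)
// and llama_token_bos(ctx) exist with the signatures shown in the diff below.
#include <vector>
#include "llama.h"

static int run_mem_test(llama_context * ctx, llama_model * model,
                        int n_batch, int n_ctx, int n_threads) {
    {
        // Fill one full batch with BOS tokens; a single eval of this batch
        // exercises the worst-case allocations for the given n_batch / n_ctx.
        const std::vector<llama_token> tmp(n_batch, llama_token_bos(ctx));

        // n_past = n_ctx positions the evaluation at the end of the context window.
        llama_eval(ctx, tmp.data(), (int) tmp.size(), n_ctx, n_threads);
    }

    llama_print_timings(ctx);
    llama_free(ctx);
    llama_free_model(model);

    return 0;
}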
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index baec6ba1..a8179f1b 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -198,23 +198,6 @@ int main(int argc, char ** argv) {
             params.n_threads, std::thread::hardware_concurrency(), llama_print_system_info());
     }
 
-    // determine the maximum memory usage needed to do inference for the given n_batch and n_ctx parameters
-    // uncomment the "used_mem" line in llama.cpp to see the results
-    if (params.mem_test) {
-        {
-            LOG_TEE("%s: testing memory usage for n_batch = %d, n_ctx = %d\n", __func__, params.n_batch, params.n_ctx);
-
-            const std::vector<llama_token> tmp(params.n_batch, llama_token_bos(ctx));
-            llama_eval(ctx, tmp.data(), tmp.size(), params.n_ctx, params.n_threads);
-        }
-
-        llama_print_timings(ctx);
-        llama_free(ctx);
-        llama_free_model(model);
-
-        return 0;
-    }
-
     // export the cgraph and exit
     if (params.export_cgraph) {
         llama_eval_export(ctx, "llama.ggml");