summaryrefslogtreecommitdiff
path: root/common/common.cpp
diff options
context:
space:
mode:
author	Roland <14355895+rbur0425@users.noreply.github.com>	2023-09-15 03:28:45 -0400
committer	GitHub <noreply@github.com>	2023-09-15 10:28:45 +0300
commit	2d770505a89a99ce78a5950cf14fc06d3176ffa4 (patch)
tree	4a45ba87ef9d45a5c11322c30bb98e1690c69f6b /common/common.cpp
parent	98311c427739e3b06527c3ce6b5c021ab6692740 (diff)
llama : remove mtest (#3177)
* Remove mtest * remove from common/common.h and examples/main/main.cpp
Diffstat (limited to 'common/common.cpp')
-rw-r--r--	common/common.cpp	4
1 file changed, 0 insertions, 4 deletions
diff --git a/common/common.cpp b/common/common.cpp
index afc9b8a5..9969cb97 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -434,8 +434,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
#endif // GGML_USE_CUBLAS
} else if (arg == "--no-mmap") {
params.use_mmap = false;
- } else if (arg == "--mtest") {
- params.mem_test = true;
} else if (arg == "--numa") {
params.numa = true;
} else if (arg == "--export") {
@@ -687,7 +685,6 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" Not recommended since this is both slower and uses more VRAM.\n");
#endif // GGML_USE_CUBLAS
#endif
- printf(" --mtest compute maximum memory usage\n");
printf(" --export export the computation graph to 'llama.ggml'\n");
printf(" --verbose-prompt print prompt before generation\n");
fprintf(stderr, " --simple-io use basic IO for better compatibility in subprocesses and limited consoles\n");
@@ -1225,7 +1222,6 @@ void dump_non_result_info_yaml(FILE * stream, const gpt_params & params, const l
fprintf(stream, "mlock: %s # default: false\n", params.use_mlock ? "true" : "false");
fprintf(stream, "model: %s # default: models/7B/ggml-model.bin\n", params.model.c_str());
fprintf(stream, "model_draft: %s # default:\n", params.model_draft.c_str());
- fprintf(stream, "mtest: %s # default: false\n", params.mem_test ? "true" : "false");
fprintf(stream, "multiline_input: %s # default: false\n", params.multiline_input ? "true" : "false");
fprintf(stream, "n_gpu_layers: %d # default: -1\n", params.n_gpu_layers);
fprintf(stream, "n_predict: %d # default: -1 (unlimited)\n", params.n_predict);