author    Howard Su <howard0su@gmail.com>  2023-07-13 21:58:25 +0800
committer GitHub <noreply@github.com>      2023-07-13 21:58:25 +0800
commit    32c54116318929c90fd7ae814cf9b5232cd44c36 (patch)
tree      3b9126e3fb387ef1aa53d7461f9a41e1ce2965ed /examples/common.cpp
parent    ff5d58faecf1f02b05bd015bdfc6a394cf2bc9ba (diff)
Revert "Support using mmap when applying LoRA (#2095)" (#2206)
Has perf regression when mlock is used. This reverts commit 2347463201a9f4159ae95b737e1544dd300569c8.
Diffstat (limited to 'examples/common.cpp')
-rw-r--r--  examples/common.cpp  3
1 file changed, 2 insertions, 1 deletion
diff --git a/examples/common.cpp b/examples/common.cpp
index fd551c9c..94875b05 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -285,6 +285,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
break;
}
params.lora_adapter = argv[i];
+ params.use_mmap = false;
} else if (arg == "--lora-base") {
if (++i >= argc) {
invalid_param = true;
@@ -520,7 +521,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
fprintf(stderr, " --mtest compute maximum memory usage\n");
fprintf(stderr, " --export export the computation graph to 'llama.ggml'\n");
fprintf(stderr, " --verbose-prompt print prompt before generation\n");
- fprintf(stderr, " --lora FNAME apply LoRA adapter\n");
+ fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n");
fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n");
fprintf(stderr, " -m FNAME, --model FNAME\n");
fprintf(stderr, " model path (default: %s)\n", params.model.c_str());