diff options
author | Howard Su <howard0su@gmail.com> | 2023-07-11 22:37:01 +0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-07-11 22:37:01 +0800 |
commit | 2347463201a9f4159ae95b737e1544dd300569c8 (patch) | |
tree | 48983fbda7eb96a08121059615ab7b21e084d2d7 /examples/common.cpp | |
parent | bbef28218fe827265716b66977719b9ee2b21165 (diff) |
Support using mmap when applying LoRA (#2095)
* Support using mmap when applying LoRA
* Fix Linux
* Update comment to reflect that LoRA is supported with mmap
Diffstat (limited to 'examples/common.cpp')
-rw-r--r-- | examples/common.cpp | 3 |
1 file changed, 1 insertion, 2 deletions
diff --git a/examples/common.cpp b/examples/common.cpp index 93159c6d..fad16887 100644 --- a/examples/common.cpp +++ b/examples/common.cpp @@ -267,7 +267,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) { break; } params.lora_adapter = argv[i]; - params.use_mmap = false; } else if (arg == "--lora-base") { if (++i >= argc) { invalid_param = true; @@ -499,7 +498,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) { fprintf(stderr, " --mtest compute maximum memory usage\n"); fprintf(stderr, " --export export the computation graph to 'llama.ggml'\n"); fprintf(stderr, " --verbose-prompt print prompt before generation\n"); - fprintf(stderr, " --lora FNAME apply LoRA adapter (implies --no-mmap)\n"); + fprintf(stderr, " --lora FNAME apply LoRA adapter\n"); fprintf(stderr, " --lora-base FNAME optional model to use as a base for the layers modified by the LoRA adapter\n"); fprintf(stderr, " -m FNAME, --model FNAME\n"); fprintf(stderr, " model path (default: %s)\n", params.model.c_str()); |