From 2347463201a9f4159ae95b737e1544dd300569c8 Mon Sep 17 00:00:00 2001
From: Howard Su
Date: Tue, 11 Jul 2023 22:37:01 +0800
Subject: Support using mmap when applying LoRA (#2095)

* Support using mmap when applying LoRA

* Fix Linux

* Update comment to reflect LoRA support with mmap
---
 examples/common.cpp | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index 93159c6d..fad16887 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -267,7 +267,6 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
                 break;
             }
             params.lora_adapter = argv[i];
-            params.use_mmap = false;
         } else if (arg == "--lora-base") {
             if (++i >= argc) {
                 invalid_param = true;
@@ -499,7 +498,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  --mtest               compute maximum memory usage\n");
     fprintf(stderr, "  --export              export the computation graph to 'llama.ggml'\n");
     fprintf(stderr, "  --verbose-prompt      print prompt before generation\n");
-    fprintf(stderr, "  --lora FNAME          apply LoRA adapter (implies --no-mmap)\n");
+    fprintf(stderr, "  --lora FNAME          apply LoRA adapter\n");
     fprintf(stderr, "  --lora-base FNAME     optional model to use as a base for the layers modified by the LoRA adapter\n");
     fprintf(stderr, "  -m FNAME, --model FNAME\n");
     fprintf(stderr, "                        model path (default: %s)\n", params.model.c_str());
--
cgit v1.2.3
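
For context, here is a minimal, self-contained C++ sketch of the behaviour this patch produces; it is not the actual examples/common.cpp code. With `params.use_mmap = false;` removed from the `--lora` branch, passing `--lora FNAME` records the adapter path but leaves memory-mapping enabled by default, and mmap is only disabled when the user asks for it explicitly via `--no-mmap`. The `gpt_params_sketch` struct, `parse_args_sketch` helper, and `main` driver below are illustrative stand-ins, not real llama.cpp symbols; only the `lora_adapter` and `use_mmap` fields mirror the real gpt_params.

// Sketch of post-patch argument handling: --lora no longer implies --no-mmap.
#include <cstdio>
#include <string>

struct gpt_params_sketch {
    std::string lora_adapter;    // path to the LoRA adapter file
    bool        use_mmap = true; // default: memory-map the model if supported
};

static bool parse_args_sketch(int argc, char ** argv, gpt_params_sketch & params) {
    for (int i = 1; i < argc; i++) {
        std::string arg = argv[i];
        if (arg == "--lora") {
            if (++i >= argc) {
                return false;            // missing FNAME after --lora
            }
            params.lora_adapter = argv[i];
            // Before this patch, the parser also set `params.use_mmap = false;`
            // here. That line is gone, so the adapter path is recorded without
            // disabling mmap.
        } else if (arg == "--no-mmap") {
            params.use_mmap = false;     // mmap can still be disabled explicitly
        }
    }
    return true;
}

int main(int argc, char ** argv) {
    gpt_params_sketch params;
    if (!parse_args_sketch(argc, argv, params)) {
        fprintf(stderr, "usage: %s [--lora FNAME] [--no-mmap]\n", argv[0]);
        return 1;
    }
    printf("lora_adapter = '%s', use_mmap = %s\n",
           params.lora_adapter.c_str(), params.use_mmap ? "true" : "false");
    return 0;
}

Running this sketch as, say, `./parse_sketch --lora adapter.bin` would report use_mmap = true, whereas the pre-patch logic would have reported false for the same invocation; that difference is exactly what the removed line and the updated `--lora` help text reflect.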