From d75561df207d22790609ee0ad924302f66ac2599 Mon Sep 17 00:00:00 2001 From: Cheng Shao Date: Mon, 14 Aug 2023 15:36:42 +0200 Subject: server : add --numa support (#2524) --- examples/server/server.cpp | 5 +++++ 1 file changed, 5 insertions(+) (limited to 'examples/server/server.cpp') diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 2340f93a..222dbcb4 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -666,6 +666,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params, { fprintf(stdout, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } + fprintf(stdout, " --numa attempt optimizations that help on some NUMA systems\n"); #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD fprintf(stdout, " -ngl N, --n-gpu-layers N\n"); fprintf(stdout, " number of layers to store in VRAM\n"); @@ -940,6 +941,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, { params.use_mmap = false; } + else if (arg == "--numa") + { + params.numa = true; + } else if (arg == "--embedding") { params.embedding = true; -- cgit v1.2.3