diff options
author | Cheng Shao <terrorjack@type.dance> | 2023-08-14 15:36:42 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-08-14 16:36:42 +0300 |
commit | d75561df207d22790609ee0ad924302f66ac2599 (patch) | |
tree | 4cabea7e81c1ffe2564e819cd34025b0990b3ad2 /examples/server/server.cpp | |
parent | 348acf188c9fbe66396990f2dc83229df367969b (diff) |
server : add --numa support (#2524)
Diffstat (limited to 'examples/server/server.cpp')
-rw-r--r-- | examples/server/server.cpp | 5 |
1 file changed, 5 insertions, 0 deletions
diff --git a/examples/server/server.cpp b/examples/server/server.cpp index 2340f93a..222dbcb4 100644 --- a/examples/server/server.cpp +++ b/examples/server/server.cpp @@ -666,6 +666,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params, { fprintf(stdout, "  --no-mmap             do not memory-map model (slower load but may reduce pageouts if not using mlock)\n"); } + fprintf(stdout, "  --numa                attempt optimizations that help on some NUMA systems\n"); #ifdef LLAMA_SUPPORTS_GPU_OFFLOAD fprintf(stdout, "  -ngl N, --n-gpu-layers N\n"); fprintf(stdout, "                        number of layers to store in VRAM\n"); @@ -940,6 +941,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams, { params.use_mmap = false; } + else if (arg == "--numa") + { + params.numa = true; + } else if (arg == "--embedding") { params.embedding = true; |