author     Cheng Shao <terrorjack@type.dance>  2023-08-14 15:36:42 +0200
committer  GitHub <noreply@github.com>         2023-08-14 16:36:42 +0300
commit     d75561df207d22790609ee0ad924302f66ac2599 (patch)
tree       4cabea7e81c1ffe2564e819cd34025b0990b3ad2
parent     348acf188c9fbe66396990f2dc83229df367969b (diff)
server : add --numa support (#2524)
-rw-r--r--  examples/server/README.md   1
-rw-r--r--  examples/server/server.cpp  5
2 files changed, 6 insertions, 0 deletions
diff --git a/examples/server/README.md b/examples/server/README.md
index e56ca063..1559dd3f 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -16,6 +16,7 @@ Command line options:
- `--memory-f32`: Use 32-bit floats instead of 16-bit floats for memory key+value. Not recommended.
- `--mlock`: Lock the model in memory, preventing it from being swapped out when memory-mapped.
- `--no-mmap`: Do not memory-map the model. By default, models are mapped into memory, which allows the system to load only the necessary parts of the model as needed.
+- `--numa`: Attempt optimizations that help on some NUMA systems.
- `--lora FNAME`: Apply a LoRA (Low-Rank Adaptation) adapter to the model (implies --no-mmap). This allows you to adapt the pretrained model to specific tasks or domains.
- `--lora-base FNAME`: Optional model to use as a base for the layers modified by the LoRA adapter. This flag is used in conjunction with the `--lora` flag, and specifies the base model for the adaptation.
- `-to N`, `--timeout N`: Server read/write timeout in seconds. Default `600`.
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 2340f93a..222dbcb4 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -666,6 +666,7 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
{
fprintf(stdout, " --no-mmap do not memory-map model (slower load but may reduce pageouts if not using mlock)\n");
}
+ fprintf(stdout, " --numa attempt optimizations that help on some NUMA systems\n");
#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
fprintf(stdout, " -ngl N, --n-gpu-layers N\n");
fprintf(stdout, " number of layers to store in VRAM\n");
@@ -940,6 +941,10 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
{
params.use_mmap = false;
}
+ else if (arg == "--numa")
+ {
+ params.numa = true;
+ }
else if (arg == "--embedding")
{
params.embedding = true;
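
For context, the new `--numa` option follows the same pattern as the other boolean flags above: server_params_parse sets params.numa, and the value is handed to the backend at startup. Below is a minimal sketch of that flow, assuming the llama.cpp C API of this period, in which llama_backend_init() took a bool enabling NUMA optimizations; it is an illustration of how the flag is consumed, not the server's verbatim startup code:

    #include "llama.h"

    int main() {
        // In server.cpp this would be params.numa, set to true by the
        // "--numa" branch added in the hunk above.
        bool numa = true;

        // Passing true requests the "optimizations that help on some
        // NUMA systems" described in the option's help text.
        llama_backend_init(numa);

        // ... load the model and run the HTTP server loop ...

        llama_backend_free();
        return 0;
    }

A typical invocation on a multi-socket machine would then look like ./server -m <model> --numa, with the model path left as a placeholder.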