| author | Johannes Gäßler <johannesg@5d6.de> | 2023-08-22 22:47:05 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-08-22 22:47:05 +0200 |
| commit | c63bb1d16a70c03440671b76954bb767513cead8 (patch) | |
| tree | 5562dd98e6a37a1bfe635bf25114e3bdb5660996 /examples/server | |
| parent | 3b6cfe7c927df178ca3c11643c3ec93e143471c9 (diff) | |
CUDA: use mul_mat_q kernels by default (#2683)
Diffstat (limited to 'examples/server')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | examples/server/server.cpp | 13 |

1 file changed, 6 insertions, 7 deletions
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 39fdf330..e5bc52cd 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -671,12 +671,11 @@ static void server_print_usage(const char *argv0, const gpt_params &params,
     fprintf(stdout, "                        number of layers to store in VRAM\n");
     fprintf(stdout, "  -ts SPLIT --tensor-split SPLIT\n");
     fprintf(stdout, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
-    fprintf(stdout, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
     fprintf(stdout, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n");
     fprintf(stdout, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n");
-    fprintf(stdout, "  -mmq, --mul-mat-q     use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n" );
-    fprintf(stdout, "                        Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n" );
-    fprintf(stdout, "                        is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n" );
+    fprintf(stdout, "  -nommq, --no-mul-mat-q\n");
+    fprintf(stdout, "                        use cuBLAS instead of custom mul_mat_q CUDA kernels.\n");
+    fprintf(stdout, "                        Not recommended since this is both slower and uses more VRAM.\n");
 #endif
     fprintf(stdout, "  -m FNAME, --model FNAME\n");
     fprintf(stdout, "                        model path (default: %s)\n", params.model.c_str());
@@ -867,12 +866,12 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
             LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n", {});
 #endif // GGML_USE_CUBLAS
         }
-        else if (arg == "--mul-mat-q" || arg == "-mmq")
+        else if (arg == "--no-mul-mat-q" || arg == "-nommq")
         {
 #ifdef GGML_USE_CUBLAS
-            params.mul_mat_q = true;
+            params.mul_mat_q = false;
 #else
-            LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n", {});
+            LOG_WARNING("warning: llama.cpp was compiled without cuBLAS. Disabling mul_mat_q kernels has no effect.\n", {});
 #endif // GGML_USE_CUBLAS
         }
         else if (arg == "--main-gpu" || arg == "-mg")
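The practical effect of this hunk is an inverted flag: `mul_mat_q` is now on by default and the server option only switches it off. Below is a minimal, hedged sketch of that parsing behaviour; `params_sketch` and `parse_args_sketch` are illustrative stand-ins, not the actual `gpt_params` / `server_params_parse` code.

```cpp
#include <cstdio>
#include <string>
#include <vector>

// Illustrative stand-in for gpt_params: mul_mat_q now defaults to true,
// matching the new behaviour where mul_mat_q kernels are used by default.
struct params_sketch {
    bool mul_mat_q = true;
};

// Hypothetical helper mirroring the inverted flag handling in the diff:
// "-nommq" / "--no-mul-mat-q" opts back into cuBLAS.
static void parse_args_sketch(const std::vector<std::string> & args, params_sketch & params) {
    for (const std::string & arg : args) {
        if (arg == "--no-mul-mat-q" || arg == "-nommq") {
            params.mul_mat_q = false; // disable the custom CUDA kernels
        }
    }
}

int main(int argc, char ** argv) {
    params_sketch params;
    parse_args_sketch(std::vector<std::string>(argv + 1, argv + argc), params);
    std::printf("mul_mat_q: %s\n", params.mul_mat_q ? "enabled (default)" : "disabled");
    return 0;
}
```

In other words, running the server with no flag now takes the mul_mat_q path; passing `-nommq` (or `--no-mul-mat-q`) falls back to cuBLAS, which the updated help text describes as both slower and heavier on VRAM.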