From 0728c5a8b9569183ffca0399caac099afef87595 Mon Sep 17 00:00:00 2001
From: Johannes Gäßler
Date: Mon, 31 Jul 2023 15:44:35 +0200
Subject: CUDA: mmq CLI option, fixed mmq build issues (#2453)

---
 examples/common.cpp | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)

diff --git a/examples/common.cpp b/examples/common.cpp
index fe7308b1..e6439841 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -352,7 +352,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
 #ifdef GGML_USE_CUBLAS
             params.main_gpu = std::stoi(argv[i]);
 #else
-            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n");
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a main GPU.\n");
 #endif
         } else if (arg == "--tensor-split" || arg == "-ts") {
             if (++i >= argc) {
@@ -376,13 +376,19 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
             }
         }
 #else
-            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
+#endif // GGML_USE_CUBLAS
+        } else if (arg == "--mul-mat-q" || arg == "-mmq") {
+#ifdef GGML_USE_CUBLAS
+            params.mul_mat_q = true;
+#else
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to use mul_mat_q kernels.\n");
 #endif // GGML_USE_CUBLAS
         } else if (arg == "--low-vram" || arg == "-lv") {
 #ifdef GGML_USE_CUBLAS
             params.low_vram = true;
 #else
-            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
 #endif // GGML_USE_CUBLAS
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
@@ -585,6 +591,9 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stdout, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
    fprintf(stdout, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n" );
     fprintf(stdout, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n" );
+    fprintf(stdout, "  -mmq, --mul-mat-q     use experimental mul_mat_q CUDA kernels instead of cuBLAS. TEMP!!!\n" );
+    fprintf(stdout, "                        Reduces VRAM usage by 700/970/1430 MiB for 7b/13b/33b but prompt processing speed\n" );
+    fprintf(stdout, "                        is still suboptimal, especially q2_K, q3_K, q5_K, and q6_K.\n" );
 #endif
     fprintf(stdout, "  --mtest               compute maximum memory usage\n");
     fprintf(stdout, "  --export              export the computation graph to 'llama.ggml'\n");
@@ -637,6 +646,7 @@ struct llama_context_params llama_context_params_from_gpt_param
     lparams.main_gpu = params.main_gpu;
     lparams.tensor_split = params.tensor_split;
     lparams.low_vram = params.low_vram;
+    lparams.mul_mat_q = params.mul_mat_q;
     lparams.seed = params.seed;
     lparams.f16_kv = params.memory_f16;
     lparams.use_mmap = params.use_mmap;
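
For context, below is a minimal, self-contained sketch of the pattern this patch applies: the new -mmq / --mul-mat-q flag is only honored when the binary was built with GGML_USE_CUBLAS, and otherwise falls through to a stderr warning. The names gpt_params_sketch and parse_args_sketch are illustrative stand-ins for this sketch only, not llama.cpp's actual API (the real parsing lives in gpt_params_parse in examples/common.cpp).

// Sketch of the compile-time guarded CLI flag pattern used by the patch.
// gpt_params_sketch / parse_args_sketch are hypothetical names for illustration.
#include <cstdio>
#include <cstring>

struct gpt_params_sketch {
    bool mul_mat_q = false; // mirrors the gpt_params::mul_mat_q field the patch relies on
};

static void parse_args_sketch(int argc, char ** argv, gpt_params_sketch & params) {
    for (int i = 1; i < argc; i++) {
        if (strcmp(argv[i], "--mul-mat-q") == 0 || strcmp(argv[i], "-mmq") == 0) {
#ifdef GGML_USE_CUBLAS
            // Only meaningful in a cuBLAS/CUDA build: enable the mul_mat_q kernels.
            params.mul_mat_q = true;
#else
            // Same behavior as the patch: accept the flag but warn that it has no effect.
            fprintf(stderr, "warning: compiled without cuBLAS, -mmq has no effect.\n");
#endif
        }
    }
}

int main(int argc, char ** argv) {
    gpt_params_sketch params;
    parse_args_sketch(argc, argv, params);
    printf("mul_mat_q = %s\n", params.mul_mat_q ? "true" : "false");
    return 0;
}

In the patch itself, a cuBLAS build that is run with -mmq sets params.mul_mat_q, which the final hunk copies into llama_context_params; non-CUDA builds accept the flag but only print a warning, so command lines remain portable across build configurations.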