author     Johannes Gäßler <johannesg@5d6.de>      2023-06-14 19:47:19 +0200
committer  GitHub <noreply@github.com>             2023-06-14 19:47:19 +0200
commit     254a7a7a5ff4c874ff8488f1f5cbdd7e9c89d682 (patch)
tree       65f35a2d189f3cf6f1f625b2acb343c2dd77790d /examples/common.cpp
parent     92549202659fc23ba9fec5e688227d0da9b06b40 (diff)
CUDA full GPU acceleration, KV cache in VRAM (#1827)
* Fixed CUDA RoPE
* ggml_cuda_mul_mat_vec_p021
* ggml_cuda_scale
* ggml_cuda_diag_mask_inf
* ggml_is_permuted
* ggml_cuda_cpy
* flatten rows for ggml_cuda_op
* Added a --low-vram option
* Fixed Windows performance
* Fixed LLAMA_CUDA_DMMV_Y > 1 for WizardLM
Diffstat (limited to 'examples/common.cpp')
-rw-r--r--  examples/common.cpp  8
1 files changed, 8 insertions, 0 deletions
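
The usage text added below describes --low-vram as "don't allocate VRAM scratch buffer". As context for reading the diff, here is a minimal sketch of how such a flag can gate scratch allocation on the consuming side; allocate_vram_scratch() is a hypothetical helper invented for illustration, not an API from this commit:

    // Sketch only: gating VRAM scratch allocation on a low_vram flag.
    // allocate_vram_scratch() is hypothetical; the real allocation logic
    // lives in llama.cpp / ggml-cuda, which this diff does not show.
    if (!lparams.low_vram) {
        allocate_vram_scratch(ctx); // default path: reserve a VRAM scratch buffer for intermediates
    } else {
        // --low-vram: skip the scratch buffer, trading some speed
        // for a smaller VRAM footprint
    }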
diff --git a/examples/common.cpp b/examples/common.cpp
index df69f273..dc69e537 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -332,6 +332,12 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
 #else
             fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set a tensor split.\n");
 #endif // GGML_USE_CUBLAS
+        } else if (arg == "--low-vram" || arg == "-lv") {
+#ifdef GGML_USE_CUBLAS
+            params.low_vram = true;
+#else
+            fprintf(stderr, "warning: llama.cpp was compiled without cuBLAS. It is not possible to set lower vram usage.\n");
+#endif // GGML_USE_CUBLAS
         } else if (arg == "--no-mmap") {
             params.use_mmap = false;
         } else if (arg == "--mtest") {
@@ -479,6 +485,7 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
     fprintf(stderr, "  -ts SPLIT --tensor-split SPLIT\n");
     fprintf(stderr, "                        how to split tensors across multiple GPUs, comma-separated list of proportions, e.g. 3,1\n");
     fprintf(stderr, "  -mg i, --main-gpu i   the GPU to use for scratch and small tensors\n" );
+    fprintf(stderr, "  -lv, --low-vram       don't allocate VRAM scratch buffer\n" );
 #endif
     fprintf(stderr, "  --mtest               compute maximum memory usage\n");
     fprintf(stderr, "  --export              export the computation graph to 'llama.ggml'\n");
@@ -528,6 +535,7 @@ struct llama_context * llama_init_from_gpt_params(const gpt_params & params) {
     lparams.n_gpu_layers = params.n_gpu_layers;
     lparams.main_gpu     = params.main_gpu;
     memcpy(lparams.tensor_split, params.tensor_split, LLAMA_MAX_DEVICES*sizeof(float));
+    lparams.low_vram     = params.low_vram;
     lparams.seed         = params.seed;
     lparams.f16_kv       = params.memory_f16;
     lparams.use_mmap     = params.use_mmap;
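
Taken together, the three hunks plumb the new flag end to end: parsing (--low-vram / -lv sets params.low_vram), help text, and the copy into llama_context_params. A minimal caller-side sketch, assuming the usual llama.cpp example flow; the default value of low_vram is an assumption, not shown in this diff:

    // Sketch of the flag's path from CLI to context creation, per this diff.
    gpt_params params;                    // presumably carries bool low_vram (default assumed false)
    gpt_params_parse(argc, argv, params); // "--low-vram" or "-lv" sets params.low_vram = true
    llama_context * ctx = llama_init_from_gpt_params(params); // copied into lparams.low_vram

On the command line this corresponds to an invocation like `./main -m model.bin -ngl 32 --low-vram`, where -ngl/--n-gpu-layers is the pre-existing GPU offload option that populates params.n_gpu_layers.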