| author | Johannes Gäßler <johannesg@5d6.de> | 2023-06-14 19:47:19 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-06-14 19:47:19 +0200 |
| commit | 254a7a7a5ff4c874ff8488f1f5cbdd7e9c89d682 (patch) | |
| tree | 65f35a2d189f3cf6f1f625b2acb343c2dd77790d /examples/common.h | |
| parent | 92549202659fc23ba9fec5e688227d0da9b06b40 (diff) | |
CUDA full GPU acceleration, KV cache in VRAM (#1827)
* Fixed CUDA RoPE
* ggml_cuda_mul_mat_vec_p021
* ggml_cuda_scale
* ggml_cuda_diag_mask_inf
* ggml_is_permuted
* ggml_cuda_cpy
* flatten rows for ggml_cuda_op
* Added a --low-vram option (see the sketch after this list)
* Fixed Windows performance
* Fixed LLAMA_CUDA_DMMV_Y > 1 for WizardLM
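As a rough illustration of how the new `--low-vram` flag relates to the parameter struct changed in this diff, here is a minimal, self-contained C++ sketch. The parser below is hypothetical and heavily simplified; the real parsing lives in `gpt_params_parse()` in examples/common.cpp and covers many more options, so treat the names and structure here as assumptions for illustration only.

```cpp
#include <cstdint>
#include <cstdio>
#include <cstdlib>
#include <cstring>

// Hypothetical, pared-down stand-in for gpt_params; only the two
// fields relevant to this commit are shown.
struct params_sketch {
    int32_t n_gpu_layers = 0;     // number of layers to store in VRAM
    bool    low_vram     = false; // reduce VRAM usage at the cost of performance
};

// Simplified argument loop (illustrative only; not the actual parser).
static void parse_args_sketch(int argc, char ** argv, params_sketch & p) {
    for (int i = 1; i < argc; i++) {
        if (std::strcmp(argv[i], "--low-vram") == 0) {
            p.low_vram = true;
        } else if (std::strcmp(argv[i], "--n-gpu-layers") == 0 && i + 1 < argc) {
            p.n_gpu_layers = static_cast<int32_t>(std::atoi(argv[++i]));
        }
    }
}

int main(int argc, char ** argv) {
    params_sketch p;
    parse_args_sketch(argc, argv, p);
    std::printf("n_gpu_layers=%d low_vram=%d\n", p.n_gpu_layers, p.low_vram);
    return 0;
}
```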
Diffstat (limited to 'examples/common.h')
-rw-r--r-- | examples/common.h | 17
1 file changed, 9 insertions, 8 deletions
```diff
diff --git a/examples/common.h b/examples/common.h
index 6fedb414..6c2953cb 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -21,15 +21,16 @@
 int32_t get_num_physical_cores();
 
 struct gpt_params {
-    int32_t seed          = -1; // RNG seed
-    int32_t n_threads     = get_num_physical_cores();
-    int32_t n_predict     = -1; // new tokens to predict
-    int32_t n_ctx         = 512; // context size
-    int32_t n_batch       = 512; // batch size for prompt processing (must be >=32 to use BLAS)
-    int32_t n_keep        = 0; // number of tokens to keep from initial prompt
-    int32_t n_gpu_layers  = 0; // number of layers to store in VRAM
-    int32_t main_gpu      = 0; // the GPU that is used for scratch and small tensors
+    int32_t seed          = -1;  // RNG seed
+    int32_t n_threads     = get_num_physical_cores();
+    int32_t n_predict     = -1;  // new tokens to predict
+    int32_t n_ctx         = 512; // context size
+    int32_t n_batch       = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_keep        = 0;   // number of tokens to keep from initial prompt
+    int32_t n_gpu_layers  = 0;   // number of layers to store in VRAM
+    int32_t main_gpu      = 0;   // the GPU that is used for scratch and small tensors
     float   tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
+    bool    low_vram      = 0; // if true, reduce VRAM usage at the cost of performance
 
     // sampling parameters
     std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
```
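To make the effect of the new field concrete, the following sketch shows one plausible way `low_vram` could gate whether the KV cache is placed in VRAM, per the commit title. Everything here (`kv_backend`, `choose_kv_backend`, the exact condition) is invented for illustration; the actual placement logic lives in llama.cpp and the CUDA backend, not in this header, and also depends on how many layers are offloaded.

```cpp
#include <cstdint>
#include <cstdio>

// Invented for illustration: where the KV cache would be allocated.
enum class kv_backend { vram, host };

// Hypothetical helper combining the two fields this diff touches:
// with layers offloaded and --low-vram unset, keep the KV cache in
// VRAM; with --low-vram, fall back to host memory to save VRAM at
// some performance cost. (The real condition is more involved.)
static kv_backend choose_kv_backend(int32_t n_gpu_layers, bool low_vram) {
    if (n_gpu_layers > 0 && !low_vram) {
        return kv_backend::vram;
    }
    return kv_backend::host;
}

int main() {
    const bool in_vram =
        choose_kv_backend(/*n_gpu_layers =*/ 35, /*low_vram =*/ false) == kv_backend::vram;
    std::printf("kv cache in vram: %s\n", in_vram ? "yes" : "no");
    return 0;
}
```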