From e76d630df17e235e6b9ef416c45996765d2e36fb Mon Sep 17 00:00:00 2001
From: Georgi Gerganov
Date: Sun, 23 Jul 2023 15:09:47 +0300
Subject: llama : grouped-query attention + LLaMAv2 70B support (#2276)

* CUDA: GQA implementation

* llama : support for GQA and LLaMAv2 70B

ggml-ci

* py : fix hparams parsing (if-else blocks)

ggml-ci

* py : oh boy ..

ggml-ci

* help : fix gqa value for 70B

ggml-ci

---------

Co-authored-by: JohannesGaessler
---
 examples/common.h | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'examples/common.h')

diff --git a/examples/common.h b/examples/common.h
index c936de6f..fb8f6d65 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -27,6 +27,7 @@ struct gpt_params {
     int32_t n_predict  = -1;  // new tokens to predict
     int32_t n_ctx      = 512; // context size
     int32_t n_batch    = 512; // batch size for prompt processing (must be >=32 to use BLAS)
+    int32_t n_gqa      = 1;   // grouped-query attention factor (TODO: move to hparams)
     int32_t n_keep     = 0;   // number of tokens to keep from initial prompt
     int32_t n_chunks   = -1;  // max number of chunks to process (-1 = unlimited)
     int32_t n_gpu_layers = 0; // number of layers to store in VRAM
@@ -47,7 +48,7 @@ struct gpt_params {
     int32_t repeat_last_n     = 64;    // last n tokens to penalize (0 = disable penalty, -1 = context size)
     float   frequency_penalty = 0.00f; // 0.0 = disabled
     float   presence_penalty  = 0.00f; // 0.0 = disabled
-    int     mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
+    int32_t mirostat          = 0;     // 0 = disabled, 1 = mirostat, 2 = mirostat 2.0
     float   mirostat_tau      = 5.00f; // target entropy
     float   mirostat_eta      = 0.10f; // learning rate
--
cgit v1.2.3
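
The `n_gqa` field added above is the grouped-query attention factor: how many query heads share a single key/value head. A minimal sketch of that relationship follows; the helper name `kv_head_count` is hypothetical and not part of llama.cpp, and the check that the factor divides the head count is an assumption about valid configurations.

#include <cassert>
#include <cstdint>

// Hypothetical illustration of the GQA factor: n_gqa query heads share
// one key/value head, so the number of KV heads is n_head / n_gqa.
static int32_t kv_head_count(int32_t n_head, int32_t n_gqa) {
    assert(n_gqa > 0 && n_head % n_gqa == 0); // factor must divide the head count
    return n_head / n_gqa;
}

int main() {
    // LLaMAv2 70B uses 64 attention heads with a GQA factor of 8,
    // giving 8 key/value heads; n_gqa = 1 is ordinary multi-head attention.
    return kv_head_count(64, 8) == 8 ? 0 : 1;
}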