path: root/examples/common.h
author    WangHaoranRobin <56047610+WangHaoranRobin@users.noreply.github.com>    2023-07-03 05:38:44 +0800
committer GitHub <noreply@github.com>    2023-07-03 00:38:44 +0300
commit    d7d2e6a0f0c74f7a570dae384dfff371ac744d2a (patch)
tree      ad82a8c9b71b5375936062ca2fc89fc012af10ed /examples/common.h
parent    46088f72318981341a2d646f12f6eee6aec06d65 (diff)
server: add option to output probabilities for completion (#1962)
* server: add option to output probabilities for completion
* server: fix issue when handling probability output for incomplete tokens for multibyte character generation
* server: fix llama_sample_top_k order
* examples/common.h: put all bool variables in gpt_params together
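The new n_probs field asks the server to report, for each generated token, the probabilities of the most likely candidate tokens. As a rough illustration of what that means (a minimal, self-contained sketch under stated assumptions, not the server's actual implementation; top_n_probs and token_prob are hypothetical names), the per-step logits can be converted to probabilities with a softmax and the n_probs largest kept:

// Illustrative sketch only (not the server's actual code): given the raw logits
// for one sampling step, compute softmax probabilities and keep the n_probs
// most likely tokens. top_n_probs and token_prob are hypothetical names.
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <utility>
#include <vector>

using token_prob = std::pair<int32_t, float>; // (token id, probability)

static std::vector<token_prob> top_n_probs(const std::vector<float> & logits, int32_t n_probs) {
    if (logits.empty() || n_probs <= 0) {
        return {};
    }

    std::vector<token_prob> probs(logits.size());

    // softmax with max-subtraction for numerical stability
    const float max_logit = *std::max_element(logits.begin(), logits.end());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) {
        const float p = std::exp(logits[i] - max_logit);
        probs[i] = { (int32_t) i, p };
        sum += p;
    }
    for (auto & tp : probs) {
        tp.second /= sum;
    }

    // keep only the n_probs highest-probability tokens
    const size_t keep = std::min(probs.size(), (size_t) n_probs);
    std::partial_sort(probs.begin(), probs.begin() + keep, probs.end(),
        [](const token_prob & a, const token_prob & b) { return a.second > b.second; });
    probs.resize(keep);
    return probs;
}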
Diffstat (limited to 'examples/common.h')
-rw-r--r--   examples/common.h | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/examples/common.h b/examples/common.h
index 66e56729..96f2228f 100644
--- a/examples/common.h
+++ b/examples/common.h
@@ -31,7 +31,7 @@ struct gpt_params {
int32_t n_gpu_layers = 0; // number of layers to store in VRAM
int32_t main_gpu = 0; // the GPU that is used for scratch and small tensors
float tensor_split[LLAMA_MAX_DEVICES] = {0}; // how split tensors should be distributed across GPUs
- bool low_vram = 0; // if true, reduce VRAM usage at the cost of performance
+    int32_t n_probs = 0; // if greater than 0, output the probabilities of top n_probs tokens.
// sampling parameters
std::unordered_map<llama_token, float> logit_bias; // logit bias for specific tokens
@@ -59,6 +59,7 @@ struct gpt_params {
std::string lora_adapter = ""; // lora adapter path
std::string lora_base = ""; // base model path for the lora adapter
+ bool low_vram = false; // if true, reduce VRAM usage at the cost of performance
bool memory_f16 = true; // use f16 instead of f32 for memory kv
bool random_prompt = false; // do not randomize prompt if none provided
bool use_color = false; // use color to distinguish generations and inputs