summaryrefslogtreecommitdiff
path: root/examples
diff options
context:
space:
mode:
authorRobey Holderith <robey@flaminglunchbox.net>2024-02-18 11:11:16 -0800
committerGitHub <noreply@github.com>2024-02-18 21:11:16 +0200
commit5ee99c32f5e47c8d32634eff9a47fb32a24c276b (patch)
treeb96e45b36fe325a926025f94655a885428c84f47 /examples
parentc145f8a132b2fe1d1e65987faddbd9a40bef7a12 (diff)
common, server : surface min_keep as its own parameter (#5567)
* Feature - surface min_keep as its own parameter * Updated README with min_keep param
Diffstat (limited to 'examples')
-rw-r--r--examples/server/README.md2
-rw-r--r--examples/server/public/index.html4
-rw-r--r--examples/server/server.cpp2
3 files changed, 8 insertions, 0 deletions
diff --git a/examples/server/README.md b/examples/server/README.md
index ac5133d2..809e2d37 100644
--- a/examples/server/README.md
+++ b/examples/server/README.md
@@ -199,6 +199,8 @@ node index.js
`n_probs`: If greater than 0, the response also contains the probabilities of top N tokens for each generated token (default: 0)
+ `min_keep`: If greater than 0, force samplers to return N possible tokens at minimum (default: 0)
+
`image_data`: An array of objects to hold base64-encoded image `data` and its `id`s to be referenced in `prompt`. You can determine the place of the image in the prompt as in the following: `USER:[img-12]Describe the image in detail.\nASSISTANT:`. In this case, `[img-12]` will be replaced by the embeddings of the image with id `12` in the following `image_data` array: `{..., "image_data": [{"data": "<BASE64_STRING>", "id": 12}]}`. Use `image_data` only with multimodal models, e.g., LLaVA.
`slot_id`: Assign the completion task to a specific slot. If it is -1, the task will be assigned to an idle slot (default: -1)
diff --git a/examples/server/public/index.html b/examples/server/public/index.html
index b059c75f..84038ddc 100644
--- a/examples/server/public/index.html
+++ b/examples/server/public/index.html
@@ -234,6 +234,7 @@
mirostat_eta: 0.1, // learning rate
grammar: '',
n_probs: 0, // no completion_probabilities,
+ min_keep: 0, // min probs from each sampler,
image_data: [],
cache_prompt: true,
api_key: ''
@@ -792,6 +793,9 @@
${IntField({ label: "Show Probabilities", max: 10, min: 0, name: "n_probs", value: params.value.n_probs })}
</fieldset>
<fieldset>
+ ${IntField({ label: "Min Probabilities from each Sampler", max: 10, min: 0, name: "min_keep", value: params.value.min_keep })}
+ </fieldset>
+ <fieldset>
<label for="api_key">API Key</label>
<input type="text" name="api_key" value="${params.value.api_key}" placeholder="Enter API key" oninput=${updateParams} />
</fieldset>
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 4f2e9c89..22c344dd 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -548,6 +548,7 @@ struct llama_server_context
slot->params.seed = json_value(data, "seed", default_params.seed);
slot->sparams.grammar = json_value(data, "grammar", default_sparams.grammar);
slot->sparams.n_probs = json_value(data, "n_probs", default_sparams.n_probs);
+ slot->sparams.min_keep = json_value(data, "min_keep", default_sparams.min_keep);
if (slot->n_predict > 0 && slot->params.n_predict > slot->n_predict) {
// Might be better to reject the request with a 400 ?
@@ -1093,6 +1094,7 @@ struct llama_server_context
{"stream", slot.params.stream},
{"logit_bias", slot.sparams.logit_bias},
{"n_probs", slot.sparams.n_probs},
+ {"min_keep", slot.sparams.min_keep},
{"grammar", slot.sparams.grammar},
{"samplers", samplers_sequence}
};