summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
authorGeorgi Gerganov <ggerganov@gmail.com>2023-10-28 14:23:11 +0300
committerGitHub <noreply@github.com>2023-10-28 14:23:11 +0300
commitee1a0ec9cb367ba41d138134795cbbbe93d2bf1c (patch)
tree04111cbd66f34c6cc986e70bb47b96b39828e0f2
parent177461104b454163473dced2a5038f4e016cdb7e (diff)
llama : add option for greedy sampling with probs (#3813)
* llama : add option for greedy sampling with probs
* llama : add comment about llama_sample_token_greedy() missing probs
* sampling : temp == 0.0 -> no probs, temp < 0.0 -> probs
-rw-r--r--common/common.cpp1
-rw-r--r--common/sampling.cpp8
-rw-r--r--examples/speculative/speculative.cpp2
-rw-r--r--llama.h1
4 files changed, 9 insertions, 3 deletions
diff --git a/common/common.cpp b/common/common.cpp
index c0d4924e..f81f4d35 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -224,6 +224,7 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
break;
}
sparams.temp = std::stof(argv[i]);
+ sparams.temp = std::max(sparams.temp, 0.0f);
} else if (arg == "--tfs") {
if (++i >= argc) {
invalid_param = true;
diff --git a/common/sampling.cpp b/common/sampling.cpp
index 5258d4e8..c4996c98 100644
--- a/common/sampling.cpp
+++ b/common/sampling.cpp
@@ -167,8 +167,12 @@ llama_token llama_sampling_sample(
llama_sample_grammar(ctx_main, &cur_p, ctx_sampling->grammar);
}
- if (temp <= 0) {
- // greedy sampling
+ if (temp < 0.0) {
+ // greedy sampling, with probs
+ llama_sample_softmax(ctx_main, &cur_p);
+ id = cur_p.data[0].id;
+ } else if (temp == 0.0) {
+ // greedy sampling, no probs
id = llama_sample_token_greedy(ctx_main, &cur_p);
} else {
if (mirostat == 1) {
diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp
index f921b784..323c7465 100644
--- a/examples/speculative/speculative.cpp
+++ b/examples/speculative/speculative.cpp
@@ -148,7 +148,7 @@ int main(int argc, char ** argv) {
std::vector<seq_draft> drafts(n_seq_dft);
params.sparams.grammar.clear(); // the draft samplers will copy the target sampler's grammar
- params.sparams.temp = std::max(0.01f, params.sparams.temp);
+ params.sparams.temp = -1.0f; // force greedy sampling with probs for the draft model
for (int s = 0; s < n_seq_dft; ++s) {
drafts[s].ctx_sampling = llama_sampling_init(params.sparams);
diff --git a/llama.h b/llama.h
index beac9a0c..d901dcd9 100644
--- a/llama.h
+++ b/llama.h
@@ -658,6 +658,7 @@ extern "C" {
float * mu);
/// @details Selects the token with the highest probability.
+ /// Does not compute the token probabilities. Use llama_sample_softmax() instead.
LLAMA_API llama_token llama_sample_token_greedy(
struct llama_context * ctx,
llama_token_data_array * candidates);