summaryrefslogtreecommitdiff
path: root/src/llama-sampling.cpp
diff options
context:
space:
mode:
authorKawrakow <iwankawrakow@gmail.com>2025-06-03 11:32:03 +0300
committerGitHub <noreply@github.com>2025-06-03 11:32:03 +0300
commitccb265c01676aad9ae5860ba50e74e61dfcd1cf8 (patch)
tree8e2d9303bd091c4d0015fce8402162346d998cca /src/llama-sampling.cpp
parent4f8b05a0d76e6c5e47fe1f6c7bd079e0fe95dbba (diff)
Adding the XTC sampler (#486)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'src/llama-sampling.cpp')
-rw-r--r--src/llama-sampling.cpp34
1 file changed, 34 insertions, 0 deletions
diff --git a/src/llama-sampling.cpp b/src/llama-sampling.cpp
index 8910f6d6..06f44b02 100644
--- a/src/llama-sampling.cpp
+++ b/src/llama-sampling.cpp
@@ -434,6 +434,40 @@ void llama_sample_temp_impl(struct llama_sampling * smpl, llama_token_data_array
}
}
+void llama_sample_xtc_impl(struct llama_sampling * smpl, llama_token_data_array * candidates, float probability, float threshold, size_t min_keep) {
+ if (probability < 0 || threshold > 0.5f || candidates->size < 2) {
+ return;
+ }
+ GGML_ASSERT(smpl);
+ const int64_t t_start_sample_us = ggml_time_us();
+ if (probability < 1) {
+ std::uniform_real_distribution<float> distribution(0.0f, 1.0f);
+ float chance = distribution(smpl->rng);
+ if (chance > probability) return;
+ }
+
+ llama_sample_softmax_impl(nullptr, candidates);
+
+ auto cur_size = candidates->size;
+
+ int pos_last = 0;
+
+ for (size_t i = 0; i < candidates->size; ++i) {
+ if (candidates->data[i].p >= threshold) {
+ pos_last = i;
+ } else break;
+ }
+
+ if (candidates->size - pos_last >= min_keep && pos_last > 0) {
+ candidates->data += pos_last;
+ candidates->size -= pos_last;
+ }
+
+ smpl->t_sample_us += ggml_time_us() - t_start_sample_us;
+ smpl->n_sample++;
+
+}
+
void llama_sample_repetition_penalties_impl(
struct llama_sampling * smpl,
llama_token_data_array * candidates,