author    Georgi Gerganov <ggerganov@gmail.com>  2023-10-20 21:07:23 +0300
committer GitHub <noreply@github.com>            2023-10-20 21:07:23 +0300
commit    d1031cf49c3b958b915fd558e23453471c29ac33
tree      14fa2bc6d54d5e27bd1e8bfd6fa4dbf894dbe6b9 /examples/speculative/speculative.cpp
parent    8cf19d60dc93809db8e51fedc811595eed9134c5
sampling : refactor init to use llama_sampling_params (#3696)
* sampling : refactor init to use llama_sampling_params
* llama : combine repetition, frequency and presence penalties in 1 call
* examples : remove embd-input and gptneox-wip
* sampling : rename penalty params + reduce size of "prev" vector
* sampling : add llama_sampling_print helper
* sampling : hide prev behind API and apply #3661

ggml-ci
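The call-site change for this file is small but representative of the whole refactor: the sampling context is now initialized from the sampling-specific parameters (`gpt_params::sparams`) instead of the full `gpt_params` struct. A minimal sketch of the before/after pattern, assuming the `common/sampling.h` declarations at this commit (only the calls visible in the diff below are taken from the source; everything else is illustrative):

```cpp
#include "common.h"    // gpt_params, assumed to expose the renamed .sparams member
#include "sampling.h"  // llama_sampling_context, llama_sampling_init

// Before #3696: the sampler was built from the entire example configuration.
//   struct llama_sampling_context * ctx_sampling = llama_sampling_init(params);

// After #3696: init takes only the sampling parameters, decoupling the
// sampler from the rest of gpt_params.
struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams);
```

The same pattern applies to the per-draft sampling contexts further down: the shared `params.sparams.grammar` string is cleared first, since (per the in-tree comment) the draft samplers copy the target sampler's grammar state rather than re-parsing it.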
Diffstat (limited to 'examples/speculative/speculative.cpp')
 examples/speculative/speculative.cpp | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/examples/speculative/speculative.cpp b/examples/speculative/speculative.cpp
index 24f49012..894321ce 100644
--- a/examples/speculative/speculative.cpp
+++ b/examples/speculative/speculative.cpp
@@ -112,16 +112,16 @@ int main(int argc, char ** argv) {
     bool has_eos = false;
 
     // target model sampling context
-    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params);
+    struct llama_sampling_context * ctx_sampling = llama_sampling_init(params.sparams);
 
     // draft sequence data
     std::vector<seq_draft> drafts(n_seq_dft);
 
-    params.grammar.clear(); // the draft samplers will copy the target sampler's grammar
-    params.sampling_params.temp = std::max(0.01f, params.sampling_params.temp);
+    params.sparams.grammar.clear(); // the draft samplers will copy the target sampler's grammar
+    params.sparams.temp = std::max(0.01f, params.sparams.temp);
 
     for (int s = 0; s < n_seq_dft; ++s) {
-        drafts[s].ctx_sampling = llama_sampling_init(params);
+        drafts[s].ctx_sampling = llama_sampling_init(params.sparams);
     }
 
     llama_batch batch_dft = llama_batch_init(params.n_ctx, 0, 1);
@@ -154,7 +154,7 @@ int main(int argc, char ** argv) {
             // sample from the target model
             llama_token id = llama_sampling_sample(ctx_sampling, ctx_tgt, NULL, drafts[s_keep].i_batch_tgt[i_dft]);
 
-            llama_sampling_accept(ctx_sampling, ctx_tgt, id);
+            llama_sampling_accept(ctx_sampling, ctx_tgt, id, true);
 
             //LOG("last: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx_tgt, ctx_sampling->prev).c_str());
@@ -328,7 +328,7 @@ int main(int argc, char ** argv) {
                 const int s = sa[is];
 
-                llama_sampling_accept(drafts[s].ctx_sampling, ctx_dft, id);
+                llama_sampling_accept(drafts[s].ctx_sampling, ctx_dft, id, true);
 
                 drafts[s].tokens.push_back(id);
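The second and third hunks extend `llama_sampling_accept` with a fourth boolean argument, passed as `true` on both the target and draft paths. A hedged sketch of the updated calls, assuming the `common/sampling.h` signature at this commit (the parameter name `apply_grammar` is taken from that header; if it differs, treat it as an assumption):

```cpp
// Assumed declaration after #3696 (common/sampling.h):
//   void llama_sampling_accept(
//           struct llama_sampling_context * ctx_sampling,
//           struct llama_context * ctx_main,
//           llama_token id,
//           bool apply_grammar);

// target path: record the accepted token in the sampler's history and
// advance the grammar state
llama_sampling_accept(ctx_sampling, ctx_tgt, id, /* apply_grammar = */ true);

// draft path: each draft sequence keeps its own sampling context, but the
// call shape is identical
llama_sampling_accept(drafts[s].ctx_sampling, ctx_dft, id, /* apply_grammar = */ true);
```

This pairs with the commit's "hide prev behind API" item: callers no longer reach into `ctx_sampling->prev` directly (note the commented-out LOG line in the second hunk), and token acceptance, including the grammar update, goes through the one API call.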