From 70c29da118cdb02bfcbd0376c32b5b2236e48e48 Mon Sep 17 00:00:00 2001
From: Kerfuffle <44031344+KerfuffleV2@users.noreply.github.com>
Date: Wed, 11 Oct 2023 13:35:46 -0600
Subject: common : fix mirostat state when using multiple sequences (#3543)

* Fix mirostat state when using multiple sequences

* Fix mirostat by completely refactoring sampling!

* Try to fix zig build.

* Export function to fetch/create default sampler states

Code formatting cleanups and add some comments

Silence a warning about id not being used when logging is disabled

* Apply some renaming suggestions.

Fix comments that were out of sync with the pull.

* Use more consistent naming convention for sampling contexts
---
 examples/save-load-state/save-load-state.cpp | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'examples/save-load-state')

diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index acc6dbdf..f9e3c98a 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -8,9 +8,10 @@
 
 int main(int argc, char ** argv) {
     gpt_params params;
+    llama_sampling_params & sparams = params.sampling_params;
     params.seed = 42;
     params.n_threads = 4;
-    params.repeat_last_n = 64;
+    sparams.repeat_last_n = 64;
     params.prompt = "The quick brown fox";
 
     if (!gpt_params_parse(argc, argv, params)) {
@@ -24,7 +25,7 @@ int main(int argc, char ** argv) {
     }
 
     auto n_past = 0;
-    auto last_n_tokens_data = std::vector<llama_token>(params.repeat_last_n, 0);
+    auto last_n_tokens_data = std::vector<llama_token>(sparams.repeat_last_n, 0);
 
     // init
     llama_model * model;
--
cgit v1.2.3
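
For readers updating their own examples against this refactor, here is a minimal, self-contained sketch of the new access pattern. The struct definitions below are simplified stand-ins, not the real llama.cpp headers (the actual `gpt_params` and `llama_sampling_params` live in the `common` sources and carry many more fields); only the nesting of the sampling knobs under `params.sampling_params` is taken from the diff above.

```cpp
// Sketch of the access pattern this commit introduces: sampling
// parameters now live in a nested llama_sampling_params struct
// instead of directly on gpt_params.
#include <cstdint>
#include <cstdio>
#include <string>

// Simplified stand-in for llama.cpp's sampling params (hypothetical fields).
struct llama_sampling_params {
    int32_t repeat_last_n = 64;   // window of recent tokens to penalize
};

// Simplified stand-in for llama.cpp's gpt_params.
struct gpt_params {
    uint32_t    seed      = 0;
    int32_t     n_threads = 4;
    std::string prompt;
    llama_sampling_params sampling_params;  // nested sampling config
};

int main() {
    gpt_params params;

    // Bind a reference once, then set sampling options through it,
    // mirroring the diff: params.repeat_last_n no longer exists.
    llama_sampling_params & sparams = params.sampling_params;

    params.seed           = 42;
    params.n_threads      = 4;
    sparams.repeat_last_n = 64;   // was: params.repeat_last_n = 64;
    params.prompt         = "The quick brown fox";

    std::printf("repeat_last_n = %d\n", params.sampling_params.repeat_last_n);
    return 0;
}
```

Keeping the sampling options in one nested struct is what lets the refactor hold per-sequence sampler state (e.g. mirostat's mu) separately for each sequence instead of sharing one global copy.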