author | Georgi Gerganov <ggerganov@gmail.com> | 2023-09-05 08:46:17 +0300
committer | GitHub <noreply@github.com> | 2023-09-05 08:46:17 +0300
commit | 921772104ba2219bfdc2b2980d05ebc0aa0c92a4 (patch)
tree | c9ba8f0f8d0471c9c3c85a6963b322affa0f4f97 /common/common.cpp
parent | 2ba85c8609309a59d49c45ab43c31800b7ba141c (diff)
speculative : add grammar support (#2991)
* speculative : add grammar support
* grammars : add json_arr.gbnf
* grammar : add comments to new grammar file
* grammar : remove one nested level
* common : warm-up with 2 tokens - seems to work better
* speculative : print draft token pieces
* speculative : reuse grammar parser + better logs and comments
* speculative : avoid grammar_mem
* make : fix speculative build
Diffstat (limited to 'common/common.cpp')
-rw-r--r-- | common/common.cpp | 2
1 file changed, 1 insertion(+), 1 deletion(-)
```diff
diff --git a/common/common.cpp b/common/common.cpp
index 74e1b6fd..d4f9dbf5 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -772,7 +772,7 @@ std::tuple<struct llama_model *, struct llama_context *> llama_init_from_gpt_par
     {
         LOG("warming up the model with an empty run\n");
 
-        const std::vector<llama_token> tmp = { llama_token_bos(lctx), };
+        const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };
         llama_eval(lctx, tmp.data(), tmp.size(), 0, params.n_threads);
         llama_reset_timings(lctx);
     }
```
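For context, the change makes the warm-up feed the model a BOS token followed by an EOS token instead of BOS alone, before any timed evaluation (per the commit message, this "seems to work better"). The sketch below restates the patched logic as a standalone helper; it is a minimal illustration, assuming the C API as it existed at this revision (llama_eval and context-based llama_token_bos/llama_token_eos, which were later superseded by llama_decode and model-based token queries) and a context created via llama_init_from_gpt_params as in the hunk above. The helper name warm_up is purely illustrative.

```cpp
// Minimal sketch of the warm-up pattern after this commit, assuming the
// llama.cpp C API of this era. `lctx` is assumed to come from
// llama_init_from_gpt_params; `n_threads` corresponds to params.n_threads.
#include <vector>

#include "llama.h"

static void warm_up(llama_context * lctx, int n_threads) {
    // Warm up with BOS followed by EOS rather than BOS alone.
    const std::vector<llama_token> tmp = { llama_token_bos(lctx), llama_token_eos(lctx), };

    // Run one untimed evaluation so later measurements exclude one-off setup costs.
    llama_eval(lctx, tmp.data(), tmp.size(), 0, n_threads);

    // Drop the warm-up run from the timing statistics.
    llama_reset_timings(lctx);
}
```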