| author | vvhg1 <94630311+vvhg1@users.noreply.github.com> | 2023-10-02 09:42:02 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2023-10-02 10:42:02 +0300 |
| commit | c97f01c362ac102c6994edb80008f8608539553a (patch) | |
| tree | c73baa81f489587329c3f768a3b940e353233012 /llama.cpp | |
| parent | f5ef5cfb18148131fcf45bdd2331f0db5ab7c3d0 (diff) | |
infill : add new example + extend server API (#3296)
* vvhg-code-infill (#1)
* infill in separate example (#2)
* reverted changes to main and added infill example
* cleanup
* naming improvement
* make : add missing blank line
* fix missing semicolon
* brought infill up to current main code
* cleanup
---------
Co-authored-by: Cebtenzzre <cebtenzzre@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 20
1 file changed, 20 insertions(+), 0 deletions(-)
```diff
@@ -1076,6 +1076,10 @@ struct llama_vocab {
     id special_pad_id = -1;
 
     id linefeed_id = 13;
+    id special_prefix_id = 32007;
+    id special_middle_id = 32009;
+    id special_suffix_id = 32008;
+    id special_eot_id = 32010;
 
     int find_bpe_rank(std::string token_left, std::string token_right) const {
         replace_all(token_left, " ", "\u0120");
@@ -7489,6 +7493,22 @@ llama_token llama_token_eos(const struct llama_context * ctx) {
 llama_token llama_token_nl(const struct llama_context * ctx) {
     return ctx->model.vocab.linefeed_id;
 }
 
+llama_token llama_token_prefix(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_prefix_id;
+}
+
+llama_token llama_token_middle(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_middle_id;
+}
+
+llama_token llama_token_suffix(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_suffix_id;
+}
+
+llama_token llama_token_eot(const struct llama_context * ctx) {
+    return ctx->model.vocab.special_eot_id;
+}
+
 int llama_tokenize(
         const struct llama_model * model,
```
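The four new accessors expose fill-in-the-middle (FIM) special tokens; the hard-coded default IDs (32007, 32008, 32009, 32010) match Code Llama's ▁<PRE>, ▁<SUF>, ▁<MID>, and ▁<EOT> tokens. A minimal sketch of how a caller might use this API to assemble a PSM-style (prefix-suffix-middle) infill prompt follows; `build_infill_prompt` and its pre-tokenized arguments are illustrative, and only the four `llama_token_*` accessors come from this commit:

```cpp
// Sketch (not from this commit): assemble a Code Llama style PSM infill
// prompt using the accessors added above. build_infill_prompt and the
// pre-tokenized inputs are hypothetical names for illustration.
#include <vector>

#include "llama.h"

std::vector<llama_token> build_infill_prompt(
        struct llama_context * ctx,
        const std::vector<llama_token> & prefix_tokens,   // code before the cursor
        const std::vector<llama_token> & suffix_tokens) { // code after the cursor
    std::vector<llama_token> out;
    out.push_back(llama_token_prefix(ctx));               // <PRE>
    out.insert(out.end(), prefix_tokens.begin(), prefix_tokens.end());
    out.push_back(llama_token_suffix(ctx));               // <SUF>
    out.insert(out.end(), suffix_tokens.begin(), suffix_tokens.end());
    out.push_back(llama_token_middle(ctx));               // <MID>
    return out;                                           // model generates the middle
}
```

Generation then proceeds as usual, and the caller stops when the sampled token equals `llama_token_eot(ctx)`, which marks the end of the infilled span.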