diff options
author | Marcus Dunn <51931484+MarcusDunn@users.noreply.github.com> | 2023-10-23 12:40:03 -0700 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-10-23 22:40:03 +0300 |
commit | 5be6c803fa5378f62a1590f3ad8c6b64c7c0c2ce (patch) | |
tree | 190868e0431070686d797c3c2d86da857b8ba55f /examples/main | |
parent | 6336701c9378c23c85d1c0e464b663ca2bbb8e60 (diff) |
llama : remove token functions with `context` args in favor of `model` (#3720)
* added `llama_model_token_*` variants to all the `llama_token_*` functions.
* added `LLAMA_API`
* formatting
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* removed old `llama_token` functions
* changed 3 more functions to take in model
- `llama_token_get_text`
- `llama_token_get_score`
- `llama_token_get_type`
* added back docs
* fixed main.cpp
* changed token functions to use new model variants
* changed token functions to use new model variants
---------
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'examples/main')
-rw-r--r-- | examples/main/main.cpp | 8 |
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/examples/main/main.cpp b/examples/main/main.cpp index 2621bd53..3d9f670b 100644 --- a/examples/main/main.cpp +++ b/examples/main/main.cpp @@ -248,7 +248,7 @@ int main(int argc, char ** argv) { // Should not run without any tokens if (embd_inp.empty()) { - embd_inp.push_back(llama_token_bos(ctx)); + embd_inp.push_back(llama_token_bos(model)); LOG("embd_inp was considered empty and bos was added: %s\n", LOG_TOKENS_TOSTR_PRETTY(ctx, embd_inp).c_str()); } @@ -693,7 +693,7 @@ int main(int argc, char ** argv) { } // deal with end of text token in interactive mode - if (llama_sampling_last(ctx_sampling) == llama_token_eos(ctx)) { + if (llama_sampling_last(ctx_sampling) == llama_token_eos(model)) { LOG("found EOS token\n"); if (params.interactive) { @@ -720,7 +720,7 @@ int main(int argc, char ** argv) { if (params.input_prefix_bos) { LOG("adding input prefix BOS token\n"); - embd_inp.push_back(llama_token_bos(ctx)); + embd_inp.push_back(llama_token_bos(model)); } std::string buffer; @@ -804,7 +804,7 @@ int main(int argc, char ** argv) { } // end of text token - if (!embd.empty() && embd.back() == llama_token_eos(ctx) && !(params.instruct || params.interactive)) { + if (!embd.empty() && embd.back() == llama_token_eos(model) && !(params.instruct || params.interactive)) { LOG_TEE(" [end of text]\n"); break; } |