author    Kawrakow <48489457+ikawrakow@users.noreply.github.com>  2024-07-27 07:55:01 +0200
committer GitHub <noreply@github.com>  2024-07-27 07:55:01 +0200
commit    154e0d75fccf1784fe9ff6fd76a630b66563da3d (patch)
tree      81ce6dbb5b1900c1aa78a879f0593c694cab9d27 /examples/infill/infill.cpp
parent    0684c3e9c70d49323b4fc517128cbe222cab7f96 (diff)
Merge mainline llama.cpp (#3)
* Merging mainline - WIP

* Merging mainline - WIP

  AVX2 and CUDA appear to work. CUDA performance seems slightly (~1-2%)
  lower, as is so often the case with llama.cpp/ggml after some
  "improvements" have been made.

* Merging mainline - fix Metal

* Remove check

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'examples/infill/infill.cpp')
-rw-r--r--  examples/infill/infill.cpp  46
1 file changed, 19 insertions(+), 27 deletions(-)
diff --git a/examples/infill/infill.cpp b/examples/infill/infill.cpp
index 3e82e4a8..dc93d230 100644
--- a/examples/infill/infill.cpp
+++ b/examples/infill/infill.cpp
@@ -204,25 +204,23 @@ int main(int argc, char ** argv) {
GGML_ASSERT(llama_add_eos_token(model) != 1);
LOG("add_bos: %d\n", add_bos);
- bool suff_rm_leading_spc = params.escape;
- if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
- params.input_suffix.erase(0, 1);
- suff_rm_leading_spc = false;
- }
std::vector<llama_token> embd_inp;
+ std::vector<llama_token> embd_end;
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
- const int space_token = 29871;
- if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
- inp_sfx.erase(inp_sfx.begin());
- }
+
+ GGML_ASSERT(llama_token_prefix(model) >= 0);
+ GGML_ASSERT(llama_token_suffix(model) >= 0);
+
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
+ inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
+
+ embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
+ embd_end = params.spm_infill ? inp_pfx : inp_sfx;
if (add_bos) {
- inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model));
+ embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
}
- inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
- embd_inp = inp_pfx;
- embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
+ embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
const llama_token middle_token = llama_token_middle(model);
if (middle_token >= 0) {
@@ -514,26 +512,21 @@ int main(int argc, char ** argv) {
string_process_escapes(params.input_prefix);
string_process_escapes(params.input_suffix);
}
- suff_rm_leading_spc = params.escape;
- if (suff_rm_leading_spc && params.input_suffix.find_first_of(' ') == 0 && params.input_suffix.size() > 1) {
- params.input_suffix.erase(0, 1);
- suff_rm_leading_spc = false;
- }
+
// tokenize new prefix and suffix
std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, params.input_prefix, false);
std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, params.input_suffix, false);
- if (suff_rm_leading_spc && inp_sfx[0] == space_token) {
- inp_sfx.erase(inp_sfx.begin());
- }
+
inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
+ inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
+
+ embd_inp = params.spm_infill ? inp_sfx : inp_pfx;
+ embd_end = params.spm_infill ? inp_pfx : inp_sfx;
if (add_bos) {
- inp_pfx.insert(inp_pfx.begin(), llama_token_bos(model));
+ embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
}
- inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));
- embd_inp = inp_pfx;
- embd_inp.insert(embd_inp.end(), inp_sfx.begin(), inp_sfx.end());
+ embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
- const llama_token middle_token = llama_token_middle(model);
if (middle_token >= 0) {
embd_inp.push_back(middle_token);
}
@@ -657,4 +650,3 @@ int main(int argc, char ** argv) {
return 0;
}
-
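The change above drops the old Llama-specific workaround of stripping a leading space from the suffix (keyed on the hard-coded space_token 29871) and instead builds the infill prompt purely from the model's own FIM sentinel tokens, with params.spm_infill selecting suffix-prefix-middle (SPM) ordering instead of the default prefix-suffix-middle (PSM). The sketch below restates the new construction as a standalone helper for clarity; the function name and signature are illustrative and not part of the diff, but the API calls (::llama_tokenize, llama_token_prefix/suffix/middle, llama_token_bos) are the ones the patched code uses.

    #include <string>
    #include <vector>

    #include "common.h" // provides the ::llama_tokenize convenience wrapper
    #include "llama.h"

    // Illustrative helper: assembles the infill prompt the same way the
    // patched infill.cpp does after this commit.
    static std::vector<llama_token> build_infill_prompt(
            llama_context * ctx, llama_model * model,
            const std::string & prefix, const std::string & suffix,
            bool spm_infill, bool add_bos) {
        // Tokenize the user-supplied prefix and suffix without special tokens.
        std::vector<llama_token> inp_pfx = ::llama_tokenize(ctx, prefix, false);
        std::vector<llama_token> inp_sfx = ::llama_tokenize(ctx, suffix, false);

        // Lead each span with its FIM sentinel token; the asserts in the diff
        // guarantee the model actually defines these.
        inp_pfx.insert(inp_pfx.begin(), llama_token_prefix(model));
        inp_sfx.insert(inp_sfx.begin(), llama_token_suffix(model));

        // PSM order (prefix, suffix, middle) by default; SPM order
        // (suffix, prefix, middle) when spm_infill is set.
        std::vector<llama_token> embd_inp = spm_infill ? inp_sfx : inp_pfx;
        std::vector<llama_token> embd_end = spm_infill ? inp_pfx : inp_sfx;

        // BOS, when the model expects one, goes before everything else.
        if (add_bos) {
            embd_inp.insert(embd_inp.begin(), llama_token_bos(model));
        }
        embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());

        // The middle token marks where generation should fill in; models
        // without one report a negative id and it is simply omitted.
        const llama_token middle_token = llama_token_middle(model);
        if (middle_token >= 0) {
            embd_inp.push_back(middle_token);
        }
        return embd_inp;
    }

Factoring the ordering through the two ternaries also removes the duplicated prefix/suffix plumbing that previously appeared in both the startup path and the interactive re-tokenization path, which is why the same hunk shows up twice in the diff.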