author    Georgi Gerganov <ggerganov@gmail.com>  2023-08-23 23:08:04 +0300
committer GitHub <noreply@github.com>  2023-08-23 23:08:04 +0300
commit    cf658adc832badaaa2ca119fe86070e5a830f8f6 (patch)
tree      e314db2fb18676067ddbc5cde0cf7f73c417af29 /common/common.cpp
parent    a192860cfec89a38d59a943623bf595b1fe4495b (diff)
llm : add Falcon support (#2717)
* llama : refactor GGUF constants into static maps
* llama : check if model architecture is known
* llama : refactor llama_model_load_internal()
* gguf : add KV constant maps
* llm : read arch-specific KVs
* convert : add dummy scores + types
* falcon : load tensor data (CPU only)
* llama : fix loading progress bar
* llama : add arch member to llama_model
* falcon : CPU inference working
* falcon : support non-40B models
* falcon : minor
* llama : minor updates

ggml-ci

* convert-falcon-hf-to-gguf.py : fix special token mapping
* llama.cpp : llama default UNK token = id 0
* llama.cpp : fix bpe tokenizer
* llama.cpp : fix the fix of bpe tokenizer
* ggml : pass eps to ggml_norm
* metal : implement RoPE (mode = 2) + avoid ggml_repeat
* ggml : ggml_repeat always creates new tensor
* falcon : copy-paste self-attention from LLaMA
* metal : print extra compute pipeline info
* falcon : minor changes (still chasing the Metal problem)
* llama.cpp : fix linefeed token
* metal : fix GELU kernel numerical stability by using precise::tanh
* metal : temporary workaround for the concurrency optimization bug
* falcon : add CUDA offloading (#2739)
* llama : better model naming and size reporting
* llama : prep new tokenizer support
* llama : advanced BPE tokenizer based on ggllm.cpp implementation
* llama : remove obsolete comment

ggml-ci

* common : remove obsolete BPE API + disable test-tokenizer-1
* llama : revert BPE special-case in llama_byte_to_token()
* cuda : add TODOs for RoPE NeoX implementation
* llama : default special tokens based on vocab type
* perplexity : add log for start of tokenization

---------

Co-authored-by: klosax <131523366+klosax@users.noreply.github.com>
Co-authored-by: slaren <slarengh@gmail.com>
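The wrappers deleted in the diff below are superseded by the unified tokenizer entry point, which dispatches on the vocab type read from the GGUF metadata rather than requiring a BPE-specific call. As a minimal sketch (not the commit's own code), assuming the remaining C API llama_tokenize(ctx, text, tokens, n_max_tokens, add_bos) keeps the convention of returning the negated required size when the buffer is too small, the surviving common.cpp wrapper looks like this:

    std::vector<llama_token> llama_tokenize(
            struct llama_context * ctx,
            const std::string & text,
            bool add_bos) {
        // Upper bound on the token count: at most one token per byte,
        // plus one slot for the optional BOS token.
        int n_tokens = text.length() + add_bos;
        std::vector<llama_token> result(n_tokens);
        n_tokens = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
        if (n_tokens < 0) {
            // Buffer too small: -n_tokens is the exact size, so resize and retry.
            result.resize(-n_tokens);
            const int check = llama_tokenize(ctx, text.c_str(), result.data(), result.size(), add_bos);
            GGML_ASSERT(check == -n_tokens);
        } else {
            result.resize(n_tokens);
        }
        return result;
    }

Call sites that previously used llama_tokenize_bpe migrate by switching to this wrapper; the buffer-probe behavior is identical.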
Diffstat (limited to 'common/common.cpp')
-rw-r--r--  common/common.cpp | 32 --------------------------------
1 file changed, 0 insertions(+), 32 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 88a962ae..53002ba3 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -744,35 +744,3 @@ std::string llama_token_to_str(const struct llama_context * ctx, llama_token tok
return std::string(result.data(), result.size());
}
-
-std::vector<llama_token> llama_tokenize_bpe(
- struct llama_context * ctx,
- const std::string & text,
- bool add_bos) {
- int n_tokens = text.length() + add_bos;
- std::vector<llama_token> result(n_tokens);
- n_tokens = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
- if (n_tokens < 0) {
- result.resize(-n_tokens);
- int check = llama_tokenize_bpe(ctx, text.c_str(), result.data(), result.size(), add_bos);
- GGML_ASSERT(check == -n_tokens);
- } else {
- result.resize(n_tokens);
- }
- return result;
-}
-
-std::string llama_token_to_str_bpe(const struct llama_context * ctx, llama_token token) {
- std::vector<char> result(8, 0);
- const int n_tokens = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
- if (n_tokens < 0) {
- result.resize(-n_tokens);
- const int check = llama_token_to_str_bpe(ctx, token, result.data(), result.size());
- GGML_ASSERT(check == -n_tokens);
- } else {
- result.resize(n_tokens);
- }
-
- return std::string(result.data(), result.size());
-}
-
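Both deleted helpers follow the same two-pass size-probe convention: call once with a guess-sized buffer, and on a negative return resize to the exact size and call again. A self-contained illustration of the pattern, with a hypothetical fill_buf callee that is not part of llama.cpp:

    #include <cassert>
    #include <string>
    #include <vector>

    // Hypothetical callee following the same convention as the deleted API:
    // writes up to `size` bytes into `buf` and returns the number written,
    // or the negated required size if `size` is too small.
    static int fill_buf(char * buf, int size) {
        static const std::string payload = "tokenizer output";
        if ((int) payload.size() > size) {
            return -(int) payload.size();
        }
        payload.copy(buf, payload.size());
        return (int) payload.size();
    }

    // Two-pass caller, mirroring llama_token_to_str_bpe above: start with a
    // small fixed buffer, then resize to the exact size on a negative return.
    static std::string read_payload() {
        std::vector<char> result(8, 0);
        const int n = fill_buf(result.data(), (int) result.size());
        if (n < 0) {
            result.resize(-n);
            const int check = fill_buf(result.data(), (int) result.size());
            assert(check == -n);
        } else {
            result.resize(n);
        }
        return std::string(result.data(), result.size());
    }

    int main() {
        return read_payload() == "tokenizer output" ? 0 : 1;
    }

The first call with an 8-byte buffer fails and reports the exact size needed; the retry is then guaranteed to fit, which is why the original code can assert on the second return value.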