Diffstat (limited to 'common')
-rw-r--r--   common/CMakeLists.txt     2
-rw-r--r--   common/common.cpp        20
-rw-r--r--   common/common.h          28
-rw-r--r--   common/ngram-cache.cpp  280
-rw-r--r--   common/ngram-cache.h     94
5 files changed, 411 insertions, 13 deletions
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 10951693..1d840e5f 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -65,6 +65,8 @@ add_library(${TARGET} STATIC
json.hpp
train.h
train.cpp
+ ngram-cache.h
+ ngram-cache.cpp
)
if (BUILD_SHARED_LIBS)
diff --git a/common/common.cpp b/common/common.cpp
index de6eb960..69c2d5bf 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -963,6 +963,22 @@ static bool gpt_params_find_arg(int argc, char ** argv, const std::string & arg,
}
return true;
}
+ if (arg == "-lcs" || arg == "--lookup-cache-static") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.lookup_cache_static = argv[i];
+ return true;
+ }
+ if (arg == "-lcd" || arg == "--lookup-cache-dynamic") {
+ if (++i >= argc) {
+ invalid_param = true;
+ return true;
+ }
+ params.lookup_cache_dynamic = argv[i];
+ return true;
+ }
if (arg == "--save-all-logits" || arg == "--kl-divergence-base") {
if (++i >= argc) {
invalid_param = true;
@@ -1436,6 +1452,10 @@ void gpt_print_usage(int /*argc*/, char ** argv, const gpt_params & params) {
printf(" Hugging Face model file (default: unused)\n");
printf(" -ld LOGDIR, --logdir LOGDIR\n");
printf(" path under which to save YAML logs (no logging if unset)\n");
+ printf(" -lcs FNAME, --lookup-cache-static FNAME\n");
+ printf(" path to static lookup cache to use for lookup decoding (not updated by generation)\n");
+ printf(" -lcd FNAME, --lookup-cache-dynamic FNAME\n");
+ printf(" path to dynamic lookup cache to use for lookup decoding (updated by generation)\n");
printf(" --override-kv KEY=TYPE:VALUE\n");
printf(" advanced option to override model metadata by key. may be specified multiple times.\n");
printf(" types: int, float, bool. example: --override-kv tokenizer.ggml.add_bos_token=bool:false\n");
diff --git a/common/common.h b/common/common.h
index d827d4df..afa4cf6d 100644
--- a/common/common.h
+++ b/common/common.h
@@ -88,20 +88,22 @@ struct gpt_params {
// // sampling parameters
struct llama_sampling_params sparams;
- std::string model = "models/7B/ggml-model-f16.gguf"; // model path
- std::string model_draft = ""; // draft model for speculative decoding
- std::string model_alias = "unknown"; // model alias
- std::string model_url = ""; // model url to download
- std::string hf_repo = ""; // HF repo
- std::string hf_file = ""; // HF file
- std::string prompt = "";
- std::string prompt_file = ""; // store the external prompt file name
- std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
- std::string input_prefix = ""; // string to prefix user inputs with
- std::string input_suffix = ""; // string to suffix user inputs with
+ std::string model = "models/7B/ggml-model-f16.gguf"; // model path
+ std::string model_draft = ""; // draft model for speculative decoding
+ std::string model_alias = "unknown"; // model alias
+ std::string model_url = ""; // model url to download
+ std::string hf_repo = ""; // HF repo
+ std::string hf_file = ""; // HF file
+ std::string prompt = "";
+ std::string prompt_file = ""; // store the external prompt file name
+ std::string path_prompt_cache = ""; // path to file for saving/loading prompt eval state
+ std::string input_prefix = ""; // string to prefix user inputs with
+ std::string input_suffix = ""; // string to suffix user inputs with
std::vector<std::string> antiprompt; // string upon seeing which more user input is prompted
- std::string logdir = ""; // directory in which to save YAML log files
- std::string logits_file = ""; // file for saving *all* logits
+ std::string logdir = ""; // directory in which to save YAML log files
+ std::string lookup_cache_static = ""; // path of static ngram cache file for lookup decoding
+ std::string lookup_cache_dynamic = ""; // path of dynamic ngram cache file for lookup decoding
+ std::string logits_file = ""; // file for saving *all* logits
std::vector<llama_model_kv_override> kv_overrides;
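
[Usage sketch, not part of the diff: how the two new gpt_params fields might be turned into ngram caches, using llama_ngram_cache_load from the new common/ngram-cache.h below. The helper name load_lookup_caches and the error-handling policy are assumptions for illustration; llama_ngram_cache_load throws std::ifstream::failure when the file cannot be opened.]

#include "common.h"
#include "ngram-cache.h"

#include <fstream>
#include <string>

static void load_lookup_caches(const gpt_params & params,
                               llama_ngram_cache & nc_static,
                               llama_ngram_cache & nc_dynamic) {
    if (!params.lookup_cache_static.empty()) {
        std::string path = params.lookup_cache_static;  // load() takes a non-const reference
        nc_static = llama_ngram_cache_load(path);       // missing static cache: exception propagates
    }
    if (!params.lookup_cache_dynamic.empty()) {
        std::string path = params.lookup_cache_dynamic;
        try {
            nc_dynamic = llama_ngram_cache_load(path);
        } catch (const std::ifstream::failure &) {
            // no dynamic cache on disk yet; start with an empty one and save it later
        }
    }
}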
diff --git a/common/ngram-cache.cpp b/common/ngram-cache.cpp
new file mode 100644
index 00000000..20703d30
--- /dev/null
+++ b/common/ngram-cache.cpp
@@ -0,0 +1,280 @@
+#include "ngram-cache.h"
+#include "log.h"
+
+#include <fstream>
+
+void llama_ngram_cache_update(llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max,
+ std::vector<llama_token> & inp, int nnew, bool print_progress) {
+ const int64_t t_start_ms = ggml_time_ms();
+ const int64_t inp_size = inp.size();
+
+ const int64_t n_todo = inp_size * (ngram_max - ngram_min + 1);
+ int64_t n_done = 0;
+
+ for (int64_t ngram_size = ngram_min; ngram_size <= ngram_max; ++ngram_size) {
+ const int64_t i_start = std::max(inp_size - nnew, ngram_size);
+ for (int64_t i = i_start; i < inp_size; ++i) {
+ const int64_t ngram_start = i - ngram_size;
+ llama_ngram ngram(&inp[ngram_start], ngram_size);
+ const llama_token token = inp[i];
+
+ llama_ngram_cache::iterator part_it = ngram_cache.find(ngram);
+ if (part_it == ngram_cache.end()) {
+ llama_ngram_cache_part part;
+ part.emplace(token, 1);
+ ngram_cache.emplace(ngram, part);
+ } else {
+ llama_ngram_cache_part::iterator token_count_it = part_it->second.find(token);
+ if (token_count_it == part_it->second.end()) {
+ part_it->second.emplace(token, 1);
+ } else {
+ token_count_it->second++;
+ }
+ }
+ ++n_done;
+
+ if (print_progress && n_done % 10000000 == 0) {
+ const int64_t t_now_ms = ggml_time_ms();
+ const int64_t eta_ms = (inp_size*(ngram_max-ngram_min+1) - n_done) * (t_now_ms - t_start_ms) / n_done;
+ const int64_t eta_min = eta_ms / (60*1000);
+ const int64_t eta_s = (eta_ms - 60*1000*eta_min) / 1000;
+
+ fprintf(stderr, "%s: %" PRId64 "/%" PRId64 " done, ETA: %02" PRId64 ":%02" PRId64 "\n", __func__, n_done, n_todo, eta_min, eta_s);
+ }
+ }
+ }
+}
+
+// Helper function to get a token from the combined, speculative sequence of inp and draft.
+static llama_token get_token(const std::vector<llama_token> & inp, const std::vector<llama_token> & draft, const size_t i) {
+ return i < inp.size() ? inp[i] : draft[1 + i - inp.size()];
+}
+
+// If the sample size or percentage is below these thresholds, the draft is aborted early:
+constexpr int draft_min_sample_size_lax[LLAMA_NGRAM_MAX] = { 2, 2, 1, 1};
+constexpr int draft_min_percent_lax[LLAMA_NGRAM_MAX] = {66, 50, 50, 50};
+constexpr int draft_min_sample_size_strict[LLAMA_NGRAM_MAX] = { 4, 3, 2, 2};
+constexpr int draft_min_percent_strict[LLAMA_NGRAM_MAX] = {75, 66, 66, 66};
+
+// Helper function that tries to draft a token from only the static ngram cache:
+static llama_token try_draft(llama_ngram_cache & nc_static, const llama_ngram ngram_static) {
+ llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
+ if (part_static_it == nc_static.end()) {
+ return -1;
+ }
+ const llama_ngram_cache_part part_static = part_static_it->second;
+
+ int max_count_static = 0;
+ int sum_count_static = 0;
+ llama_token max_token = -1;
+
+ for (std::pair<llama_token, int> token_count_static : part_static) {
+ const llama_token token = token_count_static.first;
+ const int32_t count_static = token_count_static.second;
+
+ if (count_static > max_count_static) {
+ max_token = token;
+ max_count_static = count_static;
+ }
+ sum_count_static += count_static;
+ }
+
+ if (sum_count_static < draft_min_sample_size_lax[LLAMA_NGRAM_STATIC-1]) {
+ return -1;
+ }
+ if (100*max_count_static < draft_min_percent_lax[LLAMA_NGRAM_STATIC-1]*sum_count_static) {
+ return -1;
+ }
+ return max_token;
+}
+
+// Try to draft a token from primary cache (context/dynamic), validate with static cache:
+static llama_token try_draft(
+ llama_ngram_cache & nc_primary, const std::vector<llama_ngram> & ngrams_primary, llama_ngram_cache_part & part_static,
+ const int * min_sample_size, const int * min_percent) {
+
+ llama_token drafted_token = -1;
+
+ for (int i = ngrams_primary.size()-1; i >= 0 && drafted_token == -1; --i) {
+ const llama_ngram ngram_primary = ngrams_primary[i];
+
+ llama_ngram_cache::iterator part_primary_it = nc_primary.find(ngram_primary);
+ if (part_primary_it == nc_primary.end()) {
+ continue;
+ }
+ const llama_ngram_cache_part part_primary = part_primary_it->second;
+
+ int max_count_primary = 0;
+ int max_count_static = 0;
+ int sum_count_primary = 0;
+ llama_token max_token = -1;
+
+ for (std::pair<llama_token, int> token_count_primary : part_primary) {
+ const llama_token token = token_count_primary.first;
+
+ llama_ngram_cache_part::iterator token_count_static_it = part_static.find(token);
+
+ const int32_t count_primary = token_count_primary.second;
+ const int32_t count_static = token_count_static_it != part_static.end() ? 100*token_count_static_it->second : 1;
+
+ if (count_primary*count_static > max_count_primary*max_count_static) {
+ max_token = token;
+ max_count_primary = count_primary;
+ max_count_static = count_static;
+ }
+ sum_count_primary += count_primary;
+ }
+
+ if (sum_count_primary < min_sample_size[i]) {
+ continue;
+ }
+ if (100*max_count_primary < min_percent[i]*sum_count_primary) {
+            continue;
+ }
+ drafted_token = max_token;
+ }
+
+ return drafted_token;
+}
+
+void llama_ngram_cache_draft(
+ std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
+ llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static
+) {
+ GGML_ASSERT(draft.size() == 1);
+ const int inp_size = inp.size();
+
+ if (inp_size < LLAMA_NGRAM_STATIC) {
+ return;
+ }
+
+ while ((int) draft.size()-1 < n_draft) {
+ llama_token drafted_token = -1;
+
+ const int ngram_start_static = inp_size-LLAMA_NGRAM_STATIC + draft.size()-1;
+ llama_ngram ngram_static;
+ for (int j = ngram_start_static; j < ngram_start_static + LLAMA_NGRAM_STATIC; ++j) {
+ ngram_static.tokens[j-ngram_start_static] = get_token(inp, draft, j);
+ }
+ llama_ngram_cache::iterator part_static_it = nc_static.find(ngram_static);
+ llama_ngram_cache_part part_static;
+ if (part_static_it != nc_static.end()) {
+ part_static = part_static_it->second;
+ }
+
+ // cd = context + dynamic
+ std::vector<llama_ngram> ngrams_cd;
+ for (int ngram_size_cd = ngram_min; ngram_size_cd <= ngram_max; ++ngram_size_cd) {
+ const int ngram_start_cd = inp_size-ngram_size_cd + draft.size()-1;
+ llama_ngram ngram_cd;
+ for (int j = ngram_start_cd; j < ngram_start_cd + ngram_size_cd; ++j) {
+ ngram_cd.tokens[j-ngram_start_cd] = get_token(inp, draft, j);
+ }
+ ngrams_cd.push_back(ngram_cd);
+ }
+ if (drafted_token == -1) {
+ drafted_token = try_draft(nc_context, ngrams_cd, part_static, draft_min_sample_size_lax, draft_min_percent_lax);
+ }
+ if (drafted_token == -1) {
+ drafted_token = try_draft(nc_dynamic, ngrams_cd, part_static, draft_min_sample_size_strict, draft_min_percent_strict);
+ }
+ if (drafted_token == -1) {
+ drafted_token = try_draft(nc_static, ngram_static);
+ }
+
+ if (drafted_token == -1) {
+ break;
+ }
+
+ LOG(" - draft candidate: token=%d\n", drafted_token);
+ draft.push_back(drafted_token);
+ }
+}
+
+void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename) {
+ std::ofstream file_out(filename, std::ios::binary);
+ for (std::pair<llama_ngram, llama_ngram_cache_part> item : ngram_cache) {
+ const llama_ngram ngram = item.first;
+ llama_ngram_cache_part token_counts = item.second;
+ GGML_ASSERT(!token_counts.empty());
+ const int32_t ntokens = token_counts.size();
+ GGML_ASSERT(ntokens > 0);
+
+ file_out.write(reinterpret_cast<const char *>(&ngram), sizeof(llama_ngram));
+ file_out.write(reinterpret_cast<const char *>(&ntokens), sizeof(int32_t));
+ for (std::pair<llama_token, int32_t> item2 : token_counts) {
+ const llama_token token = item2.first;
+ const int32_t count = item2.second;
+ GGML_ASSERT(count > 0);
+
+ file_out.write(reinterpret_cast<const char *>(&token), sizeof(llama_token));
+ file_out.write(reinterpret_cast<const char *>(&count), sizeof(int32_t));
+ }
+ }
+
+}
+
+llama_ngram_cache llama_ngram_cache_load(std::string & filename) {
+ std::ifstream hashmap_file(filename, std::ios::binary);
+ if (!hashmap_file) {
+ throw std::ifstream::failure("Unable to open file " + filename);
+ }
+ llama_ngram_cache ngram_cache;
+
+ llama_ngram ngram;
+ int32_t ntokens;
+ llama_token token;
+ int32_t count;
+
+ char * ngramc = reinterpret_cast<char*>(&ngram);
+ char * ntokensc = reinterpret_cast<char*>(&ntokens);
+ char * tokenc = reinterpret_cast<char*>(&token);
+ char * countc = reinterpret_cast<char*>(&count);
+ while(hashmap_file.read(ngramc, sizeof(llama_ngram))) {
+ GGML_ASSERT(!hashmap_file.eof());
+ GGML_ASSERT(hashmap_file.read(ntokensc, sizeof(int32_t)));
+ GGML_ASSERT(ntokens > 0);
+ llama_ngram_cache_part token_counts;
+
+ for (int i = 0; i < ntokens; ++i) {
+ GGML_ASSERT(!hashmap_file.eof());
+ GGML_ASSERT(hashmap_file.read(tokenc, sizeof(llama_token)));
+ GGML_ASSERT(!hashmap_file.eof());
+ GGML_ASSERT(hashmap_file.read(countc, sizeof(int32_t)));
+ GGML_ASSERT(count > 0);
+ token_counts.emplace(token, count);
+ }
+
+ ngram_cache.emplace(ngram, token_counts);
+ }
+ GGML_ASSERT(hashmap_file.eof());
+
+ return ngram_cache;
+}
+
+void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add) {
+ for (std::pair<llama_ngram, llama_ngram_cache_part> ngram_part : ngram_cache_add) {
+ const llama_ngram ngram = ngram_part.first;
+ llama_ngram_cache_part part = ngram_part.second;
+
+ llama_ngram_cache::iterator part_merged_it = ngram_cache_target.find(ngram);
+ if (part_merged_it == ngram_cache_target.end()) {
+ ngram_cache_target.emplace(ngram, part);
+ continue;
+ }
+
+ for (std::pair<llama_token, int32_t> token_count : part) {
+ const llama_token token = token_count.first;
+ const int32_t count = token_count.second;
+ GGML_ASSERT(count > 0);
+
+ llama_ngram_cache_part::iterator token_count_merged_it = part_merged_it->second.find(token);
+ if (token_count_merged_it == part_merged_it->second.end()) {
+ part_merged_it->second.emplace(token, count);
+ continue;
+ }
+
+ token_count_merged_it->second += count;
+ }
+ }
+}
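
[Usage sketch, not part of the diff: the intended call pattern for the update/draft functions above during generation. It assumes inp already ends with the most recently sampled token (matching the indexing in get_token) and that exactly one token was appended since the previous call; the helper name is made up.]

#include "ngram-cache.h"

static std::vector<llama_token> draft_next_tokens(
        std::vector<llama_token> & inp,         // all tokens so far, ending with the last sampled token
        llama_ngram_cache        & nc_context,
        llama_ngram_cache        & nc_dynamic,
        llama_ngram_cache        & nc_static,
        int                        n_draft) {
    // Keep the context cache in sync with the single token appended since the last call.
    llama_ngram_cache_update(nc_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);

    // The draft must start out containing only the previously sampled token
    // (llama_ngram_cache_draft asserts draft.size() == 1).
    std::vector<llama_token> draft = {inp.back()};
    llama_ngram_cache_draft(inp, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX,
                            nc_context, nc_dynamic, nc_static);
    return draft;   // draft[0] repeats the last sampled token, draft[1..] are the drafted tokens
}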
diff --git a/common/ngram-cache.h b/common/ngram-cache.h
new file mode 100644
index 00000000..e4fa4cbd
--- /dev/null
+++ b/common/ngram-cache.h
@@ -0,0 +1,94 @@
+#pragma once
+
+#include "llama.h"
+
+#include <unordered_map>
+#include <string>
+#include <vector>
+
+#define LLAMA_NGRAM_MIN 1
+#define LLAMA_NGRAM_MAX 4
+#define LLAMA_NGRAM_STATIC 2
+
+// Data structures to map n-grams to empirical token probabilities:
+
+struct llama_ngram {
+ llama_token tokens[LLAMA_NGRAM_MAX];
+
+ llama_ngram() {
+ for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
+ tokens[i] = -1;
+ }
+ }
+
+ llama_ngram(const llama_token * input, const int ngram_size) {
+ for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
+ tokens[i] = i < ngram_size ? input[i] : -1;
+ }
+ }
+
+ bool operator==(const llama_ngram & other) const {
+ for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
+ if (tokens[i] != other.tokens[i]) {
+ return false;
+ }
+ }
+ return true;
+ }
+};
+
+struct llama_ngram_hash_function {
+ size_t operator()(const llama_ngram & ngram) const {
+ size_t hash = 0;
+ for (int i = 0; i < LLAMA_NGRAM_MAX; ++i) {
+ hash ^= std::hash<llama_token>{}(ngram.tokens[i]);
+ }
+ return hash;
+ }
+};
+
+// token -> number of times token has been seen
+typedef std::unordered_map<llama_token, int32_t> llama_ngram_cache_part;
+
+// n-gram -> empirical distribution of following tokens
+typedef std::unordered_map<llama_ngram, llama_ngram_cache_part, llama_ngram_hash_function> llama_ngram_cache;
+
+
+// Update an ngram cache with tokens.
+// ngram_cache: the cache to modify.
+// ngram_min/ngram_max: the min/max size of the ngrams to extract from inp_data.
+// inp_data: the token sequence with which to update ngram_cache.
+// nnew: how many new tokens have been appended to inp_data since the last call to this function.
+// print_progress: whether to print progress to stderr.
+//
+// In order to get correct results, inp_data can ONLY BE APPENDED TO.
+// Changes in the middle need a complete rebuild.
+void llama_ngram_cache_update(
+ llama_ngram_cache & ngram_cache, int ngram_min, int ngram_max, std::vector<llama_token> & inp_data, int nnew, bool print_progress);
+
+// Try to draft tokens from ngram caches.
+// inp: the tokens generated so far.
+// draft: the token sequence to draft. Expected to initially contain the previously sampled token.
+// n_draft: maximum number of tokens to add to draft.
+// ngram_min/ngram_max: the min/max size of the ngrams in nc_context and nc_dynamic.
+// nc_context: ngram cache based on current context.
+// nc_dynamic: ngram cache based on previous user generations.
+// nc_static: ngram cache generated from a large text corpus, used for validation.
+void llama_ngram_cache_draft(
+ std::vector<llama_token> & inp, std::vector<llama_token> & draft, int n_draft, int ngram_min, int ngram_max,
+ llama_ngram_cache & nc_context, llama_ngram_cache & nc_dynamic, llama_ngram_cache & nc_static);
+
+// Save an ngram cache to a file.
+// ngram_cache: the ngram cache to save.
+// filename: the path under which to save the ngram cache.
+void llama_ngram_cache_save(llama_ngram_cache & ngram_cache, std::string & filename);
+
+// Load an ngram cache saved with llama_ngram_cache_save.
+// filename: the path from which to load the ngram cache.
+// returns: an ngram cache containing the information saved to filename.
+llama_ngram_cache llama_ngram_cache_load(std::string & filename);
+
+// Merge two ngram caches.
+// ngram_cache_target: the ngram cache to which to add the information from ngram_cache_add.
+// ngram_cache_add: the ngram cache to add to ngram_cache_target.
+void llama_ngram_cache_merge(llama_ngram_cache & ngram_cache_target, llama_ngram_cache & ngram_cache_add);
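
[Usage sketch, not part of the diff: persisting the dynamic cache across runs with the load/merge/save functions declared above. The helper name and the merge-into-previous-save policy are assumptions; the path would typically come from params.lookup_cache_dynamic.]

#include "ngram-cache.h"

#include <fstream>
#include <string>

static void save_dynamic_cache(llama_ngram_cache & nc_session, std::string path) {
    llama_ngram_cache nc_merged;
    try {
        // Start from whatever an earlier run saved, if anything.
        nc_merged = llama_ngram_cache_load(path);
    } catch (const std::ifstream::failure &) {
        // First run: nothing on disk yet.
    }
    // Add this session's counts on top of the stored ones, then write back.
    llama_ngram_cache_merge(nc_merged, nc_session);
    llama_ngram_cache_save(nc_merged, path);
}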