author    Jed Fox <git@jedfox.com>           2023-03-25 01:26:28 -0400
committer GitHub <noreply@github.com>        2023-03-25 07:26:28 +0200
commit    58e6c9f36f97d0a3e287b97256dc5f6b0e9fb5ae (patch)
tree      13a245a38e0a7331ce26aae6db53bc62d8bd6a2f /llama.h
parent    36d07532ef7ccf0bdc12e050472f359a6794957f (diff)
Add support for file load progress reporting callbacks (#434)
* File load progress reporting
* Move llama_progress_handler into llama_context_params
* Renames
* Use seekg to find file size instead
* More correct load progress
* Call progress callback more frequently
* Fix typo
Diffstat (limited to 'llama.h')
-rw-r--r--  llama.h  7 +++++++
1 file changed, 7 insertions, 0 deletions
diff --git a/llama.h b/llama.h
index 57123dbc..827abc1f 100644
--- a/llama.h
+++ b/llama.h
@@ -45,6 +45,8 @@ extern "C" {
 
     } llama_token_data;
 
+    typedef void (*llama_progress_callback)(double progress, void *ctx);
+
     struct llama_context_params {
         int n_ctx;   // text context
         int n_parts; // -1 for default
@@ -55,6 +57,11 @@ extern "C" {
         bool vocab_only; // only load the vocabulary, no weights
         bool use_mlock;  // force system to keep model in RAM
         bool embedding;  // embedding mode only
+
+        // called with a progress value between 0 and 1, pass NULL to disable
+        llama_progress_callback progress_callback;
+        // context pointer passed to the progress callback
+        void * progress_callback_user_data;
     };
 
     LLAMA_API struct llama_context_params llama_context_default_params();
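
For context, a minimal sketch of how a caller might hook into the new fields. It assumes the llama_init_from_file() / llama_free() API that llama.h exposed around the time of this commit; the print_progress callback, the label string, and the model path are illustrative, not part of the patch.

#include <stdio.h>

#include "llama.h"

// Illustrative callback matching the llama_progress_callback typedef:
// prints the current load progress to stderr as a percentage.
static void print_progress(double progress, void *ctx) {
    const char *label = (const char *) ctx; // user data is passed through unchanged
    fprintf(stderr, "\r%s: %3.0f%%", label, progress * 100.0);
    if (progress >= 1.0) {
        fprintf(stderr, "\n");
    }
}

int main(void) {
    struct llama_context_params params = llama_context_default_params();

    // Wire up the two new fields; leaving progress_callback NULL disables reporting.
    params.progress_callback           = print_progress;
    params.progress_callback_user_data = (void *) "loading model";

    // Placeholder model path; loading drives the callback with values between 0 and 1.
    struct llama_context *lctx = llama_init_from_file("models/7B/ggml-model-q4_0.bin", params);
    if (lctx == NULL) {
        fprintf(stderr, "failed to load model\n");
        return 1;
    }

    llama_free(lctx);
    return 0;
}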