From b6e7f9b09e9c340ec97a2fae61c1eb8db861f2f9 Mon Sep 17 00:00:00 2001
From: xaedes
Date: Sat, 22 Apr 2023 08:21:32 +0200
Subject: llama : add api for getting/setting the complete state: rng, logits, embedding and kv_cache (#1105)

* reserve correct size for logits

* add functions to get and set the whole llama state: including rng, logits, embedding and kv_cache

* remove unused variables

* remove trailing whitespace

* fix comment
---
 llama.h | 12 ++++++++++++
 1 file changed, 12 insertions(+)

(limited to 'llama.h')

diff --git a/llama.h b/llama.h
index e95ff73b..f68a0cb4 100644
--- a/llama.h
+++ b/llama.h
@@ -129,6 +129,18 @@ extern "C" {
                           size_t   n_size,
                              int   n_token_count);
 
+    // Returns the size in bytes of the state (rng, logits, embedding and kv_cache)
+    LLAMA_API size_t llama_get_state_size(struct llama_context * ctx);
+
+    // Copies the state to the specified destination address.
+    // Destination needs to have allocated enough memory.
+    // Returns the number of bytes copied
+    LLAMA_API size_t llama_copy_state_data(struct llama_context * ctx, uint8_t * dest);
+
+    // Set the state reading from the specified address
+    // Returns the number of bytes read
+    LLAMA_API size_t llama_set_state_data(struct llama_context * ctx, const uint8_t * src);
+
     // Run the llama inference to obtain the logits and probabilities for the next token.
     // tokens + n_tokens is the provided batch of new tokens to process
     // n_past is the number of tokens to use from previous eval calls
-- 
cgit v1.2.3
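
Usage note (not part of the patch): a minimal sketch of how a caller might snapshot and restore the full context state with the three functions added above. It assumes `ctx` is an already-initialized struct llama_context *; model loading and error handling around it are out of scope here.

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #include "llama.h"

    // Snapshot the full state (rng, logits, embedding and kv_cache) into a
    // heap buffer. The caller owns the returned buffer and must free() it.
    static uint8_t * save_state(struct llama_context * ctx, size_t * out_size) {
        // Ask how large the serialized state is before allocating.
        const size_t n_state = llama_get_state_size(ctx);

        uint8_t * buf = malloc(n_state);
        if (buf == NULL) {
            return NULL;
        }

        // Copy the state into the buffer; returns the number of bytes copied.
        *out_size = llama_copy_state_data(ctx, buf);
        return buf;
    }

    // Replay a previously saved state back into the context.
    static void restore_state(struct llama_context * ctx, const uint8_t * buf) {
        const size_t n_read = llama_set_state_data(ctx, buf);
        fprintf(stderr, "restored %zu bytes of state\n", n_read);
    }

Calling save_state() before a speculative eval and restore_state() afterwards lets a client roll the rng, logits, embedding and kv_cache back to the saved point, which is the scenario this API targets.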