summaryrefslogtreecommitdiff
path: root/examples/save-load-state/save-load-state.cpp
diff options
context:
space:
mode:
author    David Friehs <david@friehs.info>  2024-01-13 17:29:43 +0100
committer GitHub <noreply@github.com>       2024-01-13 18:29:43 +0200
commit df845cc982e7e2ea7b9900e29d55b15338faa78d (patch)
tree   07c1eb5f5b9a3ac21fa70e499029907d9d90b008 /examples/save-load-state/save-load-state.cpp
parent 6b48ed089377330cdb362970a51c1c89b6d857a8 (diff)
llama : minimize size used for state save/load (#4820)
* examples : save-load-state: save only required state

* llama : only reserve n_vocab * n_batch at most for logits

  llama_decode asserts that only n_batch tokens are passed each call, and
  n_ctx is expected to be bigger than n_batch.

* llama : always reserve n_vocab * n_batch for logits

  llama_context de-serialization breaks if the contexts have differing
  capacity for logits and llama_decode will at maximum resize to
  n_vocab * n_batch.

* llama : only save and restore used logits

  for batch sizes of 512 this reduces save state in the best case by
  around 62 MB, which can be a lot if planning to save on each message
  to allow regenerating messages.

* llama : use ostringstream and istringstream for save and load

* llama : serialize rng into minimum amount of space required

* llama : break session version due to serialization changes
Diffstat (limited to 'examples/save-load-state/save-load-state.cpp')
-rw-r--r--  examples/save-load-state/save-load-state.cpp | 21
1 file changed, 10 insertions, 11 deletions
diff --git a/examples/save-load-state/save-load-state.cpp b/examples/save-load-state/save-load-state.cpp
index 48d80111..ef952e2b 100644
--- a/examples/save-load-state/save-load-state.cpp
+++ b/examples/save-load-state/save-load-state.cpp
@@ -45,13 +45,13 @@ int main(int argc, char ** argv) {
// save state (rng, logits, embedding and kv_cache) to file
{
std::vector<uint8_t> state_mem(llama_get_state_size(ctx));
+ const size_t written = llama_copy_state_data(ctx, state_mem.data());
- {
- FILE *fp_write = fopen("dump_state.bin", "wb");
- llama_copy_state_data(ctx, state_mem.data()); // could also copy directly to memory mapped file
- fwrite(state_mem.data(), 1, state_mem.size(), fp_write);
- fclose(fp_write);
- }
+ FILE *fp_write = fopen("dump_state.bin", "wb");
+ fwrite(state_mem.data(), 1, written, fp_write);
+ fclose(fp_write);
+
+ fprintf(stderr, "%s : serialized state into %zd out of a maximum of %zd bytes\n", __func__, written, state_mem.size());
}
// save state (last tokens)
@@ -100,18 +100,17 @@ int main(int argc, char ** argv) {
std::vector<uint8_t> state_mem(llama_get_state_size(ctx2));
FILE * fp_read = fopen("dump_state.bin", "rb");
+ const size_t read = fread(state_mem.data(), 1, state_mem.size(), fp_read);
+ fclose(fp_read);
- const size_t ret = fread(state_mem.data(), 1, state_mem.size(), fp_read);
- if (ret != state_mem.size()) {
+ if (read != llama_set_state_data(ctx2, state_mem.data())) {
fprintf(stderr, "\n%s : failed to read state\n", __func__);
llama_free(ctx2);
llama_free_model(model);
return 1;
}
- llama_set_state_data(ctx2, state_mem.data());
-
- fclose(fp_read);
+ fprintf(stderr, "%s : deserialized state from %zd out of a maximum of %zd bytes\n", __func__, read, state_mem.size());
}
// restore state (last tokens)