author    Aarni Koskela <akx@iki.fi>      2024-02-13 15:24:50 +0200
committer GitHub <noreply@github.com>    2024-02-13 15:24:50 +0200
commit    037259be689353081e7bae3c1ab4ab18e7fbe8c9 (patch)
tree      4049ecb3e05e68aa80c1c1d84930a7a4c11736b9
parent    263978904c7472db1865409a7ff1129599f6a40b (diff)
llama : make load error reporting more granular (#5477)
Makes it easier to pinpoint where e.g. `unordered_map::at: key not found` comes from.
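The commit wraps each loading phase in its own try/catch and rethrows with the phase name prepended, so a low-level exception carries context about where it originated. A minimal, self-contained sketch of that wrap-and-rethrow pattern follows; the `load_hparams` and `load_model` names are hypothetical stand-ins for illustration, not llama.cpp functions.

```cpp
#include <cstdio>
#include <stdexcept>
#include <string>
#include <unordered_map>

// A loading phase that can fail deep inside, e.g. via unordered_map::at
// on a missing key (throws std::out_of_range).
static void load_hparams(const std::unordered_map<std::string, int> & kv) {
    int n_layer = kv.at("n_layer");
    (void) n_layer;
}

static void load_model(const std::unordered_map<std::string, int> & kv) {
    // Without this wrapper the caller only sees
    // "unordered_map::at: key not found" and cannot tell which phase threw.
    try {
        load_hparams(kv);
    } catch (const std::exception & e) {
        throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
    }
}

int main() {
    try {
        load_model({}); // empty map -> missing key -> wrapped error message
    } catch (const std::exception & e) {
        std::fprintf(stderr, "%s\n", e.what());
    }
    return 0;
}
```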
-rw-r--r--  llama.cpp  18
1 file changed, 15 insertions(+), 3 deletions(-)
diff --git a/llama.cpp b/llama.cpp
index 381a0306..61c69518 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -4384,9 +4384,21 @@ static int llama_model_load(const std::string & fname, llama_model & model, llam
     model.hparams.vocab_only = params.vocab_only;

-    llm_load_arch   (ml, model);
-    llm_load_hparams(ml, model);
-    llm_load_vocab  (ml, model);
+    try {
+        llm_load_arch(ml, model);
+    } catch(const std::exception & e) {
+        throw std::runtime_error("error loading model architecture: " + std::string(e.what()));
+    }
+    try {
+        llm_load_hparams(ml, model);
+    } catch(const std::exception & e) {
+        throw std::runtime_error("error loading model hyperparameters: " + std::string(e.what()));
+    }
+    try {
+        llm_load_vocab(ml, model);
+    } catch(const std::exception & e) {
+        throw std::runtime_error("error loading model vocabulary: " + std::string(e.what()));
+    }

     llm_load_print_meta(ml, model);