summary | refs | log | tree | commit | diff
path: root/main.cpp
diff options
context:
space:
mode:
Diffstat (limited to 'main.cpp')
-rw-r--r--  main.cpp  21
1 files changed, 20 insertions, 1 deletions
diff --git a/main.cpp b/main.cpp
index c005d17c..15903337 100644
--- a/main.cpp
+++ b/main.cpp
@@ -3,6 +3,7 @@
#include "utils.h"
#include <cassert>
+#include <cinttypes>
#include <cmath>
#include <cstdio>
#include <cstring>
@@ -105,10 +106,24 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
{
uint32_t magic;
fin.read((char *) &magic, sizeof(magic));
- if (magic != 0x67676d6c) {
+ if (magic == 0x67676d6c) {
+ fprintf(stderr, "%s: invalid model file '%s' (too old, regenerate your model files!)\n",
+ __func__, fname.c_str());
+ return false;
+ }
+ if (magic != 0x67676d66) {
fprintf(stderr, "%s: invalid model file '%s' (bad magic)\n", __func__, fname.c_str());
return false;
}
+
+ uint32_t format_version;
+ fin.read((char *) &format_version, sizeof(format_version));
+
+ if (format_version != 1) {
+ fprintf(stderr, "%s: invalid model file '%s' (unsupported format version %" PRIu32 ")\n",
+ __func__, fname.c_str(), format_version);
+ return false;
+ }
}
int n_ff = 0;
@@ -154,8 +169,12 @@ bool llama_model_load(const std::string & fname, llama_model & model, gpt_vocab
word.resize(len);
fin.read((char *) word.data(), len);
+ float score;
+ fin.read((char *) &score, sizeof(score));
+
vocab.token_to_id[word] = i;
vocab.id_to_token[i] = word;
+ vocab.score[i] = score;
//if (i < 30000) {
// fprintf(stderr, "%s: vocab[%d] = '%s'\n", __func__, i, word.c_str());