author    | Marian Cepok <marian.cepok@gmail.com> | 2023-04-02 12:21:31 +0200
committer | GitHub <noreply@github.com>           | 2023-04-02 13:21:31 +0300
commit    | c0bb1d3ce21005ab21d686626ba87261a6e3a660 (patch)
tree      | b2892bed5a3e233d7779d4c0fe37cbf8b6fb0ca1 /llama.cpp
parent    | 6e7801d08d81c931a5427bae46f00763e993f54a (diff)
ggml : change ne to int64_t (#626)
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 6
1 file changed, 3 insertions(+), 3 deletions(-)
```diff
@@ -256,8 +256,8 @@ static bool kv_cache_init(
     const int n_embd  = hparams.n_embd;
     const int n_layer = hparams.n_layer;
 
-    const int n_mem      = n_layer*n_ctx;
-    const int n_elements = n_embd*n_mem;
+    const int64_t n_mem      = (int64_t)n_layer*n_ctx;
+    const int64_t n_elements = n_embd*n_mem;
 
     cache.buf.resize(2u*n_elements*ggml_type_size(wtype) + 2u*MB);
 
@@ -679,7 +679,7 @@ static bool llama_model_load(
             return false;
         }
         if (tensor->ne[0] != ne[0] || tensor->ne[1] != ne[1]) {
-            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%d, %d], expected [%d, %d]\n",
+            fprintf(stderr, "%s: tensor '%s' has wrong shape in model file: got [%" PRId64 ", %" PRId64 "], expected [%d, %d]\n",
                     __func__, name.data(), tensor->ne[0], tensor->ne[1], ne[0], ne[1]);
             return false;
         }
```
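The first hunk widens the KV-cache size arithmetic to 64-bit, presumably so that large `n_layer*n_ctx*n_embd` products cannot overflow a 32-bit `int`; the second hunk updates the matching `fprintf` format, since ggml's `ne[]` dimensions are now `int64_t` and must be printed with `PRId64` rather than `%d`. Below is a minimal standalone sketch of both points, not code from llama.cpp: the `n_embd`/`n_layer`/`n_ctx` values are made-up hparams-style numbers chosen only so the element count exceeds `INT32_MAX`.

```cpp
// Minimal standalone sketch (not from llama.cpp): hypothetical sizes chosen
// so the element count exceeds INT32_MAX.
#include <cinttypes>  // PRId64
#include <cstdint>
#include <cstdio>

int main() {
    const int n_embd  = 8192;   // hypothetical embedding size
    const int n_layer = 80;     // hypothetical layer count
    const int n_ctx   = 8192;   // hypothetical context length

    // As in the patch: cast one operand up front so the whole chain is
    // evaluated in 64-bit arithmetic. In plain int, the product
    // 80*8192*8192 (about 5.4e9) would not fit in 32 bits.
    const int64_t n_mem      = (int64_t)n_layer*n_ctx;
    const int64_t n_elements = n_embd*n_mem;

    // int64_t values need the PRId64 macro from <cinttypes> instead of %d;
    // this is the same fix the second hunk applies to tensor->ne[0]/ne[1].
    fprintf(stderr, "n_mem = %" PRId64 ", n_elements = %" PRId64 "\n",
            n_mem, n_elements);
    return 0;
}
```

Compiling with `-Wall` (which enables `-Wformat` in gcc and clang) flags the `%d` versus `int64_t` mismatch that the second hunk corrects.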