author     slaren <slarengh@gmail.com>      2023-08-25 15:16:19 +0200
committer  GitHub <noreply@github.com>      2023-08-25 15:16:19 +0200
commit     154725c5436808e5c519685d0279e850596dbe62 (patch)
tree       09190086aae7ffa71a2e8089b0a2fc60074b8196 /llama.cpp
parent     12e2e33a977af73e75885eeee91c5575a77f4e5f (diff)
llama-bench : add model sizes (#2771)
* llama-bench : add model sizes
* more compact markdown output
* back to GiB
* adjust column sizes
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp | 18
1 file changed, 17 insertions, 1 deletion
diff --git a/llama.cpp b/llama.cpp
index d12b6d1c..4529ac82 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5297,13 +5297,29 @@ int llama_model_n_embd(const struct llama_model * model) {
     return model->hparams.n_embd;
 }
 
-int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size) {
+int llama_model_desc(const struct llama_model * model, char * buf, size_t buf_size) {
     return snprintf(buf, buf_size, "%s %s %s",
             model->name.c_str(),
             llama_model_type_name(model->type),
             llama_model_ftype_name(model->ftype).c_str());
 }
 
+uint64_t llama_model_size(const struct llama_model * model) {
+    uint64_t size = 0;
+    for (const auto & it : model->tensors_by_name) {
+        size += ggml_nbytes(it.second);
+    }
+    return size;
+}
+
+uint64_t llama_model_n_params(const struct llama_model * model) {
+    uint64_t nparams = 0;
+    for (const auto & it : model->tensors_by_name) {
+        nparams += ggml_nelements(it.second);
+    }
+    return nparams;
+}
+
 int llama_model_quantize(
         const char * fname_inp,
         const char * fname_out,
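
For context, the two new accessors let a caller such as llama-bench report model size and parameter count without reaching into internal structures. Below is a minimal sketch of how they might be used; print_model_summary is a hypothetical helper (not part of this commit), it assumes a model pointer already obtained from the usual llama.cpp loading API, and the GiB conversion simply mirrors the "back to GiB" item in the commit message rather than llama-bench's exact output format.

#include <cstdint>
#include <cstdio>

#include "llama.h"

// Hypothetical helper: print a one-line summary for an already-loaded model
// using the accessors added in this commit.
static void print_model_summary(const struct llama_model * model) {
    char desc[128];
    llama_model_desc(model, desc, sizeof(desc)); // "<name> <type> <ftype>", per the snprintf above

    const uint64_t size    = llama_model_size(model);     // total tensor bytes
    const uint64_t nparams = llama_model_n_params(model); // total tensor elements

    printf("%s | %.2f GiB | %.2f B params\n",
           desc,
           size / 1024.0 / 1024.0 / 1024.0, // bytes -> GiB
           nparams / 1e9);                  // elements -> billions of parameters
}

llama-bench itself feeds these values into its markdown table; the "more compact markdown output" and "adjust column sizes" items above refer to how that table is laid out.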