summaryrefslogtreecommitdiff
path: root/llama.h
diff options
context:
space:
mode:
authorslaren <slarengh@gmail.com>2023-08-18 12:44:58 +0200
committerGitHub <noreply@github.com>2023-08-18 12:44:58 +0200
commit097e121e2f17ed3541cf02c55ff7e9febc091b19 (patch)
treef3bead40b2632be95479e3f9b31baffc6681f572 /llama.h
parenteaf98c2649d7da705de255712f0038ac7e47c610 (diff)
llama : add benchmark example (#2626)
llama : add benchmark example (#2626)
Diffstat (limited to 'llama.h')
-rw-r--r--llama.h2
1 file changed, 2 insertions, 0 deletions
diff --git a/llama.h b/llama.h
index 92b47489..9d732f91 100644
--- a/llama.h
+++ b/llama.h
@@ -351,6 +351,8 @@ extern "C" {
LLAMA_API int llama_n_ctx_from_model (const struct llama_model * model);
LLAMA_API int llama_n_embd_from_model (const struct llama_model * model);
+ LLAMA_API int llama_model_type(const struct llama_model * model, char * buf, size_t buf_size);
+
// Get the vocabulary as output parameters.
// Returns number of results.
LLAMA_API int llama_get_vocab(