author     slaren <slarengh@gmail.com>       2024-06-13 03:11:35 +0200
committer  GitHub <noreply@github.com>       2024-06-13 03:11:35 +0200
commit     f578b86b2123d0f92afbaa98a031df4d4464e582 (patch)
tree       2a21feec089e5fcaa6f9d34be5468a17c3a5ddc7 /examples
parent     1c641e6aac5c18b964e7b32d9dbbb4bf5301d0d7 (diff)
move BLAS to a separate backend (#6210)
* move BLAS to a separate backend
* rename GGML_USE_OPENBLAS to GGML_USE_BLAS
* alloc : reuse the same buffer when the same buffer type is used multiple times
* set the number of threads automatically for openblas and blis
* sched : print assignments when the GGML_SCHED_DEBUG env variable is set
* sched : allow ops with weights on an incompatible buffer type
  This will cause the weight to be copied to a backend that supports the op, which is very costly. The weight should have been stored in a buffer of a backend that can run the op, but llama.cpp cannot do this automatically at the moment.

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
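For context, a minimal sketch of how the new standalone BLAS backend might be initialized from application code. This is not part of the patch shown below; the header name ggml-blas.h and the functions ggml_backend_blas_init / ggml_backend_blas_set_n_threads are assumptions based on ggml's backend naming convention.

    // Hedged sketch (assumed API, not quoted from this patch): create the BLAS
    // backend, optionally override its automatically chosen thread count, and
    // release it when done.
    #include "ggml.h"
    #include "ggml-backend.h"
    #include "ggml-blas.h"   // assumed header introduced with the separate BLAS backend

    int main() {
        ggml_backend_t blas = ggml_backend_blas_init();   // assumed constructor for the BLAS backend
        if (blas == nullptr) {
            return 1;                                      // BLAS support not compiled in
        }
        ggml_backend_blas_set_n_threads(blas, 8);          // assumed override of the automatic thread count
        // ... build a graph and let the scheduler offload matmuls to this backend ...
        ggml_backend_free(blas);
        return 0;
    }

With the scheduler changes in this commit, running a program like this with the GGML_SCHED_DEBUG environment variable set would print the backend assignments chosen for each op.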
Diffstat (limited to 'examples')
-rw-r--r--   examples/llama-bench/llama-bench.cpp   1
1 file changed, 1 insertion, 0 deletions
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 61f5a5a0..61dd1d71 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -293,6 +293,7 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
params.output_format = cmd_params_defaults.output_format;
params.output_format_stderr = cmd_params_defaults.output_format_stderr;
params.reps = cmd_params_defaults.reps;
+ params.numa = cmd_params_defaults.numa;
for (int i = 1; i < argc; i++) {
arg = argv[i];
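The added line seeds params.numa from the defaults object before the argument loop runs, so the field no longer keeps an unspecified value when the user never passes the corresponding flag. A minimal sketch of that defaults-then-override pattern follows; the struct and flag names here are illustrative, not the actual llama-bench definitions.

    // Illustrative sketch: every field is copied from the defaults object up
    // front, and the argument loop only overwrites fields the user explicitly
    // sets. Omitting the seeding step leaves unmentioned fields uninitialized.
    #include <string>

    struct cmd_params_sketch {
        int  reps;
        bool numa;
    };

    static const cmd_params_sketch sketch_defaults = { /*reps=*/5, /*numa=*/false };

    static cmd_params_sketch parse_sketch(int argc, char ** argv) {
        cmd_params_sketch params = sketch_defaults;   // seed every field, including numa
        for (int i = 1; i < argc; i++) {
            std::string arg = argv[i];
            if (arg == "--numa") {
                params.numa = true;                    // override only when the flag is present
            }
        }
        return params;
    }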