author    bmwl <brian.marshall@tolko.com>           2024-02-16 01:31:07 -0800
committer GitHub <noreply@github.com>               2024-02-16 11:31:07 +0200
commit    f486f6e1e5e9d01603d9325ab3e05f1edb362a95 (patch)
tree      370aeba4b6b5f01ec2137e82484013d1be82061e /llama.cpp
parent    60ed04cf82dc91ade725dd7ad53f0ee81f76eccf (diff)
ggml : add numa options (#5377)
* Added numa options to allow finer-grained control as well as plumbing for a new mirror mode that will require numa.h
* Reverted Makefile
* Fixed include
* Removed sched.h from ggml.h, moved ggml_get_numa_affinity into ggml.c, removed trailing whitespace and fixed up a few inconsistent variables
* removed trailing whitespace
* Added numa options to allow finer-grained control as well as plumbing for a new mirror mode that will require numa.h
* Reverting Makefile
* Fixed a number of issues with the move from BOOL to ggml_numa_strategies. Added a note about mirror mode not being implemented yet
* Removing MIRROR_MODE code for this PR
* Removing last bit of MIRROR_MODE code for this PR
* Removing unneeded branch in server.cpp example and moving get_numa_affinity and making it static
* Fixed lingering init_llama_backend() bool calls in tests and examples
* Removed enum llama_numa_strategies
* Revert bad merge with dynatemp flags
* add missing enum ggml_numa_strategies declaration and revert sync problem with master
* add missing enum ggml_numa_strategies declaration
* fixed ggml_init_numa variable
* Update ggml.h
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* Update READMEs with info about numa flags, change INTERLEAVE strategy name to DISTRIBUTE everywhere, implement the improved distribution strategy from @rankaiyx, fix a spelling mistake and un-merge some bad merges
* split numa init out from llama_backend_init and created llama_numa_init. Updated all code paths and samples
* Fix up some boolean vs enum comparisons
* Added #ifdefs for non-Linux OS that don't have cpu_set_t datatype
* Update ggml.h: align enum values
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update ggml.c: remove whitespace
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update ggml.c: align parameters
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update examples/server/server.cpp: remove whitespace and align brace
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* Update common/common.cpp: remove whitespace and align brace
  Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
* unified ggml_numa_strategy enum and fixed text alignment in server.cpp example
* Update ggml.c: simplified return for platforms without NUMA support
  Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
* removed redundant else from cli argument processing of --numa
* whitespace

---------

Co-authored-by: root <root@nenya.lothlorien.ca>
Co-authored-by: Jared Van Bortel <cebtenzzre@gmail.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
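The diff below shows the llama.cpp side of that API split: NUMA setup moves out of llama_backend_init() into a new llama_numa_init() call. A minimal migration sketch for a caller, assuming the headers from this commit and the GGML_NUMA_STRATEGY_DISTRIBUTE value mentioned in the message above (only GGML_NUMA_STRATEGY_DISABLED appears in this diff):

    // Before this commit a caller passed a NUMA bool straight into backend init:
    //     llama_backend_init(true);
    // After this commit backend init takes no arguments and NUMA is configured
    // separately through the new llama_numa_init() call.
    #include "llama.h"

    int main(void) {
        llama_backend_init();                            // no NUMA flag any more
        llama_numa_init(GGML_NUMA_STRATEGY_DISTRIBUTE);  // or GGML_NUMA_STRATEGY_DISABLED to opt out
        // ... load a model, create a context, run inference ...
        llama_backend_free();
        return 0;
    }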
Diffstat (limited to 'llama.cpp')
-rw-r--r--  llama.cpp  14
1 file changed, 8 insertions, 6 deletions
diff --git a/llama.cpp b/llama.cpp
index aceb9c25..08e7b02b 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1034,7 +1034,7 @@ struct llama_mmap {
int fd = fileno(file->fp);
int flags = MAP_SHARED;
// prefetch/readahead impairs performance on NUMA systems
- if (numa) { prefetch = 0; }
+ if (numa) { prefetch = 0; }
#ifdef __linux__
// advise the kernel to read the file sequentially (increases readahead)
if (posix_fadvise(fd, 0, 0, POSIX_FADV_SEQUENTIAL)) {
@@ -11182,7 +11182,7 @@ bool llama_mlock_supported(void) {
return llama_supports_mlock();
}
-void llama_backend_init(bool numa) {
+void llama_backend_init(void) {
ggml_time_init();
// needed to initialize f16 tables
@@ -11192,15 +11192,17 @@ void llama_backend_init(bool numa) {
ggml_free(ctx);
}
- if (numa) {
- ggml_numa_init();
- }
-
#ifdef GGML_USE_MPI
ggml_mpi_backend_init();
#endif
}
+void llama_numa_init(enum ggml_numa_strategy numa) {
+ if (numa != GGML_NUMA_STRATEGY_DISABLED) {
+ ggml_numa_init(numa);
+ }
+}
+
void llama_backend_free(void) {
#ifdef GGML_USE_MPI
ggml_mpi_backend_free();
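
Not shown in this excerpt is the --numa argument handling in common.cpp that the commit message refers to. As a hypothetical illustration only (the accepted strings and the helper name here are assumptions, not the actual common.cpp code), a front end mapping a --numa value onto the new enum might look like:

    #include <string>
    #include "ggml.h"

    // Hypothetical helper: translate a --numa command-line value into a
    // ggml_numa_strategy. "distribute" is the renamed INTERLEAVE strategy
    // from the commit message; the other names are assumed, and anything
    // unrecognized leaves NUMA disabled.
    static enum ggml_numa_strategy parse_numa_flag(const std::string & value) {
        if (value == "distribute") { return GGML_NUMA_STRATEGY_DISTRIBUTE; }
        if (value == "isolate")    { return GGML_NUMA_STRATEGY_ISOLATE;    }
        if (value == "numactl")    { return GGML_NUMA_STRATEGY_NUMACTL;    }
        return GGML_NUMA_STRATEGY_DISABLED;
    }

The result would then be handed to llama_numa_init(), which, as the new function in the diff shows, only calls ggml_numa_init() when the strategy is not GGML_NUMA_STRATEGY_DISABLED.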