path: root/ggml-backend.c
author    Georgi Gerganov <ggerganov@gmail.com>  2024-02-12 09:16:06 +0200
committer GitHub <noreply@github.com>  2024-02-12 09:16:06 +0200
commit    3b169441dfe8e420f88d1592708cc2a871daadb9 (patch)
tree      b554c9eac1b3b7dbf11e364b6a4a748605a6e949 /ggml-backend.c
parent    3bdc4cd0f595a6096cca4a64aa75ffa8a3503465 (diff)
sync : ggml (#5452)
* ggml-alloc : v3 (ggml/727)
* ggml-alloc v3 ggml-ci
* fix ci ggml-ci
* whisper : check for backend buffer allocation failures
* whisper : avoid leaks when initialization fails
* cleanup ggml-ci
* style fixes ggml-ci
* sync : ggml
* update llama.cpp, clip.cpp, export-lora.cpp
* update finetune.cpp, train-text-from-scratch.cpp ggml-ci
* ggml-backend : reduce alignment to 32 to match gguf and fix mmap

---------

Co-authored-by: slaren <slarengh@gmail.com>
Diffstat (limited to 'ggml-backend.c')
-rw-r--r--  ggml-backend.c  486

1 file changed, 228 insertions(+), 258 deletions(-)
diff --git a/ggml-backend.c b/ggml-backend.c
index 532da8ed..9ee81b76 100644
--- a/ggml-backend.c
+++ b/ggml-backend.c
@@ -475,6 +475,8 @@ ggml_backend_buffer_t ggml_backend_reg_alloc_buffer(size_t i, size_t size) {
// backend CPU
+static const size_t TENSOR_ALIGNMENT = 32; // required for mmap as gguf only guarantees 32-byte alignment
+
GGML_CALL static const char * ggml_backend_cpu_buffer_name(ggml_backend_buffer_t buffer) {
return "CPU";
@@ -482,7 +484,14 @@ GGML_CALL static const char * ggml_backend_cpu_buffer_name(ggml_backend_buffer_t
}
GGML_CALL static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
- return (void *)buffer->context;
+ uintptr_t data = (uintptr_t)buffer->context;
+
+ // align the buffer
+ if (data % TENSOR_ALIGNMENT != 0) {
+ data = GGML_PAD(data, TENSOR_ALIGNMENT);
+ }
+
+ return (void *)data;
}
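
The padding above relies on GGML_PAD rounding an address up to the next multiple of the alignment. A minimal standalone sketch of that arithmetic, assuming the usual power-of-two rounding idiom (PAD here is an illustrative stand-in for GGML_PAD):

    #include <stdint.h>
    #include <stdio.h>

    #define ALIGN 32
    // round x up to a multiple of n; valid when n is a power of two
    #define PAD(x, n) (((x) + (n) - 1) & ~((uintptr_t)(n) - 1))

    int main(void) {
        uintptr_t data = 0x1003;        // deliberately misaligned address
        if (data % ALIGN != 0) {
            data = PAD(data, ALIGN);    // -> 0x1020, the next 32-byte boundary
        }
        printf("0x%llx\n", (unsigned long long) data);
        return 0;
    }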
GGML_CALL static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
@@ -540,8 +549,6 @@ static struct ggml_backend_buffer_i cpu_backend_buffer_i_from_ptr = {
/* .reset = */ NULL,
};
-static const size_t TENSOR_ALIGNMENT = 64; // should be enough for AVX 512
-
GGML_CALL static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
return "CPU";
@@ -550,9 +557,11 @@ GGML_CALL static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend
GGML_CALL static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
size += TENSOR_ALIGNMENT; // malloc may return an address that is not aligned
- void * data = malloc(size); // TODO: maybe use GGML_ALIGNED_MALLOC?
-
- GGML_ASSERT(data != NULL && "failed to allocate buffer");
+ void * data = malloc(size); // TODO: use GGML_ALIGNED_MALLOC (move to ggml-impl.h)
+ if (data == NULL) {
+ fprintf(stderr, "%s: failed to allocate buffer of size %zu\n", __func__, size);
+ return NULL;
+ }
return ggml_backend_buffer_init(buft, cpu_backend_buffer_i, data, size);
}
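
Over-allocating by TENSOR_ALIGNMENT guarantees that an aligned base of the full requested size fits inside the block, and returning NULL rather than asserting lets callers fail gracefully. A hedged sketch of the same pattern in isolation; alloc_padded is an illustrative helper, not a ggml API:

    #include <stdint.h>
    #include <stdlib.h>

    #define ALIGN 32

    // allocate `size` usable bytes whose aligned base lies within the block;
    // the caller keeps (and later frees) the raw pointer
    static void * alloc_padded(size_t size, void ** raw_out) {
        void * raw = malloc(size + ALIGN);   // pad so aligning cannot overrun
        if (raw == NULL) {
            return NULL;                     // propagate failure to the caller
        }
        uintptr_t base = (uintptr_t) raw;
        if (base % ALIGN != 0) {
            base += ALIGN - base % ALIGN;    // round up to the next boundary
        }
        *raw_out = raw;
        return (void *) base;
    }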
@@ -766,6 +775,9 @@ static struct ggml_backend_i cpu_backend_i = {
ggml_backend_t ggml_backend_cpu_init(void) {
struct ggml_backend_cpu_context * ctx = malloc(sizeof(struct ggml_backend_cpu_context));
+ if (ctx == NULL) {
+ return NULL;
+ }
ctx->n_threads = GGML_DEFAULT_N_THREADS;
ctx->work_data = NULL;
@@ -774,6 +786,10 @@ ggml_backend_t ggml_backend_cpu_init(void) {
ctx->abort_callback_data = NULL;
ggml_backend_t cpu_backend = malloc(sizeof(struct ggml_backend));
+ if (cpu_backend == NULL) {
+ free(ctx);
+ return NULL;
+ }
*cpu_backend = (struct ggml_backend) {
/* .interface = */ cpu_backend_i,
@@ -802,6 +818,7 @@ void ggml_backend_cpu_set_abort_callback(ggml_backend_t backend_cpu, ggml_abort_
}
GGML_CALL ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
+ GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned");
return ggml_backend_buffer_init(ggml_backend_cpu_buffer_type(), cpu_backend_buffer_i_from_ptr, ptr, size);
}
@@ -865,6 +882,8 @@ GGML_CALL ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_back
ctx->n_buffers = n_buffers;
ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));
+ GGML_ASSERT(ctx->buffers != NULL);
+
size_t total_size = 0;
for (size_t i = 0; i < n_buffers; i++) {
ctx->buffers[i] = buffers[i];
@@ -886,6 +905,18 @@ GGML_CALL void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer,
}
}
+// creates a copy of the tensor with the same memory layout
+static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
+ struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
+ for (int i = 0; i < GGML_MAX_DIMS; i++) {
+ dup->nb[i] = tensor->nb[i];
+ }
+ return dup;
+}
+
+static bool ggml_is_view_op(enum ggml_op op) {
+ return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
+}
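
ggml_dup_tensor alone gives the duplicate fresh contiguous strides; copying nb[] as well preserves non-contiguous layouts such as permuted or transposed views. A toy illustration of the distinction (plain structs here, not ggml types):

    #include <stdint.h>
    #include <stdio.h>

    struct toy { int64_t ne[2]; size_t nb[2]; };  // shape and byte strides

    int main(void) {
        // 4x3 row-major floats: nb[0] = element stride, nb[1] = row stride
        struct toy t  = { {4, 3}, {sizeof(float), 4 * sizeof(float)} };
        // its transpose swaps the strides without moving data; a duplicate
        // that copied only ne[] would silently revert to a contiguous layout
        struct toy tt = { {3, 4}, {t.nb[1], t.nb[0]} };
        printf("transposed strides: %zu, %zu\n", tt.nb[0], tt.nb[1]);
        return 0;
    }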
// scheduler
@@ -894,7 +925,7 @@ GGML_CALL void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer,
#define GGML_MAX_SPLIT_INPUTS 16
struct ggml_backend_sched_split {
- ggml_tallocr_t tallocr;
+ int backend_id;
int i_start;
int i_end;
struct ggml_tensor * inputs[GGML_MAX_SPLIT_INPUTS];
@@ -909,15 +940,17 @@ struct ggml_backend_sched {
int n_backends;
ggml_backend_t backends[GGML_MAX_BACKENDS];
ggml_backend_buffer_type_t bufts[GGML_MAX_BACKENDS];
- ggml_tallocr_t tallocs[GGML_MAX_BACKENDS];
ggml_gallocr_t galloc;
// hash keys of the nodes in the graph
struct ggml_hash_set hash_set;
- // hash values (arrays of [hash_set.size])
- ggml_tallocr_t * node_talloc; // tallocr assigned to each node (indirectly this is the backend)
- struct ggml_tensor * (* node_copies)[GGML_MAX_BACKENDS]; // copies of each node for each destination backend
+ // hash values
+ int * tensor_backend_id;
+ struct ggml_tensor * (* tensor_copies)[GGML_MAX_BACKENDS];
+
+ int * node_backend_ids; // [n_nodes]
+ int n_nodes;
// copy of the graph with modified inputs
struct ggml_cgraph * graph;
@@ -927,77 +960,46 @@ struct ggml_backend_sched {
struct ggml_context * ctx;
+ ggml_backend_sched_eval_callback callback_eval;
+ void * callback_eval_user_data;
+
// align context_buffer to GGML_MEM_ALIGN
#ifdef _MSC_VER
__declspec(align(GGML_MEM_ALIGN))
#else
__attribute__((aligned(GGML_MEM_ALIGN)))
#endif
- char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*sizeof(struct ggml_tensor) + sizeof(struct ggml_cgraph)];
-
- ggml_backend_sched_eval_callback callback_eval;
- void * callback_eval_user_data;
+ char context_buffer[GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + sizeof(struct ggml_cgraph)];
};
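
The new factor of 2 in context_buffer reflects that each split input now materializes two tensors in the copied graph: a dependency view of the original plus the cross-backend copy (see the split-graph changes below). Illustrative worst-case arithmetic, with placeholder sizes where the real values come from ggml:

    #include <stddef.h>

    // placeholder stand-ins; the real values are GGML_MAX_SPLITS,
    // GGML_MAX_SPLIT_INPUTS, sizeof(struct ggml_tensor), sizeof(struct ggml_cgraph)
    enum { MAX_SPLITS = 256, MAX_SPLIT_INPUTS = 16, TENSOR_SZ = 384, CGRAPH_SZ = 64 };

    // two tensors per input (dependency view + backend copy), hence the * 2
    enum { CONTEXT_BUFFER_SZ = MAX_SPLITS * MAX_SPLIT_INPUTS * 2 * TENSOR_SZ + CGRAPH_SZ };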
#define hash_id(node) ggml_hash_find_or_insert(sched->hash_set, node)
-#define node_allocr(node) sched->node_talloc[hash_id(node)]
-
-static bool ggml_is_view_op(enum ggml_op op) {
- return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
-}
+#define tensor_backend_id(node) sched->tensor_backend_id[hash_id(node)]
+#define tensor_backend(node) (tensor_backend_id(node) == -1 ? NULL : sched->backends[tensor_backend_id(node)])
-// returns the priority of the backend, lower is better
-static int sched_backend_prio(ggml_backend_sched_t sched, ggml_backend_t backend) {
+// returns the priority of the backend, lower id is higher priority
+static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) {
for (int i = 0; i < sched->n_backends; i++) {
if (sched->backends[i] == backend) {
return i;
}
}
- return INT_MAX;
+ return -1;
}
-static int sched_allocr_prio(ggml_backend_sched_t sched, ggml_tallocr_t allocr) {
- for (int i = 0; i < sched->n_backends; i++) {
- if (sched->tallocs[i] == allocr) {
- return i;
- }
- }
- return INT_MAX;
-}
-
-static ggml_tallocr_t sched_allocr_from_buffer(ggml_backend_sched_t sched, ggml_backend_buffer_t buffer) {
+static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, ggml_backend_buffer_t buffer) {
if (buffer == NULL) {
- return NULL;
- }
-
- // check if this is already allocate in a allocr buffer (from user manual allocations)
- for (int i = 0; i < sched->n_backends; i++) {
- if (ggml_tallocr_get_buffer(sched->tallocs[i]) == buffer) {
- return sched->tallocs[i];
- }
+ return -1;
}
// find highest prio backend that supports the buffer type
for (int i = 0; i < sched->n_backends; i++) {
if (ggml_backend_buft_supports_backend(buffer->buft, sched->backends[i])) {
- return sched->tallocs[i];
+ return i;
}
}
GGML_ASSERT(false && "tensor buffer type not supported by any backend");
}
-static ggml_backend_t get_allocr_backend(ggml_backend_sched_t sched, ggml_tallocr_t allocr) {
- if (allocr == NULL) {
- return NULL;
- }
- for (int i = 0; i < sched->n_backends; i++) {
- if (sched->tallocs[i] == allocr) {
- return sched->backends[i];
- }
- }
- GGML_UNREACHABLE();
-}
-
#if 0
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS][128]; // debug only
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
@@ -1008,37 +1010,39 @@ static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_MAX_SPLITS*GGML_MAX_SPLIT_I
#endif
// returns the backend that should be used for the node based on the current locations
-static ggml_tallocr_t sched_allocr_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * node) {
+static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) {
+ // TODO: use supports_op to check if the backend supports the op
+
// assign pre-allocated nodes to their backend
// dst
- ggml_tallocr_t cur_allocr = sched_allocr_from_buffer(sched, node->buffer);
- if (cur_allocr != NULL) {
+ int cur_backend = ggml_backend_sched_backend_from_buffer(sched, tensor->buffer);
+ if (cur_backend != -1) {
SET_CAUSE(node, "1.dst");
- return cur_allocr;
+ return cur_backend;
}
// view_src
- if (node->view_src != NULL) {
- cur_allocr = sched_allocr_from_buffer(sched, node->view_src->buffer);
- if (cur_allocr != NULL) {
+ if (tensor->view_src != NULL) {
+ cur_backend = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src->buffer);
+ if (cur_backend != -1) {
SET_CAUSE(node, "1.vsrc");
- return cur_allocr;
+ return cur_backend;
}
}
// assign nodes that use weights to the backend of the weights
for (int i = 0; i < GGML_MAX_SRC; i++) {
- const struct ggml_tensor * src = node->src[i];
+ const struct ggml_tensor * src = tensor->src[i];
if (src == NULL) {
break;
}
if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
- ggml_tallocr_t src_allocr = sched_allocr_from_buffer(sched, src->buffer);
+ int src_backend = ggml_backend_sched_backend_from_buffer(sched, src->buffer);
// operations with weights are always run on the same backend as the weights
SET_CAUSE(node, "1.wgt%d", i);
- return src_allocr;
+ return src_backend;
}
}
- return NULL;
+ return -1;
}
static char * fmt_size(size_t size) {
@@ -1051,11 +1055,11 @@ static char * fmt_size(size_t size) {
return buffer;
}
-static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
int cur_split = 0;
for (int i = 0; i < graph->n_nodes; i++) {
if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
- ggml_backend_t split_backend = get_allocr_backend(sched, sched->splits[cur_split].tallocr);
+ ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
fprintf(stderr, "\n## SPLIT #%d: %s # %d inputs: ", cur_split, ggml_backend_name(split_backend),
sched->splits[cur_split].n_inputs);
for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
@@ -1069,17 +1073,15 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra
if (ggml_is_view_op(node->op)) {
continue;
}
- ggml_tallocr_t node_allocr = node_allocr(node);
- ggml_backend_t node_backend = node_allocr ? get_allocr_backend(sched, node_allocr) : NULL; // FIXME:
+ ggml_backend_t tensor_backend = tensor_backend(node);
fprintf(stderr, "node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s]:", i, ggml_op_name(node->op), node->name,
- fmt_size(ggml_nbytes(node)), node_allocr ? ggml_backend_name(node_backend) : "NULL", GET_CAUSE(node));
+ fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node));
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
if (src == NULL) {
break;
}
- ggml_tallocr_t src_allocr = node_allocr(src);
- ggml_backend_t src_backend = src_allocr ? get_allocr_backend(sched, src_allocr) : NULL;
+ ggml_backend_t src_backend = tensor_backend(src);
fprintf(stderr, " %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
}
@@ -1087,23 +1089,13 @@ static void sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgra
}
}
-// creates a copy of the tensor with the same memory layout
-static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
- struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
- for (int i = 0; i < GGML_MAX_DIMS; i++) {
- dup->nb[i] = tensor->nb[i];
- }
- return dup;
-}
-
-
//#define DEBUG_PASS1
//#define DEBUG_PASS2
//#define DEBUG_PASS3
//#define DEBUG_PASS4
// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
-static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+static void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
// reset splits
sched->n_splits = 0;
sched->is_reset = false;
@@ -1125,28 +1117,28 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
// pass 1: assign backends to ops with pre-allocated inputs
for (int i = 0; i < graph->n_leafs; i++) {
struct ggml_tensor * leaf = graph->leafs[i];
- if (node_allocr(leaf) != NULL) {
+ if (tensor_backend_id(leaf) != -1) {
// do not overwrite user assignments
continue;
}
- node_allocr(leaf) = sched_allocr_from_cur(sched, leaf);
+ tensor_backend_id(leaf) = ggml_backend_sched_backend_id_from_cur(sched, leaf);
}
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
- if (node_allocr(node) != NULL) {
+ if (tensor_backend_id(node) != -1) {
// do not overwrite user assignments
continue;
}
- node_allocr(node) = sched_allocr_from_cur(sched, node);
+ tensor_backend_id(node) = ggml_backend_sched_backend_id_from_cur(sched, node);
// src
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
if (src == NULL) {
break;
}
- if (node_allocr(src) == NULL) {
- node_allocr(src) = sched_allocr_from_cur(sched, src);
+ if (tensor_backend_id(src) == -1) {
+ tensor_backend_id(src) = ggml_backend_sched_backend_id_from_cur(sched, src);
}
}
}
@@ -1161,22 +1153,22 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
// pass 2.1 expand gpu up
{
- ggml_tallocr_t cur_allocr = NULL;
+ int cur_backend_id = -1;
for (int i = graph->n_nodes - 1; i >= 0; i--) {
struct ggml_tensor * node = graph->nodes[i];
if (ggml_is_view_op(node->op)) {
continue;
}
- ggml_tallocr_t node_allocr = node_allocr(node);
- if (node_allocr != NULL) {
- if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
+ int tensor_backend_id = tensor_backend_id(node);
+ if (tensor_backend_id != -1) {
+ if (tensor_backend_id == sched->n_backends - 1) {
// skip cpu (lowest prio backend)
- cur_allocr = NULL;
+ cur_backend_id = -1;
} else {
- cur_allocr = node_allocr;
+ cur_backend_id = tensor_backend_id;
}
} else {
- node_allocr(node) = cur_allocr;
+ tensor_backend_id(node) = cur_backend_id;
SET_CAUSE(node, "2.1");
}
}
@@ -1184,22 +1176,22 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
// pass 2.2 expand gpu down
{
- ggml_tallocr_t cur_allocr = NULL;
+ int cur_backend_id = -1;
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
if (ggml_is_view_op(node->op)) {
continue;
}
- ggml_tallocr_t node_allocr = node_allocr(node);
- if (node_allocr != NULL) {
- if (sched_allocr_prio(sched, node_allocr) == sched->n_backends - 1) {
+ int tensor_backend_id = tensor_backend_id(node);
+ if (tensor_backend_id != -1) {
+ if (tensor_backend_id == sched->n_backends - 1) {
// skip cpu (lowest prio backend)
- cur_allocr = NULL;
+ cur_backend_id = -1;
} else {
- cur_allocr = node_allocr;
+ cur_backend_id = tensor_backend_id;
}
} else {
- node_allocr(node) = cur_allocr;
+ tensor_backend_id(node) = cur_backend_id;
SET_CAUSE(node, "2.2");
}
}
@@ -1207,17 +1199,17 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
// pass 2.3 expand rest up
{
- ggml_tallocr_t cur_allocr = NULL;
+ int cur_backend_id = -1;
for (int i = graph->n_nodes - 1; i >= 0; i--) {
struct ggml_tensor * node = graph->nodes[i];
if (ggml_is_view_op(node->op)) {
continue;
}
- ggml_tallocr_t node_allocr = node_allocr(node);
- if (node_allocr != NULL) {
- cur_allocr = node_allocr;
+ int tensor_backend_id = tensor_backend_id(node);
+ if (tensor_backend_id != -1) {
+ cur_backend_id = tensor_backend_id;
} else {
- node_allocr(node) = cur_allocr;
+ tensor_backend_id(node) = cur_backend_id;
SET_CAUSE(node, "2.3");
}
}
@@ -1225,17 +1217,17 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
// pass 2.4 expand rest down
{
- ggml_tallocr_t cur_allocr = NULL;
+ int cur_backend_id = -1;
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
if (ggml_is_view_op(node->op)) {
continue;
}
- ggml_tallocr_t node_allocr = node_allocr(node);
- if (node_allocr != NULL) {
- cur_allocr = node_allocr;
+ int tensor_backend_id = tensor_backend_id(node);
+ if (tensor_backend_id != -1) {
+ cur_backend_id = tensor_backend_id;
} else {
- node_allocr(node) = cur_allocr;
+ tensor_backend_id(node) = cur_backend_id;
SET_CAUSE(node, "2.4");
}
}
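
Passes 2.1-2.4 are the same propagation loop run four times: restricted to non-CPU backends going up then down (2.1, 2.2), then unrestricted in both directions (2.3, 2.4). A hedged consolidation of the shared pattern; the helper name is illustrative and a per-node id array stands in for the hash-table lookup used by the real code:

    // sketch only, not the committed code: one direction-parameterized pass
    static void expand_pass(struct ggml_cgraph * graph, int * backend_ids,
                            int n_backends, bool upward, bool skip_cpu) {
        int cur_backend_id = -1;
        int i    = upward ? graph->n_nodes - 1 : 0;
        int step = upward ? -1 : 1;
        for (; i >= 0 && i < graph->n_nodes; i += step) {
            if (ggml_is_view_op(graph->nodes[i]->op)) {
                continue;
            }
            if (backend_ids[i] != -1) {
                // remember the last assignment, optionally ignoring the CPU
                // backend (lowest priority, id n_backends - 1) so it does not
                // pull neighboring nodes off the GPU
                cur_backend_id = (skip_cpu && backend_ids[i] == n_backends - 1)
                                     ? -1 : backend_ids[i];
            } else {
                backend_ids[i] = cur_backend_id;  // propagate to unassigned node
            }
        }
    }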
@@ -1247,9 +1239,9 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
// pass 3: assign backends to remaining src from dst and view_src
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
- ggml_tallocr_t cur_allocr = node_allocr(node);
- if (node->view_src != NULL && cur_allocr == NULL) {
- cur_allocr = node_allocr(node) = node_allocr(node->view_src);
+ int cur_backend_id = tensor_backend_id(node);
+ if (node->view_src != NULL && cur_backend_id == -1) {
+ cur_backend_id = tensor_backend_id(node) = tensor_backend_id(node->view_src);
SET_CAUSE(node, "3.vsrc");
}
for (int j = 0; j < GGML_MAX_SRC; j++) {
@@ -1257,14 +1249,14 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
if (src == NULL) {
break;
}
- ggml_tallocr_t src_allocr = node_allocr(src);
- if (src_allocr == NULL) {
+ int src_backend_id = tensor_backend_id(src);
+ if (src_backend_id == -1) {
if (src->view_src != NULL) {
// views are always on the same backend as the source
- node_allocr(src) = node_allocr(src->view_src);
+ tensor_backend_id(src) = tensor_backend_id(src->view_src);
SET_CAUSE(src, "3.vsrc");
} else {
- node_allocr(src) = cur_allocr;
+ tensor_backend_id(src) = cur_backend_id;
SET_CAUSE(src, "3.cur");
}
}
@@ -1281,15 +1273,14 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
if (!ggml_is_view_op(node->op)) {
- sched->splits[0].tallocr = node_allocr(node);
+ sched->splits[0].backend_id = tensor_backend_id(node);
break;
}
}
sched->splits[0].i_start = 0;
sched->splits[0].n_inputs = 0;
memset(sched->splits[0].inputs, 0, sizeof(sched->splits[0].inputs)); //HACK
- ggml_tallocr_t cur_allocr = sched->splits[0].tallocr;
- size_t cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ int cur_backend_id = sched->splits[0].backend_id;
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
@@ -1297,19 +1288,18 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
continue;
}
- ggml_tallocr_t node_allocr = node_allocr(node);
+ int tensor_backend_id = tensor_backend_id(node);
- GGML_ASSERT(node_allocr != NULL); // all nodes should be assigned by now
+ GGML_ASSERT(tensor_backend_id != -1); // all nodes should be assigned by now
- if (node_allocr != cur_allocr) {
+ if (tensor_backend_id != cur_backend_id) {
sched->splits[cur_split].i_end = i;
cur_split++;
GGML_ASSERT(cur_split < GGML_MAX_SPLITS);
- sched->splits[cur_split].tallocr = node_allocr;
+ sched->splits[cur_split].backend_id = tensor_backend_id;
sched->splits[cur_split].i_start = i;
sched->splits[cur_split].n_inputs = 0;
- cur_allocr = node_allocr;
- cur_backend_id = sched_allocr_prio(sched, cur_allocr);
+ cur_backend_id = tensor_backend_id;
}
// find inputs that are not on the same backend
@@ -1318,43 +1308,25 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
if (src == NULL) {
break;
}
- ggml_tallocr_t src_allocr = node_allocr(src);
- GGML_ASSERT(src_allocr != NULL); // all inputs should be assigned by now
- if (src_allocr != node_allocr) {
+ int src_backend_id = tensor_backend_id(src);
+ assert(src_backend_id != -1); // all inputs should be assigned by now
+ if (src_backend_id != tensor_backend_id) {
// create a copy of the input in the split's backend
size_t id = hash_id(src);
- if (sched->node_copies[id][cur_backend_id] == NULL) {
- ggml_backend_t backend = get_allocr_backend(sched, cur_allocr);
+ if (sched->tensor_copies[id][cur_backend_id] == NULL) {
+ ggml_backend_t backend = sched->backends[cur_backend_id];
struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
ggml_format_name(tensor_copy, "%s#%s", ggml_backend_name(backend), src->name);
- sched->node_copies[id][cur_backend_id] = tensor_copy;
- node_allocr(tensor_copy) = cur_allocr;
+ sched->tensor_copies[id][cur_backend_id] = tensor_copy;
+ tensor_backend_id(tensor_copy) = cur_backend_id;
SET_CAUSE(tensor_copy, "4.cpy");
int n_inputs = sched->splits[cur_split].n_inputs++;
GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
sched->splits[cur_split].inputs[n_inputs] = src;
}
- node->src[j] = sched->node_copies[id][cur_backend_id];
-
-#if 0
- // check if the input is already in the split
- bool found = false;
- for (int k = 0; k < sched->splits[cur_split].n_inputs; k++) {
- if (sched->splits[cur_split].inputs[k] == src) {
- found = true;
- break;
- }
- }
-
- if (!found) {
- int n_inputs = sched->splits[cur_split].n_inputs++;
- //printf("split %d input %d: %s (%s)\n", cur_split, n_inputs, src->name, ggml_backend_name(get_allocr_backend(sched, src_allocr)));
- GGML_ASSERT(n_inputs < GGML_MAX_SPLIT_INPUTS);
- sched->splits[cur_split].inputs[n_inputs] = src;
- }
-#endif
+ node->src[j] = sched->tensor_copies[id][cur_backend_id];
}
}
}
@@ -1369,30 +1341,30 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
// sanity check: all sources should have the same backend as the node
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
- ggml_tallocr_t node_allocr = node_allocr(node);
- if (node_allocr == NULL) {
+ ggml_backend_t tensor_backend = tensor_backend(node);
+ if (tensor_backend == NULL) {
fprintf(stderr, "!!!!!!! %s has no backend\n", node->name);
}
- if (node->view_src != NULL && node_allocr != node_allocr(node->view_src)) {
+ if (node->view_src != NULL && tensor_backend != tensor_backend(node->view_src)) {
fprintf(stderr, "!!!!!!! %s has backend %s, view_src %s has backend %s\n",
- node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
- node->view_src->name, node_allocr(node->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(node->view_src))) : "NULL");
+ node->name, tensor_backend ? ggml_backend_name(tensor_backend) : "NULL",
+ node->view_src->name, tensor_backend(node->view_src) ? ggml_backend_name(tensor_backend(node->view_src)) : "NULL");
}
for (int j = 0; j < GGML_MAX_SRC; j++) {
struct ggml_tensor * src = node->src[j];
if (src == NULL) {
break;
}
- ggml_tallocr_t src_allocr = node_allocr(src);
- if (src_allocr != node_allocr /* && src_backend != NULL */) { // ignore nulls for now
+ ggml_backend_t src_backend = tensor_backend(src);
+ if (src_backend != tensor_backend /* && src_backend != NULL */) {
fprintf(stderr, "!!!! %s has backend %s, src %d (%s) has backend %s\n",
- node->name, node_allocr ? ggml_backend_name(get_allocr_backend(sched, node_allocr)) : "NULL",
- j, src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL");
+ node->name, tensor_backend ? ggml_backend_name(tensor_backend) : "NULL",
+ j, src->name, src_backend ? ggml_backend_name(src_backend) : "NULL");
}
- if (src->view_src != NULL && src_allocr != node_allocr(src->view_src)) {
+ if (src->view_src != NULL && src_backend != tensor_backend(src->view_src)) {
fprintf(stderr, "!!!!!!! [src] %s has backend %s, view_src %s has backend %s\n",
- src->name, src_allocr ? ggml_backend_name(get_allocr_backend(sched, src_allocr)) : "NULL",
- src->view_src->name, node_allocr(src->view_src) ? ggml_backend_name(get_allocr_backend(sched, node_allocr(src->view_src))) : "NULL");
+ src->name, src_backend ? ggml_backend_name(src_backend) : "NULL",
+ src->view_src->name, tensor_backend(src->view_src) ? ggml_backend_name(tensor_backend(src->view_src)) : "NULL");
}
}
}
@@ -1406,32 +1378,45 @@ static void sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * g
struct ggml_backend_sched_split * split = &sched->splits[i];
split->graph = ggml_graph_view(graph, split->i_start, split->i_end);
- // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
for (int j = 0; j < split->n_inputs; j++) {
struct ggml_tensor * input = split->inputs[j];
- struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][sched_allocr_prio(sched, split->tallocr)];
+ struct ggml_tensor * input_cpy = sched->tensor_copies[hash_id(input)][split->backend_id];
+
// add a dependency to the input source so that it is not freed before the copy is done
- GGML_ASSERT(input_cpy->src[0] == NULL || input_cpy->src[0] == input);
- input_cpy->src[0] = input;
+ struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input);
+ sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(input);
+ graph_copy->nodes[graph_copy->n_nodes++] = input_dep;
+
+ // add a dependency to the input copy so that it is allocated at the start of the split
+ sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id;
graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
}
for (int j = split->i_start; j < split->i_end; j++) {
+ sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]);
graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
}
}
sched->graph = graph_copy;
}
-static void sched_alloc_splits(ggml_backend_sched_t sched) {
- ggml_gallocr_alloc_graph_n(
- sched->galloc,
- sched->graph,
- sched->hash_set,
- sched->node_talloc);
+static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
+ // ggml_gallocr_reserve_n(sched->galloc, sched->graph, sched->node_backend_ids);
+ if (!ggml_gallocr_alloc_graph(sched->galloc, sched->graph)) {
+#ifndef NDEBUG
+ fprintf(stderr, "ggml_backend_sched: failed to allocate graph, reserving\n");
+#endif
+ ggml_gallocr_reserve_n(sched->galloc, sched->graph, sched->node_backend_ids);
+ if (!ggml_gallocr_alloc_graph(sched->galloc, sched->graph)) {
+ fprintf(stderr, "ggml_backend_sched: failed to allocate graph\n");
+ return false;
+ }
+ }
+
+ return true;
}
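
The shape of the allocation path above is try-then-reserve-then-retry: the cheap ggml_gallocr_alloc_graph succeeds while the graph still fits the existing reservation, and only a topology or size change forces the more expensive ggml_gallocr_reserve_n remeasurement. From the caller's side, the contract reduces to this sketch:

    // caller-side sketch of the try/reserve/retry contract
    if (!ggml_gallocr_alloc_graph(galloc, graph)) {          // fast path failed
        if (!ggml_gallocr_reserve_n(galloc, graph, ids) ||   // remeasure buffers
            !ggml_gallocr_alloc_graph(galloc, graph)) {      // retry once
            // allocation genuinely failed; surface the error to the caller
        }
    }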
-static void sched_compute_splits(ggml_backend_sched_t sched) {
+static bool ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
uint64_t copy_us[GGML_MAX_BACKENDS] = {0};
uint64_t compute_us[GGML_MAX_BACKENDS] = {0};
@@ -1439,20 +1424,18 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {
for (int i = 0; i < sched->n_splits; i++) {
struct ggml_backend_sched_split * split = &splits[i];
- ggml_backend_t split_backend = get_allocr_backend(sched, split->tallocr);
- int split_backend_id = sched_backend_prio(sched, split_backend);
+ int split_backend_id = split->backend_id;
+ ggml_backend_t split_backend = sched->backends[split_backend_id];
// copy the input tensors to the split backend
uint64_t copy_start_us = ggml_time_us();
for (int j = 0; j < split->n_inputs; j++) {
struct ggml_tensor * input = split->inputs[j];
- struct ggml_tensor * input_cpy = sched->node_copies[hash_id(input)][split_backend_id];
+ struct ggml_tensor * input_cpy = sched->tensor_copies[hash_id(input)][split_backend_id];
GGML_ASSERT(input->buffer != NULL);
GGML_ASSERT(input_cpy->buffer != NULL);
- // TODO: avoid this copy if it was already copied in a previous split, and the input didn't change
- // this is important to avoid copying constants such as KQ_mask and inp_pos multiple times
ggml_backend_tensor_copy_async(split_backend, input, input_cpy);
}
//ggml_backend_synchronize(split_backend); // necessary to measure copy time
@@ -1468,7 +1451,9 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {
uint64_t compute_start_us = ggml_time_us();
if (!sched->callback_eval) {
- ggml_backend_graph_compute(split_backend, &split->graph);
+ if (!ggml_backend_graph_compute(split_backend, &split->graph)) {
+ return false;
+ }
//ggml_backend_synchronize(split_backend); // necessary to measure compute time
} else {
// similar to ggml_backend_compare_graph_backend
@@ -1488,7 +1473,9 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {
struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);
- ggml_backend_graph_compute(split_backend, &gv);
+ if (!ggml_backend_graph_compute(split_backend, &gv)) {
+ return false;
+ }
if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
break;
@@ -1510,19 +1497,8 @@ static void sched_compute_splits(ggml_backend_sched_t sched) {
}
}
#endif
-}
-
-static void sched_reset(ggml_backend_sched_t sched) {
- for (int i = 0; i < sched->n_backends; i++) {
- ggml_tallocr_reset(sched->tallocs[i]);
- }
- // reset state for the next run
- size_t hash_size = sched->hash_set.size;
- memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size);
- memset(sched->node_talloc, 0, sizeof(sched->node_talloc[0]) * hash_size);
- memset(sched->node_copies, 0, sizeof(sched->node_copies[0]) * hash_size);
- sched->is_reset = true;
+ return true;
}
ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_backend_buffer_type_t * bufts, int n_backends, size_t graph_size) {
@@ -1532,9 +1508,10 @@ ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_back
struct ggml_backend_sched * sched = calloc(sizeof(struct ggml_backend_sched), 1);
// initialize hash table
- sched->hash_set = ggml_hash_set_new(graph_size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
- sched->node_talloc = calloc(sizeof(sched->node_talloc[0]) * sched->hash_set.size, 1);
- sched->node_copies = calloc(sizeof(sched->node_copies[0]) * sched->hash_set.size, 1);
+ sched->hash_set = ggml_hash_set_new(graph_size + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
+ sched->tensor_backend_id = calloc(sizeof(sched->tensor_backend_id[0]), sched->hash_set.size);
+ sched->tensor_copies = calloc(sizeof(sched->tensor_copies[0]), sched->hash_set.size);
+ sched->node_backend_ids = calloc(sizeof(sched->node_backend_ids[0]), graph_size);
sched->n_backends = n_backends;
for (int i = 0; i < n_backends; i++) {
@@ -1542,14 +1519,9 @@ ggml_backend_sched_t ggml_backend_sched_new(ggml_backend_t * backends, ggml_back
sched->bufts[i] = bufts ? bufts[i] : ggml_backend_get_default_buffer_type(backends[i]);
}
- sched->galloc = ggml_gallocr_new();
+ sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);
- // init measure allocs for each backend
- for (int i = 0; i < n_backends; i++) {
- sched->tallocs[i] = ggml_tallocr_new_measure_from_buft(sched->bufts[i]);
- }
-
- sched_reset(sched);
+ ggml_backend_sched_reset(sched);
return sched;
}
@@ -1558,49 +1530,54 @@ void ggml_backend_sched_free(ggml_backend_sched_t sched) {
if (sched == NULL) {
return;
}
- for (int i = 0; i < sched->n_backends; i++) {
- ggml_tallocr_free(sched->tallocs[i]);
- }
ggml_gallocr_free(sched->galloc);
ggml_free(sched->ctx);
free(sched->hash_set.keys);
- free(sched->node_talloc);
- free(sched->node_copies);
+ free(sched->tensor_backend_id);
+ free(sched->tensor_copies);
+ free(sched->node_backend_ids);
free(sched);
}
-void ggml_backend_sched_init_measure(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
- GGML_ASSERT(ggml_tallocr_is_measure(sched->tallocs[0])); // can only be initialized once
+void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
+ // reset state for the next run
+ size_t hash_size = sched->hash_set.size;
+ memset(sched->hash_set.keys, 0, sizeof(sched->hash_set.keys[0]) * hash_size); // NOLINT
+ memset(sched->tensor_backend_id, -1, sizeof(sched->tensor_backend_id[0]) * hash_size);
+ memset(sched->tensor_copies, 0, sizeof(sched->tensor_copies[0]) * hash_size);
- sched_split_graph(sched, measure_graph);
- sched_alloc_splits(sched);
+ sched->is_reset = true;
+}
- // allocate buffers and reset allocators
- for (int i = 0; i < sched->n_backends; i++) {
- size_t size = ggml_tallocr_max_size(sched->tallocs[i]);
- ggml_tallocr_free(sched->tallocs[i]);
- sched->tallocs[i] = ggml_tallocr_new_from_buft(sched->bufts[i], size);
+bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
+ ggml_backend_sched_split_graph(sched, measure_graph);
+
+ if (!ggml_gallocr_reserve_n(sched->galloc, sched->graph, sched->node_backend_ids)) {
+ return false;
}
- sched_reset(sched);
+ ggml_backend_sched_reset(sched);
+ return true;
}
-void ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
+bool ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + GGML_MAX_SPLITS*GGML_MAX_SPLIT_INPUTS);
if (!sched->is_reset) {
- sched_reset(sched);
+ ggml_backend_sched_reset(sched);
}
- sched_split_graph(sched, graph);
- sched_alloc_splits(sched);
- sched_compute_splits(sched);
-}
+ ggml_backend_sched_split_graph(sched, graph);
+ if (!ggml_backend_sched_alloc_splits(sched)) {
+ return false;
+ }
-void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
- sched_reset(sched);
-}
+ if (!ggml_backend_sched_compute_splits(sched)) {
+ return false;
+ }
+ return true;
+}
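
With the API now returning bool instead of aborting, a typical caller checks each step. A hedged end-to-end usage sketch of the scheduler functions touched in this commit (backend setup elided, error paths simplified):

    // usage sketch, assuming `backends`/`n_backends` and the graphs already exist
    ggml_backend_sched_t sched =
        ggml_backend_sched_new(backends, NULL, n_backends, GGML_DEFAULT_GRAPH_SIZE);

    // one-time reservation from a worst-case measure graph
    if (!ggml_backend_sched_reserve(sched, measure_graph)) {
        fprintf(stderr, "sched: failed to reserve buffers\n");
    }

    // per iteration: split, allocate, and compute the real graph
    if (!ggml_backend_sched_graph_compute(sched, graph)) {
        fprintf(stderr, "sched: graph compute failed\n");
    }

    ggml_backend_sched_reset(sched);   // prepare state for the next graph
    ggml_backend_sched_free(sched);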
void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
sched->callback_eval = callback;
@@ -1611,37 +1588,30 @@ int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
return sched->n_splits;
}
-ggml_tallocr_t ggml_backend_sched_get_tallocr(ggml_backend_sched_t sched, ggml_backend_t backend) {
- int backend_index = sched_backend_prio(sched, backend);
- GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
- return sched->tallocs[backend_index];
-}
-
-ggml_backend_buffer_t ggml_backend_sched_get_buffer(ggml_backend_sched_t sched, ggml_backend_t backend) {
- int backend_index = sched_backend_prio(sched, backend);
+size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) {
+ int backend_index = ggml_backend_sched_backend_id(sched, backend);
GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
- return ggml_tallocr_get_buffer(sched->tallocs[backend_index]);
+ return ggml_gallocr_get_buffer_size(sched->galloc, backend_index);
}
void ggml_backend_sched_set_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
- int backend_index = sched_backend_prio(sched, backend);
+ int backend_index = ggml_backend_sched_backend_id(sched, backend);
GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
- node_allocr(node) = sched->tallocs[backend_index];
+ tensor_backend_id(node) = backend_index;
}
ggml_backend_t ggml_backend_sched_get_node_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
- ggml_tallocr_t allocr = node_allocr(node);
- if (allocr == NULL) {
+ int backend_index = tensor_backend_id(node);
+ if (backend_index == -1) {
return NULL;
}
- return get_allocr_backend(sched, allocr);
+ return sched->backends[backend_index];
}
// utils
void ggml_backend_view_init(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
GGML_ASSERT(tensor->buffer == NULL);
- //GGML_ASSERT(tensor->data == NULL); // views of pre-allocated tensors may have the data set in ggml_new_tensor, but still need to be initialized by the backend
GGML_ASSERT(tensor->view_src != NULL);
GGML_ASSERT(tensor->view_src->buffer != NULL);
GGML_ASSERT(tensor->view_src->data != NULL);
@@ -1665,7 +1635,7 @@ void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor
ggml_backend_buffer_init_tensor(buffer, tensor);
}
-static struct ggml_tensor * graph_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies,
+static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies,
struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) {
GGML_ASSERT(src != NULL);
@@ -1678,7 +1648,7 @@ static struct ggml_tensor * graph_dup_tensor(struct ggml_hash_set hash_set, stru
struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src);
if (src->view_src != NULL) {
- dst->view_src = graph_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
+ dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
dst->view_offs = src->view_offs;
}
dst->op = src->op;
@@ -1691,14 +1661,14 @@ static struct ggml_tensor * graph_dup_tensor(struct ggml_hash_set hash_set, stru
if (s == NULL) {
break;
}
- dst->src[i] = graph_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
+ dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
}
node_copies[id] = dst;
return dst;
}
-static void graph_init_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) {
+static void graph_copy_init_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) {
size_t id = ggml_hash_find(hash_set, src);
if (node_init[id]) {
return;
@@ -1707,7 +1677,7 @@ static void graph_init_tensor(struct ggml_hash_set hash_set, struct ggml_tensor
struct ggml_tensor * dst = node_copies[id];
if (dst->view_src != NULL) {
- graph_init_tensor(hash_set, node_copies, node_init, src->view_src);
+ graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src);
ggml_backend_view_init(dst->view_src->buffer, dst);
}
else {
@@ -1720,17 +1690,17 @@ static void graph_init_tensor(struct ggml_hash_set hash_set, struct ggml_tensor
if (s == NULL) {
break;
}
- graph_init_tensor(hash_set, node_copies, node_init, s);
+ graph_copy_init_tensor(hash_set, node_copies, node_init, s);
}
}
struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) {
struct ggml_hash_set hash_set = {
/* .size = */ graph->visited_hash_table.size,
- /* .keys = */ calloc(sizeof(hash_set.keys[0]) * graph->visited_hash_table.size, 1)
+ /* .keys = */ calloc(sizeof(hash_set.keys[0]), graph->visited_hash_table.size) // NOLINT
};
- struct ggml_tensor ** node_copies = calloc(sizeof(node_copies[0]) * hash_set.size, 1);
- bool * node_init = calloc(sizeof(node_init[0]) * hash_set.size, 1);
+ struct ggml_tensor ** node_copies = calloc(sizeof(node_copies[0]), hash_set.size); // NOLINT
+ bool * node_init = calloc(sizeof(node_init[0]), hash_set.size);
struct ggml_init_params params = {
/* .mem_size = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
@@ -1759,7 +1729,7 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
// dup nodes
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
- graph_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
+ graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
}
// allocate nodes
@@ -1784,7 +1754,7 @@ struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, s
// copy data and init views
for (int i = 0; i < graph->n_nodes; i++) {
struct ggml_tensor * node = graph->nodes[i];
- graph_init_tensor(hash_set, node_copies, node_init, node);
+ graph_copy_init_tensor(hash_set, node_copies, node_init, node);
}
// build graph copy