summary refs log tree commit diff
path: root/ggml-alloc.c
diff options
context:
space:
mode:
Diffstat (limited to 'ggml-alloc.c')
-rw-r--r--	ggml-alloc.c	16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/ggml-alloc.c b/ggml-alloc.c
index d3049efb..a97436b1 100644
--- a/ggml-alloc.c
+++ b/ggml-alloc.c
@@ -449,11 +449,10 @@ static void init_view(ggml_gallocr_t galloc, struct ggml_tensor * view, bool upd
if (update_backend) {
view->backend = view->view_src->backend;
}
- view->buffer = view->view_src->buffer;
+ // views are initialized in the alloc buffer rather than the view_src buffer
+ view->buffer = alloc->buffer;
view->data = (char *)view->view_src->data + view->view_offs;
- // FIXME: the view should be initialized by the owning buffer, but currently this breaks the CUDA backend
- // due to the ggml_tensor_extra_gpu ring buffer overwriting the KV cache extras
assert(ggml_tallocr_is_measure(alloc) || !view->buffer || view->buffer->buft == alloc->buffer->buft);
if (!alloc->measure) {
@@ -736,6 +735,10 @@ void ggml_allocr_set_parse_seq(ggml_allocr_t alloc, const int * list, int n) {
}
void ggml_allocr_free(ggml_allocr_t alloc) {
+ if (alloc == NULL) {
+ return;
+ }
+
ggml_gallocr_free(alloc->galloc);
ggml_tallocr_free(alloc->talloc);
free(alloc);
@@ -775,7 +778,7 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte
}
if (nbytes == 0) {
- fprintf(stderr, "%s: no tensors to allocate\n", __func__);
+ // all the tensors in the context are already allocated
return NULL;
}
@@ -789,6 +792,11 @@ ggml_backend_buffer_t ggml_backend_alloc_ctx_tensors_from_buft(struct ggml_conte
} else {
ggml_backend_view_init(buffer, t);
}
+ } else {
+ if (t->view_src != NULL) {
+ // view of a pre-allocated tensor
+ ggml_backend_view_init(buffer, t);
+ }
}
}