author     slaren <slarengh@gmail.com>    2023-11-24 18:10:01 +0100
committer  GitHub <noreply@github.com>    2023-11-24 18:10:01 +0100
commit     e9c13ff78114af6fc6a4f27cc8dcdda0f3d389fb (patch)
tree       0ce3b67be72545c6147fca117e57fefa32f526c7
parent     8a052c131ed3525313cdb84e5ae4e2b6cf8d2e24 (diff)
llama : set metal log callback correctly (#4204)
-rw-r--r--  llama.cpp  11
1 file changed, 9 insertions(+), 2 deletions(-)
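This commit makes a callback installed with llama_log_set also receive log output from the Metal backend: the llama_state constructor registers the current callback via ggml_metal_log_set_callback at startup, and llama_log_set re-registers it whenever the user changes it, instead of hard-wiring llama_log_callback_default inside llama_new_context_with_model. A minimal usage sketch follows (not part of this commit; my_stderr_logger is a hypothetical example name, and the callback signature follows ggml_log_callback from ggml.h):

// Sketch only: forward all llama.cpp log output, including Metal backend
// messages when built with GGML_USE_METAL, to a user-supplied callback.
// "my_stderr_logger" is a hypothetical example name, not part of this patch.
#include <cstdio>
#include "llama.h"

static void my_stderr_logger(ggml_log_level level, const char * text, void * user_data) {
    (void) level;       // could be used to filter by GGML_LOG_LEVEL_*
    (void) user_data;
    fputs(text, stderr);
}

int main() {
    // Register the callback once; with this patch it is also forwarded to
    // ggml_metal_log_set_callback, so Metal init/compile messages arrive here too.
    llama_log_set(my_stderr_logger, nullptr);
    // ... llama_load_model_from_file(...), llama_new_context_with_model(...), etc.
    return 0;
}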
diff --git a/llama.cpp b/llama.cpp
index 5b31f201..c5f4053f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -1118,6 +1118,12 @@ static std::string llama_token_to_piece(const struct llama_context * ctx, llama_
 //
 
 struct llama_state {
+    llama_state() {
+#ifdef GGML_USE_METAL
+        ggml_metal_log_set_callback(log_callback, log_callback_user_data);
+#endif
+    }
+
     // We save the log callback globally
     ggml_log_callback log_callback = llama_log_callback_default;
     void * log_callback_user_data = nullptr;
@@ -8569,8 +8575,6 @@ struct llama_context * llama_new_context_with_model(
 
 #ifdef GGML_USE_METAL
     if (model->n_gpu_layers > 0) {
-        ggml_metal_log_set_callback(llama_log_callback_default, NULL);
-
         ctx->ctx_metal = ggml_metal_init(1);
         if (!ctx->ctx_metal) {
             LLAMA_LOG_ERROR("%s: ggml_metal_init() failed\n", __func__);
@@ -9706,6 +9710,9 @@ const std::vector<std::pair<std::string, struct ggml_tensor *>> & llama_internal
 void llama_log_set(ggml_log_callback log_callback, void * user_data) {
     g_state.log_callback = log_callback ? log_callback : llama_log_callback_default;
     g_state.log_callback_user_data = user_data;
+#ifdef GGML_USE_METAL
+    ggml_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
+#endif
 }
 
 static void llama_log_internal_v(ggml_log_level level, const char * format, va_list args) {