author    | fraxy-v <65565042+fraxy-v@users.noreply.github.com> | 2024-05-19 01:44:42 +0300
committer | GitHub <noreply@github.com>                         | 2024-05-19 00:44:42 +0200
commit    | f5bf761747988ee1832766f7d1433739aff810da (patch)
tree      | 54601f9a26ed04531122e338ab55484547be2bf4 /llama.cpp
parent    | 059031b8c40e1f4ba60586842c5b1ed3ddf61842 (diff)
Capture CUDA logging output (#7298)
* logging: output capture in cuda module
* fix compile error
* fix: vsnprintf terminates with 0, string use not correct
* post review
* Update llama.cpp
Co-authored-by: slaren <slarengh@gmail.com>
* Update llama.cpp
Co-authored-by: slaren <slarengh@gmail.com>
---------
Co-authored-by: slaren <slarengh@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r-- | llama.cpp | 4
1 file changed, 4 insertions, 0 deletions
@@ -1697,6 +1697,8 @@ struct llama_state {
     llama_state() {
 #ifdef GGML_USE_METAL
         ggml_backend_metal_log_set_callback(log_callback, log_callback_user_data);
+#elif defined(GGML_USE_CUDA)
+        ggml_backend_cuda_log_set_callback(log_callback, log_callback_user_data);
 #endif
     }

@@ -18174,6 +18176,8 @@ void llama_log_set(ggml_log_callback log_callback, void * user_data) {
     g_state.log_callback_user_data = user_data;
 #ifdef GGML_USE_METAL
     ggml_backend_metal_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
+#elif defined(GGML_USE_CUDA)
+    ggml_backend_cuda_log_set_callback(g_state.log_callback, g_state.log_callback_user_data);
 #endif
 }
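
For context: after this change, CUDA backend messages go through the same callback that llama_log_set installs, rather than straight to stderr. The following is a minimal sketch of how a caller might capture that output; the callback signature follows ggml_log_callback (level, text, user_data), and the file name and my_log_callback helper are illustrative, not part of this patch.

#include <cstdio>
#include "llama.h"

// Hypothetical callback: forwards every log line (including CUDA backend
// output once this patch is applied) to the FILE* passed via user_data.
static void my_log_callback(ggml_log_level level, const char * text, void * user_data) {
    FILE * f = (FILE *) user_data;
    fputs(text, f);   // text arrives already formatted by the backend
    fflush(f);
    (void) level;     // level could be used to filter, e.g. only GGML_LOG_LEVEL_ERROR
}

int main() {
    FILE * log_file = fopen("llama.log", "w");

    // Route all llama.cpp / backend logging to our callback.
    llama_log_set(my_log_callback, log_file);

    // ... normal llama.cpp usage (backend init, model loading, inference) ...

    fclose(log_file);
    return 0;
}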