Diffstat (limited to 'llama.cpp')
 llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index ead1d421..42cedc7a 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -8003,7 +8003,7 @@ static int llama_apply_lora_from_file_internal(
             if (dest_t->backend == GGML_BACKEND_GPU || dest_t->backend == GGML_BACKEND_GPU_SPLIT) {
                 if (dest_t->type != GGML_TYPE_F16) {
                     throw std::runtime_error(format(
-                        "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models", __func__));
+                        "%s: error: the simultaneous use of LoRAs and GPU acceleration is only supported for f16 models. dest_t->type: %d", __func__, dest_t->type));
                 }
                 offload_func = ggml_cuda_assign_buffers;
                 offload_func_force_inplace = ggml_cuda_assign_buffers_force_inplace;
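
For context when reading the new message (this note is not part of the commit): dest_t->type is a ggml_type enum, so the error now reports the offending tensor type as a raw integer. A minimal sketch, assuming ggml's public ggml_type_name() helper, of turning that integer back into a readable type name:

    #include <cstdio>
    #include "ggml.h" // provides enum ggml_type and ggml_type_name()

    // Minimal sketch: decode the integer that the new error message prints.
    // ggml_type_name() is part of the public ggml API and returns the short
    // type name (e.g. "f16" for GGML_TYPE_F16). The value 2 below is only a
    // hypothetical example; substitute whatever "dest_t->type: %d" reported.
    int main() {
        int reported = 2; // hypothetical value taken from the error message
        std::printf("dest_t->type %d is %s\n",
                    reported, ggml_type_name((enum ggml_type) reported));
        return 0;
    }

Once the type is identified, the remedies implied by the message are to re-export the base model as f16 before applying the LoRA, or to run without GPU offload.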