path: root/common/train.cpp
author     Georgi Gerganov <ggerganov@gmail.com>  2024-01-31 17:30:17 +0200
committer  GitHub <noreply@github.com>            2024-01-31 17:30:17 +0200
commit     5cb04dbc16d1da38c8fdcc0111b40e67d00dd1c3 (patch)
tree       3ef8dc640d5c08466309c09a8ac2963bb760af06 /common/train.cpp
parent     efb7bdbbd061d087c788598b97992c653f992ddd (diff)
llama : remove LLAMA_MAX_DEVICES and LLAMA_SUPPORTS_GPU_OFFLOAD (#5240)
* llama : remove LLAMA_MAX_DEVICES from llama.h (ggml-ci)
* Update llama.cpp (Co-authored-by: slaren <slarengh@gmail.com>)
* server : remove LLAMA_MAX_DEVICES (ggml-ci)
* llama : remove LLAMA_SUPPORTS_GPU_OFFLOAD (ggml-ci)
* train : remove LLAMA_SUPPORTS_GPU_OFFLOAD
* readme : add deprecation notice
* readme : change deprecation notice to "remove" and fix url
* llama : remove gpu includes from llama.h (ggml-ci)

Co-authored-by: slaren <slarengh@gmail.com>
Diffstat (limited to 'common/train.cpp')
-rw-r--r--  common/train.cpp | 12
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/common/train.cpp b/common/train.cpp
index e6f2f7a2..e4c3d5df 100644
--- a/common/train.cpp
+++ b/common/train.cpp
@@ -1363,12 +1363,12 @@ bool consume_common_train_arg(
*invalid_param = true;
return true;
}
-#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
- params->n_gpu_layers = std::stoi(argv[i]);
-#else
- fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
- fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
-#endif
+ if (llama_supports_gpu_offload()) {
+ params->n_gpu_layers = std::stoi(argv[i]);
+ } else {
+ fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
+ fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+ }
} else if (arg == "-h" || arg == "--help") {
params->print_usage = true;
return true;
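
For context beyond this hunk: the commit replaces compile-time macros with runtime queries, so a single build can report its own capabilities. Below is a minimal sketch, not part of the commit, assuming the llama.h API introduced here (llama_max_devices() and llama_supports_gpu_offload()):

// Minimal sketch: query at run time what used to be compile-time macros.
#include <cstdio>
#include "llama.h"

int main() {
    // llama_max_devices() supersedes the removed LLAMA_MAX_DEVICES macro.
    std::printf("max devices : %zu\n", llama_max_devices());
    // llama_supports_gpu_offload() supersedes LLAMA_SUPPORTS_GPU_OFFLOAD,
    // mirroring the check added to consume_common_train_arg() above.
    std::printf("gpu offload : %s\n", llama_supports_gpu_offload() ? "yes" : "no");
    return 0;
}

Moving the check from the preprocessor to a function call is what lets the train.cpp hunk above keep a single code path and still print a warning when the binary was built without GPU offload support.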