diff options
author | Jiří Podivín <66251151+jpodivin@users.noreply.github.com> | 2023-11-17 16:19:16 +0100 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-11-17 17:19:16 +0200 |
commit | ba4cf5c0bf37a729d29e899dadf14541cddd23d4 (patch) | |
tree | 9dcf5b40fc32a57c0c7b602a8e47cf8c4b7583a6 /examples/finetune/finetune.cpp | |
parent | e85bb1a8e736228a1f0d965777de5f77f22834b8 (diff) |
train : move number of gpu layers argument parsing to common/train.cpp (#4074)
- introduces a help entry for the argument
- cuts the '--gpu-layers' form in order to simplify usage and documentation.
Signed-off-by: Jiri Podivin <jpodivin@gmail.com>
Co-authored-by: Jiri Podivin <jpodivin@redhat.com>
Diffstat (limited to 'examples/finetune/finetune.cpp')
-rw-r--r-- | examples/finetune/finetune.cpp | 11 |
1 file changed, 0 insertions, 11 deletions
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp index 7fecce25..af46e44a 100644 --- a/examples/finetune/finetune.cpp +++ b/examples/finetune/finetune.cpp @@ -1460,17 +1460,6 @@ static bool train_params_parse(int argc, char ** argv, struct train_params * par } params->n_rank_w3 = std::stoi(argv[i]); params->custom_n_rank_w3 = true; - } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") { - if (++i >= argc) { - invalid_param = true; - break; - } -#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD - params->common.n_gpu_layers = std::stoi(argv[i]); -#else - fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n"); - fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n"); -#endif } else { fprintf(stderr, "error: unknown argument: %s\n", arg.c_str()); train_print_usage(argc, argv, &default_params); |