author     Clark Saben <76020733+csaben@users.noreply.github.com>  2023-11-19 11:56:38 -0500
committer  GitHub <noreply@github.com>  2023-11-19 18:56:38 +0200
commit     05e8301e4593e2a67b4bae24f093dd12ce5cc7c2 (patch)
tree       259a916cb4361e52081c88e2c564cda2abc75032 /examples/finetune/finetune.cpp
parent     936c79b2275a8f15f3512e63de615c676904d650 (diff)
finetune : add --n-gpu-layers flag info to --help (#4128)
Diffstat (limited to 'examples/finetune/finetune.cpp')
-rw-r--r--  examples/finetune/finetune.cpp  1
1 file changed, 1 insertion, 0 deletions
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index af46e44a..e991e37e 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1288,6 +1288,7 @@ static void train_print_usage(int argc, char ** argv, const struct train_params
fprintf(stderr, " --model-base FNAME model path from which to load base model (default '%s')\n", params->fn_model_base);
fprintf(stderr, " --lora-out FNAME path to save llama lora (default '%s')\n", params->fn_lora_out);
fprintf(stderr, " --only-write-lora only save llama lora, don't do any training. use this if you only want to convert a checkpoint to a lora adapter.\n");
+ fprintf(stderr, " --n-gpu-layers N Number of model layers to offload to GPU (default 0).\n");
fprintf(stderr, " --norm-rms-eps F RMS-Norm epsilon value (default %f)\n", params->f_norm_rms_eps);
fprintf(stderr, " --rope-freq-base F Frequency base for ROPE (default %f)\n", params->rope_freq_base);
fprintf(stderr, " --rope-freq-scale F Frequency scale for ROPE (default %f)\n", params->rope_freq_scale);