author     Georgi Gerganov <ggerganov@gmail.com>    2023-11-19 19:16:07 +0200
committer  Georgi Gerganov <ggerganov@gmail.com>    2023-11-19 19:16:07 +0200
commit     dae06c06e5c6232ae2be4d567dd5101e1e96c814 (patch)
tree       2c7f4f9c2c17b61ed1b3a69a80e8c3fc4367e8d0
parent     05e8301e4593e2a67b4bae24f093dd12ce5cc7c2 (diff)
Revert "finetune : add --n-gpu-layers flag info to --help (#4128)"
This reverts commit 05e8301e4593e2a67b4bae24f093dd12ce5cc7c2.
-rw-r--r--  examples/finetune/finetune.cpp  1
1 files changed, 0 insertions, 1 deletions
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index e991e37e..af46e44a 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1288,7 +1288,6 @@ static void train_print_usage(int argc, char ** argv, const struct train_params
     fprintf(stderr, "  --model-base FNAME         model path from which to load base model (default '%s')\n", params->fn_model_base);
     fprintf(stderr, "  --lora-out FNAME           path to save llama lora (default '%s')\n", params->fn_lora_out);
     fprintf(stderr, "  --only-write-lora          only save llama lora, don't do any training. use this if you only want to convert a checkpoint to a lora adapter.\n");
-    fprintf(stderr, "  --n-gpu-layers N           Number of model layers to offload to GPU (default 0).\n");
     fprintf(stderr, "  --norm-rms-eps F           RMS-Norm epsilon value (default %f)\n", params->f_norm_rms_eps);
     fprintf(stderr, "  --rope-freq-base F         Frequency base for ROPE (default %f)\n", params->rope_freq_base);
     fprintf(stderr, "  --rope-freq-scale F        Frequency scale for ROPE (default %f)\n", params->rope_freq_scale);
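For context, a `--help` entry like the one removed above is normally paired with a matching branch in the program's argument-parsing loop; this revert touches only the usage text printed by `train_print_usage`. The following is a minimal, self-contained sketch of that usage-text/parser pairing. It is illustrative only: the names (`train_opts`, `print_usage`, `parse_args`) are hypothetical and are not the actual finetune.cpp code.

```cpp
// Minimal sketch: a --help line and the parser branch it describes.
// Hypothetical names; not taken from llama.cpp's finetune example.
#include <cstdio>
#include <cstdlib>
#include <cstring>

struct train_opts {
    int n_gpu_layers = 0;   // layers offloaded to GPU; 0 keeps everything on CPU
};

static void print_usage(const char * prog) {
    fprintf(stderr, "usage: %s [options]\n", prog);
    fprintf(stderr, "  --n-gpu-layers N   number of model layers to offload to GPU (default 0)\n");
}

static bool parse_args(int argc, char ** argv, train_opts & opts) {
    for (int i = 1; i < argc; ++i) {
        if (strcmp(argv[i], "--n-gpu-layers") == 0) {
            // the flag takes one integer argument
            if (++i >= argc) { print_usage(argv[0]); return false; }
            opts.n_gpu_layers = atoi(argv[i]);
        } else {
            // unknown option: show the same usage text and fail
            print_usage(argv[0]);
            return false;
        }
    }
    return true;
}

int main(int argc, char ** argv) {
    train_opts opts;
    if (!parse_args(argc, argv, opts)) return 1;
    printf("n_gpu_layers = %d\n", opts.n_gpu_layers);
    return 0;
}
```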