Diffstat (limited to 'examples')
 examples/finetune/finetune.cpp | 14 +++++++++++++-
 examples/finetune/finetune.sh  | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 47 insertions(+), 1 deletion(-)
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 35824cd2..60c7faa7 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -652,7 +652,7 @@ static struct ggml_tensor * llama_build_lora_finetune_graphs(
     GGML_ASSERT(tokens_input->type == GGML_TYPE_I32);
 
     auto add_to_f32 = [] (struct ggml_context * ctx, struct ggml_tensor * a, struct ggml_tensor * b) {
-        if (ggml_is_quantized(a->type)) {
+        if (ggml_is_quantized(a->type) || a->type == GGML_TYPE_F16) {
             return ggml_add_cast(ctx, a, b, GGML_TYPE_F32);
         } else if (a->type == GGML_TYPE_F32) {
             return ggml_add(ctx, a, b);
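With this hunk, add_to_f32 routes F16 base-model tensors through ggml_add_cast as well, so the LoRA delta is accumulated in F32 rather than hitting an unsupported-type path; in practice an f16 GGUF can now be passed directly as --model-base. A rough sketch of producing such a model with the repo's convert.py (the directory and output names are placeholders, and the exact convert.py options may differ between revisions):

    # assumption: convert.py accepts --outtype/--outfile in this form at this revision
    python3 convert.py $LLAMA_MODEL_DIR/open_llama_3b_v2 \
        --outtype f16 \
        --outfile $LLAMA_MODEL_DIR/openllama-3b-v2.gguf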
@@ -1459,6 +1459,17 @@ static bool train_params_parse(int argc, char ** argv, struct train_params * par
             }
             params->n_rank_w3 = std::stoi(argv[i]);
             params->custom_n_rank_w3 = true;
+        } else if (arg == "--gpu-layers" || arg == "-ngl" || arg == "--n-gpu-layers") {
+            if (++i >= argc) {
+                invalid_param = true;
+                break;
+            }
+#ifdef LLAMA_SUPPORTS_GPU_OFFLOAD
+            params->common.n_gpu_layers = std::stoi(argv[i]);
+#else
+            fprintf(stderr, "warning: not compiled with GPU offload support, --n-gpu-layers option will be ignored\n");
+            fprintf(stderr, "warning: see main README.md for information on enabling GPU BLAS support\n");
+#endif
         } else {
             fprintf(stderr, "error: unknown argument: %s\n", arg.c_str());
             train_print_usage(argc, argv, &default_params);
@@ -1545,6 +1556,7 @@ int main(int argc, char ** argv) {
     srand(params.common.seed);
 
     struct llama_model_params llama_mparams = llama_model_default_params();
+    llama_mparams.n_gpu_layers = params.common.n_gpu_layers;
     llama_mparams.vocab_only = false;
 
     printf("%s: model base = '%s'\n", __func__, params.fn_model_base);
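The new --gpu-layers / -ngl / --n-gpu-layers argument is parsed like the other integer options and forwarded into llama_model_params.n_gpu_layers, so layers of the base model can be offloaded when the binary was built with GPU offload support. A possible invocation (model and data paths are placeholders; the remaining flags appear in finetune.sh below):

    # assumption: finetune was built with GPU offload enabled (e.g. a cuBLAS build)
    ./finetune --model-base ./models/openllama-3b-v2.gguf \
        --train-data ./shakespeare.txt \
        --lora-out lora-shakespeare.bin \
        -ngl 25 --threads 10 --adam-iter 30 --batch 4 --ctx 64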
diff --git a/examples/finetune/finetune.sh b/examples/finetune/finetune.sh
new file mode 100644
index 00000000..079bfa11
--- /dev/null
+++ b/examples/finetune/finetune.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+cd `dirname $0`
+cd ../..
+
+EXE="./finetune"
+
+if [[ ! $LLAMA_MODEL_DIR ]]; then LLAMA_MODEL_DIR="./models"; fi
+if [[ ! $LLAMA_TRAINING_DIR ]]; then LLAMA_TRAINING_DIR="."; fi
+
+# MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2-q8_0.gguf" # This is the model the readme uses.
+MODEL="$LLAMA_MODEL_DIR/openllama-3b-v2.gguf" # An f16 model. Note: with "-g" this produces an f32-format .bin LoRA file, which "main --lora" does not yet support with GPU inferencing.
+
+while getopts "dg" opt; do
+  case $opt in
+    d)
+      DEBUGGER="gdb --args"
+      ;;
+    g)
+      EXE="./build/bin/Release/finetune"
+      GPUARG="--gpu-layers 25"
+      ;;
+  esac
+done
+
+$DEBUGGER $EXE \
+        --model-base $MODEL \
+        $GPUARG \
+        --checkpoint-in chk-ol3b-shakespeare-LATEST.gguf \
+        --checkpoint-out chk-ol3b-shakespeare-ITERATION.gguf \
+        --lora-out lora-ol3b-shakespeare-ITERATION.bin \
+        --train-data "$LLAMA_TRAINING_DIR/shakespeare.txt" \
+        --save-every 10 \
+        --threads 10 --adam-iter 30 --batch 4 --ctx 64 \
+        --use-checkpointing
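The script changes into the repository root before running, so it can be invoked from anywhere; -d wraps the run in gdb, and -g switches to the Release build binary and offloads 25 layers to the GPU. One way to point it at custom locations (both directories are placeholders):

    # assumption: the f16 GGUF and shakespeare.txt live under these placeholder paths
    LLAMA_MODEL_DIR=$HOME/models LLAMA_TRAINING_DIR=$HOME/data \
        ./examples/finetune/finetune.sh -g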