author    Georgi Gerganov <ggerganov@gmail.com>  2024-02-25 12:09:09 +0200
committer GitHub <noreply@github.com>            2024-02-25 12:09:09 +0200
commit    ab336a9d5e5352ecdcdf4c12d2d54cf4ef82ce31
tree      5694ecb0647b10a6377a273737b63bb025dc961d /examples
parent    69917dfa55674c608360638bb4d6a12a315e2810
code : normalize enum names (#5697)
* code : normalize enum names

ggml-ci

* code : cont

* code : cont
Diffstat (limited to 'examples')
-rw-r--r--  examples/baby-llama/baby-llama.cpp                             |  2
-rw-r--r--  examples/finetune/finetune.cpp                                 |  2
-rw-r--r--  examples/llama-bench/llama-bench.cpp                           | 14
-rw-r--r--  examples/llava/llava.cpp                                       |  2
-rw-r--r--  examples/server/server.cpp                                     | 18
-rw-r--r--  examples/train-text-from-scratch/train-text-from-scratch.cpp  |  2
6 files changed, 20 insertions, 20 deletions
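The rename is purely mechanical: each enum value is prefixed with the full name of its enum type (GGML_OPT_ADAM -> GGML_OPT_TYPE_ADAM, LLAMA_SPLIT_ROW -> LLAMA_SPLIT_MODE_ROW, GGML_BACKEND_CPU -> GGML_BACKEND_TYPE_CPU, and so on), with no behavioral change. Below is a minimal sketch of how an out-of-tree consumer might bridge the rename while migrating, assuming the post-rename headers; the compatibility macros are hypothetical and are not shipped by ggml or llama.cpp.

// Hypothetical compatibility shims for downstream code that still uses the
// pre-rename spellings. Illustrative only -- neither ggml nor llama.cpp
// provides these -- and they assume the headers after this commit.
#include "ggml.h"
#include "llama.h"

#define GGML_OPT_ADAM        GGML_OPT_TYPE_ADAM
#define GGML_OPT_LBFGS       GGML_OPT_TYPE_LBFGS
#define GGML_BACKEND_CPU     GGML_BACKEND_TYPE_CPU
#define LLAMA_SPLIT_NONE     LLAMA_SPLIT_MODE_NONE
#define LLAMA_SPLIT_LAYER    LLAMA_SPLIT_MODE_LAYER
#define LLAMA_SPLIT_ROW      LLAMA_SPLIT_MODE_ROW

int main() {
    // The old spelling still compiles via the shim but resolves to the
    // new enum value, matching the changes in this commit.
    struct ggml_opt_params params = ggml_opt_default_params(GGML_OPT_ADAM);
    (void) params;
    return 0;
}

Aliases like these let the old and new spellings coexist during a migration; once all call sites are updated to the new names, as done for the files below, the macros can be deleted.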
diff --git a/examples/baby-llama/baby-llama.cpp b/examples/baby-llama/baby-llama.cpp
index 65bb238a..bf0125e7 100644
--- a/examples/baby-llama/baby-llama.cpp
+++ b/examples/baby-llama/baby-llama.cpp
@@ -1547,7 +1547,7 @@ int main(int argc, char ** argv) {
float error_before_opt = ggml_get_f32_1d(e, 0);
- struct ggml_opt_params opt_params_lbfgs = ggml_opt_default_params(GGML_OPT_LBFGS);
+ struct ggml_opt_params opt_params_lbfgs = ggml_opt_default_params(GGML_OPT_TYPE_LBFGS);
opt_params_lbfgs.print_forward_graph = false;
opt_params_lbfgs.print_backward_graph = false;
opt_params_lbfgs.lbfgs.n_iter = 16;
diff --git a/examples/finetune/finetune.cpp b/examples/finetune/finetune.cpp
index 98bf5a07..3da5317b 100644
--- a/examples/finetune/finetune.cpp
+++ b/examples/finetune/finetune.cpp
@@ -1531,7 +1531,7 @@ int main(int argc, char ** argv) {
lora.hparams.n_rank_output = n_rank_output;
// set opt params from command line
- opt->params = ggml_opt_default_params(GGML_OPT_ADAM);
+ opt->params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);
opt->params.print_forward_graph = false;
opt->params.print_backward_graph = false;
opt->params.graph_size = LLAMA_TRAIN_MAX_NODES;
diff --git a/examples/llama-bench/llama-bench.cpp b/examples/llama-bench/llama-bench.cpp
index 11410f8a..8fec3d43 100644
--- a/examples/llama-bench/llama-bench.cpp
+++ b/examples/llama-bench/llama-bench.cpp
@@ -157,9 +157,9 @@ static const char * output_format_str(output_formats format) {
static const char * split_mode_str(llama_split_mode mode) {
switch (mode) {
- case LLAMA_SPLIT_NONE: return "none";
- case LLAMA_SPLIT_LAYER: return "layer";
- case LLAMA_SPLIT_ROW: return "row";
+ case LLAMA_SPLIT_MODE_NONE: return "none";
+ case LLAMA_SPLIT_MODE_LAYER: return "layer";
+ case LLAMA_SPLIT_MODE_ROW: return "row";
default: GGML_ASSERT(!"invalid split mode");
}
}
@@ -193,7 +193,7 @@ static const cmd_params cmd_params_defaults = {
/* type_v */ {GGML_TYPE_F16},
/* n_threads */ {get_num_physical_cores()},
/* n_gpu_layers */ {99},
- /* split_mode */ {LLAMA_SPLIT_LAYER},
+ /* split_mode */ {LLAMA_SPLIT_MODE_LAYER},
/* main_gpu */ {0},
/* no_kv_offload */ {false},
/* mul_mat_q */ {true},
@@ -358,11 +358,11 @@ static cmd_params parse_cmd_params(int argc, char ** argv) {
for (const auto & m : p) {
llama_split_mode mode;
if (m == "none") {
- mode = LLAMA_SPLIT_NONE;
+ mode = LLAMA_SPLIT_MODE_NONE;
} else if (m == "layer") {
- mode = LLAMA_SPLIT_LAYER;
+ mode = LLAMA_SPLIT_MODE_LAYER;
} else if (m == "row") {
- mode = LLAMA_SPLIT_ROW;
+ mode = LLAMA_SPLIT_MODE_ROW;
} else {
invalid_param = true;
break;
diff --git a/examples/llava/llava.cpp b/examples/llava/llava.cpp
index 1a1cf7c7..98012816 100644
--- a/examples/llava/llava.cpp
+++ b/examples/llava/llava.cpp
@@ -152,7 +152,7 @@ static bool clip_llava_handle_patches(clip_ctx * ctx_clip, std::vector<float *>
ggml_tensor * newline_tmp = clip_get_newline_tensor(ctx_clip);
model.newline = ggml_new_tensor_1d(model.ctx, GGML_TYPE_F32, newline_tmp->ne[0]);
- if (newline_tmp->backend != GGML_BACKEND_CPU) {
+ if (newline_tmp->backend != GGML_BACKEND_TYPE_CPU) {
if (newline_tmp->buffer == NULL) {
printf("newline_tmp tensor buffer is NULL\n");
}
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 19a8c106..780862ef 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2086,9 +2086,9 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
break;
}
std::string value(argv[i]);
- /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_NONE; }
- else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_LINEAR; }
- else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_YARN; }
+ /**/ if (value == "none") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_NONE; }
+ else if (value == "linear") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_LINEAR; }
+ else if (value == "yarn") { params.rope_scaling_type = LLAMA_ROPE_SCALING_TYPE_YARN; }
else { invalid_param = true; break; }
}
else if (arg == "--rope-freq-base")
@@ -2212,15 +2212,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
std::string arg_next = argv[i];
if (arg_next == "none")
{
- params.split_mode = LLAMA_SPLIT_NONE;
+ params.split_mode = LLAMA_SPLIT_MODE_NONE;
}
else if (arg_next == "layer")
{
- params.split_mode = LLAMA_SPLIT_LAYER;
+ params.split_mode = LLAMA_SPLIT_MODE_LAYER;
}
else if (arg_next == "row")
{
- params.split_mode = LLAMA_SPLIT_ROW;
+ params.split_mode = LLAMA_SPLIT_MODE_ROW;
}
else {
invalid_param = true;
@@ -2447,15 +2447,15 @@ static void server_params_parse(int argc, char **argv, server_params &sparams,
sep++;
if (strncmp(sep, "int:", 4) == 0) {
sep += 4;
- kvo.tag = LLAMA_KV_OVERRIDE_INT;
+ kvo.tag = LLAMA_KV_OVERRIDE_TYPE_INT;
kvo.int_value = std::atol(sep);
} else if (strncmp(sep, "float:", 6) == 0) {
sep += 6;
- kvo.tag = LLAMA_KV_OVERRIDE_FLOAT;
+ kvo.tag = LLAMA_KV_OVERRIDE_TYPE_FLOAT;
kvo.float_value = std::atof(sep);
} else if (strncmp(sep, "bool:", 5) == 0) {
sep += 5;
- kvo.tag = LLAMA_KV_OVERRIDE_BOOL;
+ kvo.tag = LLAMA_KV_OVERRIDE_TYPE_BOOL;
if (std::strcmp(sep, "true") == 0) {
kvo.bool_value = true;
} else if (std::strcmp(sep, "false") == 0) {
diff --git a/examples/train-text-from-scratch/train-text-from-scratch.cpp b/examples/train-text-from-scratch/train-text-from-scratch.cpp
index e78ab185..7eafe851 100644
--- a/examples/train-text-from-scratch/train-text-from-scratch.cpp
+++ b/examples/train-text-from-scratch/train-text-from-scratch.cpp
@@ -960,7 +960,7 @@ int main(int argc, char ** argv) {
struct ggml_opt_context * opt = train->opt;
// set opt params from command line
- opt->params = ggml_opt_default_params(GGML_OPT_ADAM);
+ opt->params = ggml_opt_default_params(GGML_OPT_TYPE_ADAM);
opt->params.print_forward_graph = false;
opt->params.print_backward_graph = false;
opt->params.graph_size = LLAMA_TRAIN_MAX_NODES;