summaryrefslogtreecommitdiff
path: root/examples/server
diff options
context:
space:
mode:
author: Olivier Chafik <ochafik@users.noreply.github.com> 2024-04-30 00:52:50 +0100
committer: GitHub <noreply@github.com> 2024-04-30 00:52:50 +0100
commit: 8843a98c2ba97a25e93319a104f9ddfaf83ce4c4 (patch)
tree: 82d73687b9dd42033a388d83c3b491925a0444b9 /examples/server
parent: b8c1476e44cc1f3a1811613f65251cf779067636 (diff)
Improve usability of --model-url & related flags (#6930)
* args: default --model to models/ + filename from --model-url or --hf-file (or else legacy models/7B/ggml-model-f16.gguf)
* args: main & server now call gpt_params_handle_model_default
* args: define DEFAULT_MODEL_PATH + update cli docs
* curl: check url of previous download (.json metadata w/ url, etag & lastModified)
* args: fix update to quantize-stats.cpp
* curl: support legacy .etag / .lastModified companion files
* curl: rm legacy .etag file support
* curl: reuse regex across headers callback calls
* curl: unique_ptr to manage lifecycle of curl & outfile
* curl: nit: no need for multiline regex flag
* curl: update failed test (model file collision) + gitignore *.gguf.json
Diffstat (limited to 'examples/server')
-rw-r--r--  examples/server/server.cpp                        | 4
-rw-r--r--  examples/server/tests/features/embeddings.feature | 2
2 files changed, 4 insertions(+), 2 deletions(-)
diff --git a/examples/server/server.cpp b/examples/server/server.cpp
index 2760aea8..01453af2 100644
--- a/examples/server/server.cpp
+++ b/examples/server/server.cpp
@@ -2353,7 +2353,7 @@ static void server_print_usage(const char * argv0, const gpt_params & params, co
printf(" disable KV offload\n");
}
printf(" -m FNAME, --model FNAME\n");
- printf(" model path (default: %s)\n", params.model.c_str());
+ printf(" model path (default: models/$filename with filename from --hf-file or --model-url if set, otherwise %s)\n", DEFAULT_MODEL_PATH);
printf(" -mu MODEL_URL, --model-url MODEL_URL\n");
printf(" model download url (default: unused)\n");
printf(" -hfr REPO, --hf-repo REPO\n");
@@ -2835,6 +2835,8 @@ static void server_params_parse(int argc, char ** argv, server_params & sparams,
}
}
+ gpt_params_handle_model_default(params);
+
if (!params.kv_overrides.empty()) {
params.kv_overrides.emplace_back();
params.kv_overrides.back().key[0] = 0;
diff --git a/examples/server/tests/features/embeddings.feature b/examples/server/tests/features/embeddings.feature
index dcf1434f..6f163ce0 100644
--- a/examples/server/tests/features/embeddings.feature
+++ b/examples/server/tests/features/embeddings.feature
@@ -5,7 +5,7 @@ Feature: llama.cpp server
Background: Server startup
Given a server listening on localhost:8080
And a model url https://huggingface.co/ggml-org/models/resolve/main/bert-bge-small/ggml-model-f16.gguf
- And a model file ggml-model-f16.gguf
+ And a model file bert-bge-small.gguf
And a model alias bert-bge-small
And 42 as server seed
And 2 slots