| author | Olivier Chafik <ochafik@users.noreply.github.com> | 2024-06-13 00:41:52 +0100 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-06-13 00:41:52 +0100 |
| commit | 1c641e6aac5c18b964e7b32d9dbbb4bf5301d0d7 (patch) | |
| tree | 616348dac8e67d80a03a81847ce9ee4bb7e19d49 /scripts/run-with-preset.py | |
| parent | 963552903f51043ee947a8deeaaa7ec00bc3f1a4 (diff) | |
`build`: rename main → llama-cli, server → llama-server, llava-cli → llama-llava-cli, etc... (#7809)
* `main`/`server`: rename to `llama` / `llama-server` for consistency w/ homebrew
* server: update refs -> llama-server
gitignore llama-server
* server: simplify nix package
* main: update refs -> llama
fix examples/main ref
* main/server: fix targets
* update more names
* Update build.yml
* rm accidentally checked in bins
* update straggling refs
* Update .gitignore
* Update server-llm.sh
* main: target name -> llama-cli
* Prefix all example bins w/ llama-
* fix main refs
* rename {main->llama}-cmake-pkg binary
* prefix more cmake targets w/ llama-
* add/fix gbnf-validator subfolder to cmake
* sort cmake example subdirs
* rm bin files
* fix llama-lookup-* Makefile rules
* gitignore /llama-*
* rename Dockerfiles
* rename llama|main -> llama-cli; consistent RPM bin prefixes
* fix some missing -cli suffixes
* rename dockerfile w/ llama-cli
* rename(make): llama-baby-llama
* update dockerfile refs
* more llama-cli(.exe)
* fix test-eval-callback
* rename: llama-cli-cmake-pkg(.exe)
* address gbnf-validator unused fread warning (switched to C++ / ifstream)
* add two missing llama- prefixes
* Updating docs for eval-callback binary to use new `llama-` prefix.
* Updating a few lingering doc references for rename of main to llama-cli
* Updating `run-with-preset.py` to use new binary names.
Updating docs around `perplexity` binary rename.
* Updating documentation references for lookup-merge and export-lora
* Updating two small `main` references missed earlier in the finetune docs.
* Update apps.nix
* update grammar/README.md w/ new llama-* names
* update llama-rpc-server bin name + doc
* Revert "update llama-rpc-server bin name + doc"
This reverts commit e474ef1df481fd8936cd7d098e3065d7de378930.
* add hot topic notice to README.md
* Update README.md
* Update README.md
* rename gguf-split & quantize bins refs in **/tests.sh
---------
Co-authored-by: HanClinto <hanclinto@gmail.com>
Diffstat (limited to 'scripts/run-with-preset.py')
-rwxr-xr-x | scripts/run-with-preset.py | 16 |
1 file changed, 8 insertions, 8 deletions
```diff
diff --git a/scripts/run-with-preset.py b/scripts/run-with-preset.py
index 0d721911..ee21eab3 100755
--- a/scripts/run-with-preset.py
+++ b/scripts/run-with-preset.py
@@ -10,7 +10,7 @@ import yaml
 
 logger = logging.getLogger("run-with-preset")
 
-CLI_ARGS_MAIN_PERPLEXITY = [
+CLI_ARGS_LLAMA_CLI_PERPLEXITY = [
     "batch-size", "cfg-negative-prompt", "cfg-scale", "chunks", "color", "ctx-size", "escape",
     "export", "file", "frequency-penalty", "grammar", "grammar-file", "hellaswag",
     "hellaswag-tasks", "ignore-eos", "in-prefix", "in-prefix-bos", "in-suffix",
@@ -29,7 +29,7 @@ CLI_ARGS_LLAMA_BENCH = [
     "n-prompt", "output", "repetitions", "tensor-split", "threads", "verbose"
 ]
 
-CLI_ARGS_SERVER = [
+CLI_ARGS_LLAMA_SERVER = [
     "alias", "batch-size", "ctx-size", "embedding", "host", "memory-f32", "lora", "lora-base",
     "low-vram", "main-gpu", "mlock", "model", "n-gpu-layers", "n-probs", "no-mmap", "no-mul-mat-q",
     "numa", "path", "port", "rope-freq-base", "timeout", "rope-freq-scale", "tensor-split",
@@ -37,7 +37,7 @@ CLI_ARGS_SERVER = [
 ]
 
 description = """Run llama.cpp binaries with presets from YAML file(s).
-To specify which binary should be run, specify the "binary" property (main, perplexity, llama-bench, and server are supported).
+To specify which binary should be run, specify the "binary" property (llama-cli, llama-perplexity, llama-bench, and llama-server are supported).
 To get a preset file template, run a llama.cpp binary with the "--logdir" CLI argument.
 
 Formatting considerations:
@@ -77,19 +77,19 @@ for yaml_file in known_args.yaml_files:
 
 props = {prop.replace("_", "-"): val for prop, val in props.items()}
 
-binary = props.pop("binary", "main")
+binary = props.pop("binary", "llama-cli")
 if known_args.binary:
     binary = known_args.binary
 
 if os.path.exists(f"./{binary}"):
     binary = f"./{binary}"
 
-if binary.lower().endswith("main") or binary.lower().endswith("perplexity"):
-    cli_args = CLI_ARGS_MAIN_PERPLEXITY
+if binary.lower().endswith("llama-cli") or binary.lower().endswith("llama-perplexity"):
+    cli_args = CLI_ARGS_LLAMA_CLI_PERPLEXITY
 elif binary.lower().endswith("llama-bench"):
     cli_args = CLI_ARGS_LLAMA_BENCH
-elif binary.lower().endswith("server"):
-    cli_args = CLI_ARGS_SERVER
+elif binary.lower().endswith("llama-server"):
+    cli_args = CLI_ARGS_LLAMA_SERVER
 else:
     logger.error(f"Unknown binary: {binary}")
     sys.exit(1)
```
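For context, the preset files this script consumes select the target binary via a `binary` key, which now defaults to `llama-cli`. Below is a minimal sketch of how a post-rename preset resolves; the model path and flag values are made up for illustration, and only the `binary` key, its new default, and the suffix-based dispatch come from the patched script:

```python
# Hypothetical preset demonstrating the renamed "binary" property; the flag
# names mirror entries in the script's per-binary whitelists.
import yaml

preset_text = """
binary: llama-cli   # default changed from "main" to "llama-cli" in this commit
model: models/7B/model-q4_0.gguf
ctx-size: 2048
threads: 8
"""

props = yaml.safe_load(preset_text)
binary = props.pop("binary", "llama-cli")

# Same dispatch idea as the diff above: the suffix check now matches the full
# llama- prefixed names instead of the bare "main"/"server" suffixes.
if binary.lower().endswith(("llama-cli", "llama-perplexity")):
    print(f"{binary} -> CLI_ARGS_LLAMA_CLI_PERPLEXITY, flags: {sorted(props)}")
elif binary.lower().endswith("llama-server"):
    print(f"{binary} -> CLI_ARGS_LLAMA_SERVER, flags: {sorted(props)}")
```

Running this prints `llama-cli -> CLI_ARGS_LLAMA_CLI_PERPLEXITY, flags: ['ctx-size', 'model', 'threads']`, i.e. existing presets keep working so long as their `binary` value is updated to one of the new `llama-` prefixed names.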