| author | Georgi Gerganov <ggerganov@gmail.com> | 2023-10-22 08:37:20 +0300 |
| --- | --- | --- |
| committer | Georgi Gerganov <ggerganov@gmail.com> | 2023-10-22 08:37:20 +0300 |
| commit | 22c69a27945e7acf9690dd3210d316f22182751c (patch) | |
| tree | 021f56cdbeab8d1588f4a7db4626a11165e4aa94 | |
| parent | 465219b9143ac01db0990bbcb0a081ef72ec2008 (diff) | |
batched : add len CLI argument
| -rw-r--r-- | examples/batched/batched.cpp | 13 |
1 file changed, 9 insertions(+), 4 deletions(-)
```diff
diff --git a/examples/batched/batched.cpp b/examples/batched/batched.cpp
index 15521216..2797329b 100644
--- a/examples/batched/batched.cpp
+++ b/examples/batched/batched.cpp
@@ -11,12 +11,16 @@ int main(int argc, char ** argv) {
     gpt_params params;
 
     if (argc == 1 || argv[1][0] == '-') {
-        printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL]\n" , argv[0]);
+        printf("usage: %s MODEL_PATH [PROMPT] [PARALLEL] [LEN]\n" , argv[0]);
         return 1 ;
     }
 
+    // number of parallel batches
     int n_parallel = 1;
 
+    // total length of the sequences including the prompt
+    int n_len = 32;
+
     if (argc >= 2) {
         params.model = argv[1];
     }
@@ -29,13 +33,14 @@ int main(int argc, char ** argv) {
         n_parallel = std::atoi(argv[3]);
     }
 
+    if (argc >= 5) {
+        n_len = std::atoi(argv[4]);
+    }
+
     if (params.prompt.empty()) {
         params.prompt = "Hello my name is";
     }
 
-    // total length of the sequences including the prompt
-    const int n_len = 32;
-
     // init LLM
 
     llama_backend_init(params.numa);
```
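The change replaces the hard-coded `const int n_len = 32` with a fourth positional argument, so the total sequence length (prompt plus generated tokens) can now be set at the command line. A sketch of an invocation matching the new usage string, assuming the example is built as a `batched` binary in the current directory and that the model path shown is a placeholder for your own GGUF file:

```sh
# usage: batched MODEL_PATH [PROMPT] [PARALLEL] [LEN]
# run 4 parallel sequences, each up to 64 tokens including the prompt
# (./models/llama-2-7b.gguf is a placeholder path, not part of the commit)
./batched ./models/llama-2-7b.gguf "Hello my name is" 4 64
```

Note that the arguments remain strictly positional: `LEN` is only read when `PROMPT` and `PARALLEL` are also given, and omitting it keeps the previous default of 32.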