Diffstat (limited to 'examples')
-rw-r--r--   examples/embedding/embedding.cpp     11  ++++++-----
-rw-r--r--   examples/main/main.cpp                6  ++++--
-rw-r--r--   examples/perplexity/perplexity.cpp    7  ++++---
3 files changed, 14 insertions, 10 deletions
diff --git a/examples/embedding/embedding.cpp b/examples/embedding/embedding.cpp
index 49ab3e06..e4a0a38c 100644
--- a/examples/embedding/embedding.cpp
+++ b/examples/embedding/embedding.cpp
@@ -17,11 +17,6 @@ int main(int argc, char ** argv) {
 
     params.embedding = true;
 
-    if (params.n_ctx > 2048) {
-        fprintf(stderr, "%s: warning: model might not support context sizes greater than 2048 tokens (%d specified);"
-                "expect poor results\n", __func__, params.n_ctx);
-    }
-
     fprintf(stderr, "%s: build = %d (%s)\n", __func__, BUILD_NUMBER, BUILD_COMMIT);
 
     if (params.seed == LLAMA_DEFAULT_SEED) {
@@ -47,6 +42,12 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
+    const int n_ctx_train = llama_n_ctx_train(ctx);
+    if (params.n_ctx > n_ctx_train) {
+        fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
+                __func__, n_ctx_train, params.n_ctx);
+    }
+
     // print system information
     {
         fprintf(stderr, "\n");
diff --git a/examples/main/main.cpp b/examples/main/main.cpp
index be030fff..baec6ba1 100644
--- a/examples/main/main.cpp
+++ b/examples/main/main.cpp
@@ -182,8 +182,10 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    if (params.n_ctx > llama_n_ctx(ctx)) {
-        LOG_TEE("%s: warning: base model only supports context sizes no greater than %d tokens (%d specified)\n", __func__, llama_n_ctx(ctx), params.n_ctx);
+    const int n_ctx_train = llama_n_ctx_train(ctx);
+    if (params.n_ctx > n_ctx_train) {
+        LOG_TEE("%s: warning: model was trained on only %d context tokens (%d specified)\n",
+                __func__, n_ctx_train, params.n_ctx);
     } else if (params.n_ctx < 8) {
         LOG_TEE("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
         params.n_ctx = 8;
diff --git a/examples/perplexity/perplexity.cpp b/examples/perplexity/perplexity.cpp
index 1b760683..3a1c8c28 100644
--- a/examples/perplexity/perplexity.cpp
+++ b/examples/perplexity/perplexity.cpp
@@ -693,9 +693,10 @@ int main(int argc, char ** argv) {
         return 1;
     }
 
-    if (params.n_ctx > llama_n_ctx(ctx)) {
-        fprintf(stderr, "%s: warning: model might not support context sizes greater than %d tokens (%d specified);"
-                "expect poor results\n", __func__, llama_n_ctx(ctx), params.n_ctx);
+    const int n_ctx_train = llama_n_ctx_train(ctx);
+    if (params.n_ctx > n_ctx_train) {
+        fprintf(stderr, "%s: warning: model was trained on only %d context tokens (%d specified)\n",
+                __func__, n_ctx_train, params.n_ctx);
     }
 
     // print system information
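The same check is easy to reproduce in any program that links against llama.cpp: query the training context length from the loaded model and compare it with the requested context size before issuing a warning. The sketch below follows the llama.h API as it appears in this diff, where llama_n_ctx_train takes a llama_context pointer; the helper name and the n_ctx_requested parameter are illustrative, and later versions of the library may expose the accessor on the model object instead.

    #include <cstdio>
    #include "llama.h"

    // Warn when the requested context size exceeds the model's training context.
    // Mirrors the pattern introduced in this commit; n_ctx_requested plays the
    // role of params.n_ctx in the examples above.
    static void warn_if_ctx_exceeds_training(struct llama_context * ctx, int n_ctx_requested) {
        const int n_ctx_train = llama_n_ctx_train(ctx); // training context length from model metadata
        if (n_ctx_requested > n_ctx_train) {
            fprintf(stderr, "warning: model was trained on only %d context tokens (%d specified)\n",
                    n_ctx_train, n_ctx_requested);
        }
    }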