diff options
author | Georgi Gerganov <ggerganov@gmail.com> | 2023-08-27 18:55:41 +0300 |
---|---|---|
committer | Georgi Gerganov <ggerganov@gmail.com> | 2023-08-27 18:55:41 +0300 |
commit | c10704d01e21e3dbe4d6ca1026ebff85349dd239 (patch) | |
tree | d82ea2bcae1c22efbe4d8d9026df38c859658450 | |
parent | 230d46c723edf5999752e4cb67fd94edb19ef9c7 (diff) |
llama : fix MPI threads (close #2827)
-rw-r--r-- | llama.cpp | 3 |
1 file changed, 2 insertions(+), 1 deletion(-)
@@ -2845,7 +2845,6 @@ static bool llama_eval_internal(

     GGML_ASSERT(n_tokens > 0);
     GGML_ASSERT(n_past >= 0);
-    GGML_ASSERT(n_threads > 0);
     // TODO: keep the values of n_batch and n_ctx
     // GGML_ASSERT(n_tokens <= n_batch);
     // GGML_ASSERT(n_past + n_tokens <= n_ctx);
@@ -2856,6 +2855,8 @@ static bool llama_eval_internal(
     ggml_mpi_eval_init(lctx.ctx_mpi, &n_tokens, &n_past, &n_threads);
 #endif

+    GGML_ASSERT(n_threads > 0);
+
     const int N = n_tokens;

     const auto & model = lctx.model;