author     Johannes Gäßler <johannesg@5d6.de>    2023-05-08 02:42:01 +0200
committer  GitHub <noreply@github.com>           2023-05-08 02:42:01 +0200
commit     1f48b0abcfbd6cc99571e42348e0ec97e4be8b93
tree       615e8273f6788fdfea285859bab63981c030c742
parent     e1295513a48ae8254d8af5ec0250b56d6eaffefd
Documented CUDA reproducibility, added warning (#1346)
 README.md           | 2 ++
 examples/common.cpp | 3 +++
 ggml-cuda.cu        | 2 +-
 3 files changed, 6 insertions(+), 1 deletion(-)
@@ -257,6 +257,8 @@ Building the program with BLAS support may lead to some performance improvements
 cmake --build . --config Release
 ```
 
+Note: Because llama.cpp uses multiple CUDA streams for matrix multiplication results [are not guaranteed to be reproducible](https://docs.nvidia.com/cuda/cublas/index.html#results-reproducibility). If you need reproducibility, set `GGML_CUDA_MAX_STREAMS` in the file `ggml-cuda.cu` to 1.
+
 ### Prepare Data & Run
 
 ```bash
diff --git a/examples/common.cpp b/examples/common.cpp
index 97eded6e..f1c3bae1 100644
--- a/examples/common.cpp
+++ b/examples/common.cpp
@@ -100,6 +100,9 @@ bool gpt_params_parse(int argc, char ** argv, gpt_params & params) {
         arg = argv[i];
 
         if (arg == "-s" || arg == "--seed") {
+#if defined(GGML_USE_CUBLAS)
+            fprintf(stderr, "WARNING: when using cuBLAS generation results are NOT guaranteed to be reproducible.\n");
+#endif
             if (++i >= argc) {
                 invalid_param = true;
                 break;
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index e8a1e77c..127b352a 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -348,7 +348,7 @@ static void ggml_cuda_pool_free(void * ptr, size_t size) {
     CUDA_CHECK(cudaFree(ptr));
 }
 
-#define GGML_CUDA_MAX_STREAMS 8
+#define GGML_CUDA_MAX_STREAMS 8 // Set this to 1 for reproducible matrix multiplication.
 #define GGML_CUDA_MAX_EVENTS 64
 static cublasHandle_t g_cublasH = nullptr;
 static cudaStream_t g_cudaStreams[GGML_CUDA_MAX_STREAMS] = { nullptr };
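For context, below is a minimal sketch of the multi-stream dispatch pattern the patch is warning about. This is not the actual llama.cpp code; the names `MAX_STREAMS`, `init_streams`, and `run_gemm` are hypothetical and used only for illustration. The cuBLAS calls (`cublasSetStream`, `cublasSgemm`) are the real library API.

```cpp
// Minimal sketch, not the actual llama.cpp implementation: MAX_STREAMS,
// init_streams, and run_gemm are hypothetical names for illustration only.
#include <cuda_runtime.h>
#include <cublas_v2.h>

#define MAX_STREAMS 8 // analogue of GGML_CUDA_MAX_STREAMS; 1 serializes all work

static cudaStream_t streams[MAX_STREAMS];

// Create the stream pool once at startup.
void init_streams(void) {
    for (int i = 0; i < MAX_STREAMS; ++i) {
        cudaStreamCreateWithFlags(&streams[i], cudaStreamNonBlocking);
    }
}

// Dispatch the i-th independent single-precision GEMM (C = A * B, all
// matrices column-major) onto a stream chosen round-robin. With
// MAX_STREAMS > 1 the GEMMs run concurrently and can finish in a different
// order on every run; because floating-point addition is not associative,
// any downstream accumulation over these results need not be bitwise
// identical across runs. With MAX_STREAMS == 1 the submission order fully
// determines the execution order.
void run_gemm(cublasHandle_t handle, int i, int m, int n, int k,
              const float * A, const float * B, float * C) {
    const float alpha = 1.0f;
    const float beta  = 0.0f;
    cublasSetStream(handle, streams[i % MAX_STREAMS]);
    cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N,
                m, n, k, &alpha, A, m, B, k, &beta, C, m);
}
```

This matches the cuBLAS documentation linked in the README note, which guarantees bitwise run-to-run reproducibility only when the same routine runs on the same GPU architecture in a single CUDA stream; that is why the patch points at `GGML_CUDA_MAX_STREAMS` as the knob to set to 1.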