summaryrefslogtreecommitdiff
path: root/llama.cpp
diff options
context:
space:
mode:
author: Daniel Bevenius <daniel.bevenius@gmail.com> 2023-10-13 12:33:16 +0200
committer: GitHub <noreply@github.com> 2023-10-13 13:33:16 +0300
commit 2a4bcbacead886996f175f33479d1d874a3e577f (patch)
tree 2f4360333d472e1fd90b1fa5d82d9ca73d6f1436 /llama.cpp
parent 424b6381c4daeed62e6600e0402e72f39845b58d (diff)
llama : remove n_threads from llama_decode_internal (#3614)
This commit removes `n_threads` from the `llama_decode_internal` functions doc comment as it does not exist anymore. It looks like this parameter was removed in Commit 16bc66d9479edd5ee12ec734973554d4493c5dfa ("llama.cpp : split llama_context_params into model and context params"). Signed-off-by: Daniel Bevenius <daniel.bevenius@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r-- llama.cpp | 1 -
1 file changed, 0 insertions(+), 1 deletion(-)
diff --git a/llama.cpp b/llama.cpp
index 7ed87223..2cd2dad7 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5721,7 +5721,6 @@ static struct ggml_cgraph * llama_build_graph(
//
// - lctx: llama context
// - batch: batch to evaluate
-// - n_threads: number of threads to use
//
// return 0 on success
// return positive int on warning