summaryrefslogtreecommitdiff
path: root/ggml.c
diff options
context:
space:
mode:
authorJohannes Gäßler <johannesg@5d6.de>2023-05-13 15:38:36 +0200
committerGitHub <noreply@github.com>2023-05-13 16:38:36 +0300
commit905d87b70aa189623d500a28602d7a3a755a4769 (patch)
tree11f0d435ecb7555734b14b7a8994e88772bf8190 /ggml.c
parentf954edda935a70a14cf0cc45ecc7fe7d60cf3e4b (diff)
ggml : GPU-accelerated token generation (#1412)
* CUDA kernel for q4_0 dequant. + mat. vec. mult. * Added q4_1 via template * Added missing __syncthreads(); * --gpu_layers -> --gpu-layers * Shorter dequantize_mul_mat_vec line * q5_0 dequantize_mul_mat kernel * More readable dequantize_mul_mat_vec logic * dequantize_mul_mat_vec kernels for q5_1, q8_0, f16 * llama : offload "output" tensor to GPU too + coding style fixes --------- Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'ggml.c')
-rw-r--r--ggml.c1
1 file changed, 1 insertion, 0 deletions
diff --git a/ggml.c b/ggml.c
index 675eb0d2..05746383 100644
--- a/ggml.c
+++ b/ggml.c
@@ -3882,6 +3882,7 @@ struct ggml_tensor * ggml_new_tensor_impl(
*result = (struct ggml_tensor) {
/*.type =*/ type,
+ /*.backend =*/ GGML_BACKEND_CPU,
/*.n_dims =*/ n_dims,
/*.ne =*/ { 1, 1, 1, 1 },
/*.nb =*/ { 0, 0, 0, 0 },