path: root/ggml/src/ggml-cuda.cu
author     Kawrakow <iwankawrakow@gmail.com>    2025-03-12 07:21:46 +0200
committer  GitHub <noreply@github.com>          2025-03-12 07:21:46 +0200
commit     3f23ed68f17583a8ee63afd0c214f5b39226226c (patch)
tree       ad86914fd2925935247d2fba0ebb3b8b5d2c9bfc /ggml/src/ggml-cuda.cu
parent     a48e16324770bb829406d06e11be1df0c8a3b517 (diff)
MLA-2: Allow usage of q8_0 for KV cache on CUDA (#252)
* FlashMLA(CUDA): WIP to allow q8_0 quantized cache
* WIP
* FlashMLA(CUDA) - allow q8_0 for KV cache
  This works, and PP (prompt processing) is not bad, but TG (token generation) is still quite a bit slower.
* FlashMLA(CUDA) - allow q8_0 for KV cache
  This is better: ~9% slower than f16 cache for short contexts, nearly on par at 16k tokens.
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
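
For readers unfamiliar with the cache format: q8_0 stores each group of 32 values as one fp16 scale plus 32 signed 8-bit quants, so a q8_0 KV cache takes roughly half the memory of an f16 cache. Below is a minimal, self-contained C++ sketch of the block layout and of dequantization; it mirrors ggml's standard q8_0 format, except that the scale is kept as a plain float here (ggml uses ggml_half) to keep the example dependency-free.

#include <cstdint>

// Sketch of ggml's q8_0 block format: 32 values per block,
// one per-block scale plus 32 int8 quants.
constexpr int QK8_0 = 32;

struct block_q8_0 {
    float  d;            // per-block scale (ggml stores this as fp16)
    int8_t qs[QK8_0];    // quantized values
};

// Dequantize one block: y[i] = d * qs[i]
static void dequantize_q8_0(const block_q8_0 & b, float * y) {
    for (int i = 0; i < QK8_0; ++i) {
        y[i] = b.d * (float) b.qs[i];
    }
}
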
Diffstat (limited to 'ggml/src/ggml-cuda.cu')
-rw-r--r--  ggml/src/ggml-cuda.cu  11
1 file changed, 8 insertions, 3 deletions
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index f25dd725..1bb869c3 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -2296,9 +2296,6 @@ static void ggml_cuda_mul_mat_id(ggml_backend_cuda_context & ctx, ggml_tensor *
                 for (int64_t id = 0; id < n_ids; id++) {
                     const int32_t row_id_i = *(const int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]);
-                    if (i02 < 0 || i02 >= n_as) continue;
-                    //GGML_ASSERT(row_id_i >= 0 && row_id_i < n_as);
-
                     if (row_id_i != i02) {
                         continue;
                     }
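
To make the first hunk easier to follow, here is a self-contained sketch of the expert-routing loop it touches. The outer loop over n_as and the flat ids layout are assumptions modeled on upstream ggml_cuda_mul_mat_id; only the comparison against row_id_i mirrors the hunk itself. Since i02 is the outer loop index over n_as, the deleted range check on it could never trigger, and the already commented-out assert on row_id_i goes with it.

#include <cstdint>
#include <vector>

// Simplified stand-in for the routing loop in ggml_cuda_mul_mat_id.
// n_as = number of experts, n_ids = experts selected per token,
// ids  = one expert index per (token, slot), stored row-major.
static void route_rows(int64_t n_as, int64_t n_tokens, int64_t n_ids,
                       const std::vector<int32_t> & ids) {
    for (int64_t i02 = 0; i02 < n_as; i02++) {             // i02 is always in [0, n_as)
        for (int64_t iid1 = 0; iid1 < n_tokens; iid1++) {
            for (int64_t id = 0; id < n_ids; id++) {
                const int32_t row_id_i = ids[iid1*n_ids + id];
                if (row_id_i != i02) {
                    continue;                               // row not routed to expert i02
                }
                // ... gather this row into the batch for expert i02 ...
            }
        }
    }
}
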
@@ -3458,6 +3455,14 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
                 if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
                     return true;
                 }
+                if (ggml_is_contiguous(op->src[0]) && ggml_are_same_shape(op->src[0], op->src[1])) {
+                    if (src1_type == GGML_TYPE_F16 || src1_type == GGML_TYPE_BF16 || src1_type == GGML_TYPE_F32) {
+                        return true;
+                    }
+                }
+                if (ggml_are_same_shape(op->src[0], op->src[1]) && op->src[0]->type == GGML_TYPE_Q8_0 && op->src[1]->type == GGML_TYPE_Q8_0) {
+                    return true;
+                }
                 return false;
             } break;
         case GGML_OP_DUP:
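
The second hunk widens the copy-support check (from the surrounding context lines this appears to be the GGML_OP_CPY handling in ggml_backend_cuda_supports_op). Read as a standalone predicate, the rule after the patch is roughly the sketch below; the enum and the two booleans stand in for ggml_type, ggml_is_contiguous(src0) and ggml_are_same_shape(src0, src1), and the q8_0 -> q8_0 case is presumably what the quantized KV cache path relies on.

// Sketch of the copy-support rule after this patch.
enum class type { f32, f16, bf16, q8_0, other };

static bool cuda_supports_cpy(type src0, type src1, bool contiguous, bool same_shape) {
    // already supported before this change
    if (src0 == type::f16 && src1 == type::f32) {
        return true;
    }
    // new: contiguous, same-shape copies into f16/bf16/f32 destinations
    if (contiguous && same_shape &&
        (src1 == type::f16 || src1 == type::bf16 || src1 == type::f32)) {
        return true;
    }
    // new: same-shape q8_0 -> q8_0 copies
    if (same_shape && src0 == type::q8_0 && src1 == type::q8_0) {
        return true;
    }
    return false;
}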