path: root/ggml/src/ggml-cuda.cu
author    Kawrakow <iwankawrakow@gmail.com>    2025-03-18 15:40:47 +0100
committer GitHub <noreply@github.com>          2025-03-18 15:40:47 +0100
commit    68a5b60408b1085d2b2ed5de75e004ee23f8ddb9 (patch)
tree      cecd9be0307e484346f4bd65ebe5ff4d34afef9c /ggml/src/ggml-cuda.cu
parent    f4ebf13b6a63ac1367bc392e24566d71c0b4c5b9 (diff)
Make Q8_0 KV cache work with mla=2,fa on CUDA (#264)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'ggml/src/ggml-cuda.cu')
-rw-r--r--    ggml/src/ggml-cuda.cu    8
1 file changed, 8 insertions(+), 0 deletions(-)
diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index 01f98594..453bde0c 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -3395,6 +3395,11 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
if (op->op == GGML_OP_MOE_FUSED_UP_GATE && a->type != op->src[1]->type) {
return false;
}
+ //==================================================================
+ //if (ggml_is_quantized(a->type) && ggml_is_quantized(b->type)) {
+ // return false;
+ //}
+ //==================================================================
if (b->type == GGML_TYPE_F16 && a->type != GGML_TYPE_F16 && !ggml_is_quantized(a->type)) {
return false;
}
@@ -3496,6 +3501,9 @@ GGML_CALL static bool ggml_backend_cuda_supports_op(ggml_backend_t backend, cons
if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
return true;
}
+ if (ggml_is_quantized(src0_type) && (src1_type == GGML_TYPE_F16 || src1_type == GGML_TYPE_F32)) {
+ return true;
+ }
if (ggml_is_contiguous(op->src[0]) && ggml_are_same_shape(op->src[0], op->src[1])) {
if (src1_type == GGML_TYPE_F16 || src1_type == GGML_TYPE_BF16 || src1_type == GGML_TYPE_F32) {
return true;
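
For context, the second hunk is the functional change: after it, ggml_backend_cuda_supports_op reports that the CUDA backend can service a GGML_OP_CPY from a quantized source type into F16 or F32, which is what the Q8_0 KV cache needs on the mla=2 plus flash attention path named in the commit title. Below is a minimal standalone sketch of that dispatch rule; the Type enum, is_quantized(), and supports_cpy() are illustrative stand-ins, not the actual ggml API.

// Standalone sketch of the copy-support rule added by this commit.
// The names below are hypothetical stand-ins for ggml_type,
// ggml_is_quantized() and the GGML_OP_CPY branch of
// ggml_backend_cuda_supports_op; they are not the ggml API.
#include <cstdio>

enum class Type { F32, F16, Q8_0, Q4_0 };  // small illustrative subset of tensor types

// Stand-in for ggml_is_quantized(): true for block-quantized formats.
static bool is_quantized(Type t) {
    return t == Type::Q8_0 || t == Type::Q4_0;
}

// Mirrors the copy-support checks shown in the second hunk:
// F16 -> F32 was already accepted; the new rule additionally accepts
// quantized -> F16/F32, i.e. dequantizing a Q8_0 KV cache on copy.
static bool supports_cpy(Type src0, Type src1) {
    if (src0 == Type::F16 && src1 == Type::F32) {
        return true;
    }
    if (is_quantized(src0) && (src1 == Type::F16 || src1 == Type::F32)) {
        return true;  // the check added by this commit
    }
    return false;
}

int main() {
    std::printf("Q8_0 -> F16: %s\n", supports_cpy(Type::Q8_0, Type::F16) ? "supported" : "unsupported");
    std::printf("Q8_0 -> F32: %s\n", supports_cpy(Type::Q8_0, Type::F32) ? "supported" : "unsupported");
    return 0;
}

The first hunk only adds a commented-out (disabled) check and does not change behavior; the real effect of the patch is the quantized-to-float copy rule sketched above.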