author     slaren <slarengh@gmail.com>    2023-11-05 18:45:16 +0100
committer  GitHub <noreply@github.com>    2023-11-05 18:45:16 +0100
commit     2833a6f63c1b87c7f4ac574bcf7a15a2f3bf3ede (patch)
tree       2d063c1aa363c6b615daa44c877e6ba78bab122b
parent     d9ccce2e339ca0396560d18b8637f2c848d72a08 (diff)
ggml-cuda : fix f16 mul mat (#3961)
* ggml-cuda : fix f16 mul mat

ggml-ci

* silence common.cpp warning (bonus)
-rw-r--r--  common/common.cpp  2
-rw-r--r--  ggml-cuda.cu       8
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/common/common.cpp b/common/common.cpp
index 37e3ace8..6a711420 100644
--- a/common/common.cpp
+++ b/common/common.cpp
@@ -101,8 +101,8 @@ void process_escapes(std::string& input) {
                             input[output_idx++] = char(val);
                             break;
                         }
-                        // Intentionally fall through to default.
                     }
+                    // fall through
                 default: input[output_idx++] = '\\';
                          input[output_idx++] = input[input_idx]; break;
             }
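
Why moving the comment silences the warning: GCC's -Wimplicit-fallthrough only honors a fall-through comment when nothing but whitespace separates it from the next case/default label, and the old comment sat one brace too deep, so the warning still fired. A minimal sketch of the rule (hypothetical example, not code from this repo):

    #include <cstdio>

    void demo(int c) {
        switch (c) {
            case 'x':
                if (c > 0) {
                    printf("handled\n");
                    break;
                    // a comment here is one scope too deep; GCC does not
                    // treat it as a fall-through marker
                }
                // fall through  (directly before the label: accepted)
            default:
                printf("default\n");
        }
    }

With C++17, writing [[fallthrough]]; before the label is the portable spelling that compilers recognize regardless of comment placement.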
diff --git a/ggml-cuda.cu b/ggml-cuda.cu
index 9f873035..2d9ffffb 100644
--- a/ggml-cuda.cu
+++ b/ggml-cuda.cu
@@ -7414,6 +7414,8 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
         (src1->backend == GGML_BACKEND_GPU) &&
         ( dst->backend == GGML_BACKEND_GPU);
 
+    const bool split = src0->backend == GGML_BACKEND_GPU_SPLIT;
+
     int64_t min_compute_capability = INT_MAX;
     for (int64_t id = 0; id < g_device_count; ++id) {
         if (min_compute_capability > g_compute_capabilities[id] && g_tensor_split[id] < (id + 1 < g_device_count ? g_tensor_split[id + 1] : 1.0f)) {
@@ -7435,13 +7437,13 @@ static void ggml_cuda_mul_mat(const ggml_tensor * src0, const ggml_tensor * src1
     //printf("src0 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src0), ggml_is_transposed(src0), ggml_type_name(src0->type), src0->name);
     //printf("src1 is contiguous %d, transposed %d, type = %s, name = %s\n", ggml_is_contiguous(src1), ggml_is_transposed(src1), ggml_type_name(src1->type), src1->name);
 
-    if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+    if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
         // KQ single-batch
         ggml_cuda_mul_mat_vec_p021(src0, src1, dst);
-    } else if (all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
+    } else if (!split && all_on_device && !use_tensor_cores && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1) {
         // KQV single-batch
         ggml_cuda_mul_mat_vec_nc(src0, src1, dst);
-    } else if (all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
+    } else if (!split && all_on_device && use_tensor_cores && src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1)) {
         // KQ + KQV multi-batch
         ggml_cuda_mul_mat_mat_batched_cublas(src0, src1, dst);
     } else if (src0->type == GGML_TYPE_F32) {
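
What the CUDA change does: GGML_BACKEND_GPU_SPLIT marks a weight tensor whose rows are sharded across multiple GPUs, and in the surrounding code all_on_device also counts a split src0 as on-device, so the three specialized f16 branches above could previously be entered with a sharded src0 they cannot handle. The new !split guard routes such tensors to the generic path instead. A toy sketch of the dispatch rule (hypothetical names, not the real ggml API):

    #include <cstdio>

    enum backend_t { BACKEND_GPU, BACKEND_GPU_SPLIT };

    struct tensor_t { backend_t backend; };

    // Split weights must never reach the single-device f16 fast paths.
    const char * pick_path(const tensor_t & src0, bool all_on_device, bool use_tensor_cores) {
        const bool split = src0.backend == BACKEND_GPU_SPLIT;
        if (!split && all_on_device && !use_tensor_cores) {
            return "f16 fast path (whole tensor on one device)";
        }
        return "generic path (handles row-split src0)";
    }

    int main() {
        tensor_t whole   { BACKEND_GPU };
        tensor_t sharded { BACKEND_GPU_SPLIT };
        printf("%s\n", pick_path(whole,   true, false)); // fast path
        printf("%s\n", pick_path(sharded, true, false)); // generic path
    }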