path: root/ggml/src/vulkan-shaders/mul_mm_cm2.comp
#version 450
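// mul_mm_cm2.comp: matrix-matrix multiplication using the NV cooperative matrix 2
// (tensor addressing) extensions. A (data_a) may be quantized and is dequantized on
// load; B (data_b) and the output D (data_d) use B_TYPE/D_TYPE. When MUL_MAT_ID is
// defined, rows of B are gathered through an ids tensor (expert routing, e.g. for
// mixture-of-experts); otherwise a batched/broadcast matmul with optional split-k
// is performed.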

#extension GL_EXT_control_flow_attributes : enable
#extension GL_EXT_shader_16bit_storage : require

#extension GL_EXT_shader_explicit_arithmetic_types_float16 : require
#extension GL_EXT_shader_explicit_arithmetic_types_int8 : require
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require
#extension GL_EXT_shader_explicit_arithmetic_types_int16 : require

#extension GL_KHR_memory_scope_semantics : enable
#extension GL_KHR_cooperative_matrix : enable
#extension GL_NV_cooperative_matrix2 : enable
#extension GL_EXT_buffer_reference : enable
#extension GL_KHR_shader_subgroup_ballot : enable
#extension GL_KHR_shader_subgroup_vote : enable
#ifdef DATA_A_BF16
#extension GL_EXT_bfloat16 : enable
#endif

#include "types.comp"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

#define IS_MUL_MM2 1

layout (constant_id = 0) const uint BLOCK_SIZE = 256;
layout (constant_id = 1) const uint BM = 64;
layout (constant_id = 2) const uint BN = 64;
layout (constant_id = 3) const uint BK = 16;  // Assumed to be 32 if working with a quant

layout (constant_id = 4) const bool enable_smaller_matrices = false;
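// When enable_smaller_matrices is set, narrower accumulators (BN/2 or BN/4 columns)
// can be selected for output tiles whose valid N range is small, reducing the work
// done for partially filled tiles.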
const uint BNover2 = enable_smaller_matrices ? (BN / 2) : BN;
const uint BNover4 = enable_smaller_matrices ? (BN / 4) : BN;

layout (push_constant) uniform parameter
{
    uint M;
    uint N;
    uint K;
    uint stride_a;
    uint stride_b;
    uint stride_d;

    uint batch_stride_a;
    uint batch_stride_b;
    uint batch_stride_d;

#ifdef MUL_MAT_ID
    uint nei0;
    uint nei1;
    uint nbi1;
    uint ne11;
#else
    uint k_split;
    uint ne02;
    uint ne12;
    uint broadcast2;
    uint broadcast3;
#endif
    // N dimension for the B matrix can be >= p.N
    uint padded_N;
} p;


layout (binding = 0) readonly buffer A {A_TYPE data_a[];};
layout (binding = 1) readonly buffer B {B_TYPE data_b[];};
layout (binding = 2) writeonly buffer D {D_TYPE data_d[];};

#if QUANT_K > 1
#define DECODEFUNCA , dequantFuncA

#include "dequant_funcs_cm2.comp"

#else
#define DECODEFUNCA
#endif

#if !defined(fetch_scales)
#define fetch_scales(a, b, c, d, e, f)
#endif
#if !defined(store_scales)
#define store_scales(a)
#endif

#if defined(DATA_A_BF16)
#define MAT_TYPE bfloat16_t
#else
#define MAT_TYPE FLOAT_TYPE
#endif

#ifdef MUL_MAT_ID
layout (binding = 3) readonly buffer IDS {int data_ids[];};

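// Compacted list of B/D rows routed to the current expert. Based on how the entries
// are built and consumed below: .x = ii0 % ne11 (row within the B matrix),
// .y = ii1 (batch index into B/D), .z = ii0 (row selected in D).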
shared u16vec4 row_ids[4096];

layout(buffer_reference, std430, buffer_reference_align = 2) buffer decodeBufB {
   B_TYPE b[];
};

uint _ne1;
shared uint _ne1_sh;

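// Decode callback used when loading B in the MUL_MAT_ID path: remaps the requested
// row through row_ids so only rows routed to this expert are gathered; rows past
// _ne1 read as zero.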
B_TYPE decodeFuncB(const in decodeBufB bl, const in uint blockCoords[2], const in uint coordInBlock[2])
{
    const uint row_i = blockCoords[0];

    if (row_i >= _ne1) {
        return B_TYPE(0.0);
    }

    const u16vec4 row_idx = row_ids[row_i];
    B_TYPE ret = data_b[row_idx.y * p.batch_stride_b + row_idx.x * p.stride_b + blockCoords[1]];

    return ret;
}

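// Per-element store callback for the MUL_MAT_ID path: scatters each accumulator
// element to its destination row of D via the row_ids mapping, discarding elements
// outside the p.M x _ne1 output region.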
D_TYPE perElemOpD(const in uint32_t r, const in uint32_t c, const in D_TYPE elem, const in uint32_t ir, const in uint32_t ic)
{
    uint dr = ir * BM + r;
    uint dc = ic * BN + c;

    if (dr < p.M && dc < _ne1) {
        uint row_i = dc;
        const u16vec4 row_idx = row_ids[row_i];
        data_d[row_idx.y * p.batch_stride_d + row_idx.z * p.stride_d + dr] = elem;
    }
    return elem;
}

#endif

void main() {
#ifdef NEEDS_INIT_IQ_SHMEM
    init_iq_shmem(gl_WorkGroupSize);
#endif

    const uint tid = gl_LocalInvocationIndex;

#ifdef MUL_MAT_ID
    const uint expert_idx = gl_GlobalInvocationID.z;
#else
    const uint batch_idx = gl_GlobalInvocationID.z;

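    // Decompose the flat batch index into (i12, i13) and map it to A's batch index,
    // accounting for ggml-style broadcasting of A across dims 2 and 3.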
    const uint i13 = batch_idx / p.ne12;
    const uint i12 = batch_idx % p.ne12;

    const uint i03 = i13 / p.broadcast3;
    const uint i02 = i12 / p.broadcast2;

    const uint batch_idx_a = i03 * p.ne02 + i02;
#endif

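    // Workgroup x packs both the output M-tile index (ir) and the split-k chunk
    // index (ik); workgroup y selects the output N-tile (ic).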
    const uint blocks_m = (p.M + BM - 1) / BM;
    const uint ir = gl_WorkGroupID.x % blocks_m;
    const uint ik = gl_WorkGroupID.x / blocks_m;
    const uint ic = gl_WorkGroupID.y;

#ifdef MUL_MAT_ID
    // Spread the search across all elements in the first subgroup
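    // Each invocation tests one entry of the ids tensor; a ballot over the matches
    // gives every hit a unique compacted slot (exclusive bit count), and _ne1
    // accumulates the total number of rows routed to this expert.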
    if (gl_SubgroupID == 0) {
        _ne1 = 0;
        uint num_elements = p.nei1 * p.nei0;

        for (uint i = gl_SubgroupInvocationID; subgroupAny(i < num_elements); i += gl_SubgroupSize) {
            bool in_range = i < num_elements;
            uint ii0 = i % p.nei0;
            uint ii1 = i / p.nei0;
            uint id = in_range ? data_ids[ii1*p.nbi1 + ii0] : 0;
            uvec4 ballot = subgroupBallot(in_range && id == expert_idx);
            uint idx = subgroupBallotExclusiveBitCount(ballot);
            if (in_range && id == expert_idx) {
                row_ids[_ne1 + idx] = u16vec4(ii0 % p.ne11, ii1, ii0, 0);
            }
            _ne1 += subgroupBallotBitCount(ballot);
        }
        _ne1_sh = _ne1;
    }

    barrier();

    _ne1 = _ne1_sh;

    // This workgroup's N-tile starts past the number of routed rows, so it has no work.
    if (ic * BN >= _ne1) return;
#endif

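    // Range of K handled by this workgroup. Without MUL_MAT_ID the K dimension may be
    // split across workgroups (split-k), each computing a partial sum over its slice.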
#ifdef MUL_MAT_ID
    uint start_k = 0;
    const uint end_k = p.K;
#else
    uint start_k = ik * p.k_split;
    const uint end_k = min(p.K, (ik + 1) * p.k_split);
#endif

#ifdef MUL_MAT_ID
    uint pos_a = (expert_idx * p.batch_stride_a) / QUANT_K;
    uint pos_b = 0;
#else
    uint pos_a = (batch_idx_a * p.batch_stride_a) / QUANT_K;
    uint pos_b = batch_idx * p.batch_stride_b;
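    // Each split-k chunk writes its partial result to a separate slice of D, offset by
    // ik * batch_stride_d * the number of batches; the partials are presumably summed
    // by a separate reduction pass.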
    uint pos_d = batch_idx * p.batch_stride_d + ik * p.batch_stride_d * gl_NumWorkGroups.z;
#endif

    uint stride_a = p.stride_a / QUANT_K;
    uint stride_b = p.stride_b;

    // Hint to the compiler that values are aligned (want 16B alignment).
    // Quantized A data is already block-aligned, so only the non-quant strides need the hint.
#if ALIGNED
#if QUANT_K == 1
    stride_a &= ~7;
#endif
    stride_b &= ~7;
#endif

    // Create layouts for both clamped and unclamped accesses
    tensorLayoutNV<2> tensorLayoutA = createTensorLayoutNV(2);
    tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutAClamp = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
    tensorLayoutNV<2> tensorLayoutB = createTensorLayoutNV(2);
    tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutBClamp = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
    tensorLayoutNV<2, gl_CooperativeMatrixClampModeConstantNV> tensorLayoutD = createTensorLayoutNV(2, gl_CooperativeMatrixClampModeConstantNV);
    tensorLayoutD = setTensorLayoutStrideNV(tensorLayoutD, p.stride_d, 1);

#if QUANT_K > 1
    tensorLayoutA = setTensorLayoutBlockSizeNV(tensorLayoutA, 1, QUANT_K);
    tensorLayoutAClamp = setTensorLayoutBlockSizeNV(tensorLayoutAClamp, 1, QUANT_K);
#endif

    // Use end_k rather than p.K as the dimension because that's what
    // we need to bound check against when using split_k.
    // Bounds check B against padded_N, but bounds check D against N.
    tensorLayoutA = setTensorLayoutDimensionNV(tensorLayoutA, p.M, end_k);
    tensorLayoutB = setTensorLayoutDimensionNV(tensorLayoutB, p.padded_N, end_k);
    tensorLayoutD = setTensorLayoutDimensionNV(tensorLayoutD, p.N, p.M);
    tensorLayoutAClamp = setTensorLayoutDimensionNV(tensorLayoutAClamp, p.M, end_k);
    tensorLayoutBClamp = setTensorLayoutDimensionNV(tensorLayoutBClamp, p.padded_N, end_k);

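    // Transposed view used for the B loads and the D store: B memory is laid out as
    // (padded_N x K) but consumed as a (BK x BN) operand, and D is addressed as
    // (N x M) while the accumulator is (BM x BN).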
    tensorViewNV<2, false, 1, 0> tensorViewTranspose = createTensorViewNV(2, false, 1, 0);

#if !defined(MUL_MAT_ID)

    const uint START_ALIGN_K = 256;
    // For Qi_K (block size 256), unroll whole 256 element tiles.
    // For legacy quants (block size 32), unroll 8x.
    const uint UNROLL_K = (QUANT_K == 256) ? 256 : (BK * 8);
    const uint unroll_count = UNROLL_K / BK;
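    // With QUANT_K == 256 each outer iteration covers exactly one superblock, so a
    // single fetch_scales/store_scales pair serves all BK-wide steps of that block.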

    // Detect a fast path where all loads are entirely in bounds and no clamping is required
    if ((ir + 1) * BM <= p.M && (ic + 1) * BN <= p.padded_N && (start_k % START_ALIGN_K) == 0 && (end_k % BK) == 0 &&
#if QUANT_K == 1
        (stride_a % 8) == 0 &&
#endif
        (stride_b % 8) == 0) {
        // Hint to the compiler that values are aligned (want 16B alignment)
        start_k &= ~(START_ALIGN_K-1);
        stride_b &= ~7;
#if QUANT_K == 1
        stride_a &= ~7;
#endif

        tensorLayoutA = setTensorLayoutStrideNV(tensorLayoutA, stride_a, 1);
        tensorLayoutB = setTensorLayoutStrideNV(tensorLayoutB, stride_b, 1);

        uint k_iters = (end_k - start_k) / UNROLL_K;
        uint block_k = start_k;

        // Fetch scale values for a tile of quants. These will be copied into shared memory.
        // The fetches and stores are pipelined to hide the latency.
        fetch_scales(ir * BM, pos_a, stride_a, start_k, tid, true);

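        // Pick the narrowest accumulator that still covers the valid output columns of
        // this tile: BN/4, BN/2, or the full BN.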
        if (enable_smaller_matrices && ic * BN + BNover4 >= p.N) {
            coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BNover4, gl_MatrixUseAccumulator> sum = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BNover4, gl_MatrixUseAccumulator>(0.0);
            for (uint i = 0; i < k_iters; ++i) {

                store_scales(tid);
                if (block_k + UNROLL_K < end_k) {
                    fetch_scales(ir * BM, pos_a, stride_a, block_k + UNROLL_K, tid, true);
                }

                // Manual partial unroll
                [[unroll]] for (uint j = 0; j < unroll_count; ++j) {
                    coopmat<MAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
                    coopmat<MAT_TYPE, gl_ScopeWorkgroup, BK, BNover4, gl_MatrixUseB> mat_b;

                    coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
                    coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BNover4, block_k, BK), tensorViewTranspose);

                    sum = coopMatMulAdd(mat_a, mat_b, sum);
                    block_k += BK;
                }
            }
            // Do any remaining iterations that were not unrolled
            if (block_k < end_k) {
                store_scales(tid);
            }
            while (block_k < end_k) {
                coopmat<MAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
                coopmat<MAT_TYPE, gl_ScopeWorkgroup, BK, BNover4, gl_MatrixUseB> mat_b;

                coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BNover4, block_k, BK), tensorViewTranspose);

                sum = coopMatMulAdd(mat_a, mat_b, sum);
                block_k += BK;
            }
            coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BNover4, gl_MatrixUseAccumulator> mat_d = coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BNover4, gl_MatrixUseAccumulator>(sum);

            coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BNover4, ir * BM, BM), tensorViewTranspose);
            return;
        } else if (enable_smaller_matrices && ic * BN + BNover2 >= p.N) {
            coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BNover2, gl_MatrixUseAccumulator> sum = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BNover2, gl_MatrixUseAccumulator>(0.0);
            for (uint i = 0; i < k_iters; ++i) {

                store_scales(tid);
                if (block_k + UNROLL_K < end_k) {
                    fetch_scales(ir * BM, pos_a, stride_a, block_k + UNROLL_K, tid, true);
                }

                // Manual partial unroll
                [[unroll]] for (uint j = 0; j < unroll_count; ++j) {
                    coopmat<MAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
                    coopmat<MAT_TYPE, gl_ScopeWorkgroup, BK, BNover2, gl_MatrixUseB> mat_b;

                    coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
                    coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BNover2, block_k, BK), tensorViewTranspose);

                    sum = coopMatMulAdd(mat_a, mat_b, sum);
                    block_k += BK;
                }
            }
            // Do any remaining iterations that were not unrolled
            if (block_k < end_k) {
                store_scales(tid);
            }
            while (block_k < end_k) {
                coopmat<MAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
                coopmat<MAT_TYPE, gl_ScopeWorkgroup, BK, BNover2, gl_MatrixUseB> mat_b;

                coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BNover2, block_k, BK), tensorViewTranspose);

                sum = coopMatMulAdd(mat_a, mat_b, sum);
                block_k += BK;
            }
            coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BNover2, gl_MatrixUseAccumulator> mat_d = coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BNover2, gl_MatrixUseAccumulator>(sum);

            coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BNover2, ir * BM, BM), tensorViewTranspose);
            return;
        } else {
            coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> sum = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(0.0);

            for (uint i = 0; i < k_iters; ++i) {

                store_scales(tid);
                if (block_k + UNROLL_K < end_k) {
                    fetch_scales(ir * BM, pos_a, stride_a, block_k + UNROLL_K, tid, true);
                }

                // Manual partial unroll
                [[unroll]] for (uint j = 0; j < unroll_count; ++j) {
                    coopmat<MAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
                    coopmat<MAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b;

                    coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
                    coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose);

                    sum = coopMatMulAdd(mat_a, mat_b, sum);
                    block_k += BK;
                }
            }
            // Do any remaining iterations that were not unrolled
            if (block_k < end_k) {
                store_scales(tid);
            }
            while (block_k < end_k) {
                coopmat<MAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
                coopmat<MAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b;

                coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutA, ir * BM, BM, block_k, BK) DECODEFUNCA);
                coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose);

                sum = coopMatMulAdd(mat_a, mat_b, sum);
                block_k += BK;
            }
            coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> mat_d = coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(sum);

            coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BN, ir * BM, BM), tensorViewTranspose);
            return;
        }
    } else
#endif // !defined(MUL_MAT_ID)
    {
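        // Generic path: used for edge tiles (and always for MUL_MAT_ID). Loads go through
        // the clamped layouts, or through decodeFuncB for MUL_MAT_ID, so out-of-bounds
        // accesses are handled instead of assumed in bounds.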
        tensorLayoutA = setTensorLayoutStrideNV(tensorLayoutA, stride_a, 1);

        tensorLayoutAClamp = setTensorLayoutStrideNV(tensorLayoutAClamp, stride_a, 1);

        tensorLayoutB = setTensorLayoutStrideNV(tensorLayoutB, stride_b, 1);

        tensorLayoutBClamp = setTensorLayoutStrideNV(tensorLayoutBClamp, stride_b, 1);

        coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> sum;
        sum = coopmat<ACC_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(0.0);

        uint k_iters = (end_k - start_k + BK - 1) / BK;

        fetch_scales(ir * BM, pos_a, stride_a, start_k, tid, false);

        [[dont_unroll]]
        for (uint block_k = start_k, i = 0; i < k_iters; block_k += BK, ++i) {

            store_scales(tid);
            if (block_k + BK < end_k) {
                fetch_scales(ir * BM, pos_a, stride_a, block_k + BK, tid, false);
            }

            coopmat<MAT_TYPE, gl_ScopeWorkgroup, BM, BK, gl_MatrixUseA> mat_a;
            coopmat<MAT_TYPE, gl_ScopeWorkgroup, BK, BN, gl_MatrixUseB> mat_b;

            coopMatLoadTensorNV(mat_a, data_a, pos_a, sliceTensorLayoutNV(tensorLayoutAClamp, ir * BM, BM, block_k, BK) DECODEFUNCA);
#ifdef MUL_MAT_ID
            coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutB, ic * BN, BN, block_k, BK), tensorViewTranspose, decodeFuncB);
#else
            coopMatLoadTensorNV(mat_b, data_b, pos_b, sliceTensorLayoutNV(tensorLayoutBClamp, ic * BN, BN, block_k, BK), tensorViewTranspose);
#endif

            sum = coopMatMulAdd(mat_a, mat_b, sum);
        }

        // Convert from ACC_TYPE to D_TYPE
        coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator> mat_d;
        mat_d = coopmat<D_TYPE, gl_ScopeWorkgroup, BM, BN, gl_MatrixUseAccumulator>(sum);

#ifdef MUL_MAT_ID
        // Call callback to store each element, remapping row through shared memory
        coopMatPerElementNV(mat_d, mat_d, perElemOpD, ir, ic);
#else
        coopMatStoreTensorNV(mat_d, data_d, pos_d, sliceTensorLayoutNV(tensorLayoutD, ic * BN, BN, ir * BM, BM), tensorViewTranspose);
#endif
    }
}