diff options
author    Kawrakow <iwankawrakow@gmail.com>           2024-10-24 12:20:30 +0200
committer GitHub <noreply@github.com>                 2024-10-24 12:20:30 +0200
commit    9114078959b404899fd67e1af45f0dcbee51b47f (patch)
tree      754c2a10b54df315439e86e3106779f593a49504 /ggml/src/ggml-cuda
parent    b61cf7d0d7e7c5d971087d2f919818fbf684809e (diff)
Fix quantized k-cache without FA (#105)
* Added Johannes' changes, still getting NaNs with quantized k-cache.
Also getting NaNs on Johannes's mainline branch.
* This fixes it
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'ggml/src/ggml-cuda')
 ggml/src/ggml-cuda/mmq.cu      | 4 +---
 ggml/src/ggml-cuda/quantize.cu | 7 ++-----
 2 files changed, 3 insertions(+), 8 deletions(-)
diff --git a/ggml/src/ggml-cuda/mmq.cu b/ggml/src/ggml-cuda/mmq.cu
index 78d70cd7..09d3e9c7 100644
--- a/ggml/src/ggml-cuda/mmq.cu
+++ b/ggml/src/ggml-cuda/mmq.cu
@@ -8,8 +8,6 @@ void ggml_cuda_op_mul_mat_q(
 
     const int64_t ne00 = src0->ne[0];
 
-    const int64_t nb01 = src0->nb[1];
-
     const int64_t ne10 = src1->ne[0];
     const int64_t ne11 = src1->ne[1];
     GGML_ASSERT(ne10 % QK8_1 == 0);
@@ -17,7 +15,7 @@ void ggml_cuda_op_mul_mat_q(
     const int64_t ne0 = dst->ne[0];
 
     const int64_t row_diff = row_high - row_low;
-    const int64_t stride00 = nb01 / ggml_type_size(src0->type);
+    const int64_t stride00 = ne00 / ggml_blck_size(src0->type);
 
     int id = ggml_cuda_get_device();
     const int compute_capability = ggml_cuda_info().devices[id].cc;
diff --git a/ggml/src/ggml-cuda/quantize.cu b/ggml/src/ggml-cuda/quantize.cu
index 45408ce8..65c7e5f1 100644
--- a/ggml/src/ggml-cuda/quantize.cu
+++ b/ggml/src/ggml-cuda/quantize.cu
@@ -84,7 +84,8 @@ static __global__ void quantize_mmq_q8_1(
         }
     }
 
-    const float d_inv = 127.0f / amax;
+    const float d = amax/127.f;
+    const float d_inv = d > 0 ? 1/d : 0.f;
     char4 q;
     q.x = roundf(xi.x*d_inv);
     q.y = roundf(xi.y*d_inv);
@@ -106,8 +107,6 @@ static __global__ void quantize_mmq_q8_1(
         return;
     }
 
-    const float d = 1.0f / d_inv;
-
     y[ib].d2s6[iqs/64] = d;
 
     return;
@@ -117,8 +116,6 @@ static __global__ void quantize_mmq_q8_1(
         return;
     }
 
-    const float d = 1.0f / d_inv;
-
     if (ds_layout == MMQ_Q8_1_DS_LAYOUT_DS4) {
         y[ib].ds4[iqs/32] = make_half2(d, sum);
     } else {