From a2d24c97e5c5c28aeb3669dcc0044b69258a85ca Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Sat, 10 May 2025 18:52:54 +0300
Subject: TG improvements for MoE models (#404)

* cuda: Remove unnecessary device to host copy of row ids

We get 3-4% TG speed improvement for DeepSeek-Lite just from that.

* CPU: fix get_rows when SER is used

With smart experts reduction (SER), one potentially uses fewer experts
than specified by the model. This is accomplished by setting the ID of
the not-selected tensors to -1. Most of the necessary stuff was
implemented when I added the SER option, but I forgot to update
get_rows() for non-quantized tensors. As a result, we get random
garbage for the weights of the not-selected experts, which leads to
garbage output. This commit fixes it on the CPU. I'm not quite sure yet
why the GPU is not working.

* CUDA: fix TG with SER

---------

Co-authored-by: Iwan Kawrakow
---
 ggml/src/ggml-cuda.cu | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/ggml/src/ggml-cuda.cu b/ggml/src/ggml-cuda.cu
index ff6e064c..87f80d0c 100644
--- a/ggml/src/ggml-cuda.cu
+++ b/ggml/src/ggml-cuda.cu
@@ -2505,11 +2505,6 @@ static bool ggml_cuda_up_gate_unary(ggml_backend_cuda_context & ctx, ggml_tensor
                 dst_padded_col_size, next->src[0]->type, stream);
         CUDA_CHECK(cudaGetLastError());
 
-        std::vector<char> ids_host(ggml_nbytes(ids));
-        const char * ids_dev = (const char *) ids->data;
-        CUDA_CHECK(cudaMemcpyAsync(ids_host.data(), ids_dev, ggml_nbytes(ids), cudaMemcpyDeviceToHost, stream));
-        CUDA_CHECK(cudaStreamSynchronize(stream));
-
         local_dst.ne[2] = 1;
 
         auto local_next = *next;
--
cgit v1.2.3
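
Note (editor): to illustrate the CPU-side SER behavior described in the commit message, here is a minimal, self-contained C sketch, not the actual ggml implementation. It shows a get_rows-style copy for non-quantized (float) rows in which a row id of -1 marks an expert dropped by SER; the function name, signature, and the zero-fill policy for dropped rows are assumptions made for illustration only.

/* Minimal sketch (assumed names/policy, not ggml code): copy selected expert
 * rows, and for SER-dropped rows (id == -1) write zeros instead of reading a
 * nonexistent source row, which would otherwise produce random garbage. */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static void get_rows_f32_ser(const float * src, int64_t row_size,
                             const int32_t * ids, int64_t n_ids, float * dst) {
    for (int64_t i = 0; i < n_ids; ++i) {
        float * out = dst + i*row_size;
        if (ids[i] < 0) {
            /* SER: expert not selected, id set to -1 -> do not dereference src */
            memset(out, 0, (size_t)row_size*sizeof(float));
        } else {
            memcpy(out, src + (int64_t)ids[i]*row_size, (size_t)row_size*sizeof(float));
        }
    }
}

int main(void) {
    const float   src[2*3] = {1,2,3, 4,5,6};  /* 2 expert rows of size 3 */
    const int32_t ids[3]   = {1, -1, 0};      /* middle expert dropped by SER */
    float dst[3*3];
    get_rows_f32_ser(src, 3, ids, 3, dst);
    for (int i = 0; i < 9; ++i) printf("%g ", dst[i]);
    printf("\n");                             /* prints: 4 5 6 0 0 0 1 2 3 */
    return 0;
}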