From 61d1c88e155515dd03940913a5707ea84a8b119b Mon Sep 17 00:00:00 2001
From: 0cc4m
Date: Tue, 5 Mar 2024 13:33:42 +0100
Subject: Vulkan Improvements (#5835)

* Improve dequant shaders, add fast q4_0 dequant

* Optimize dmmv non-kquants for GCN

Remove unnecessary SPIR-V shader duplication

* Fix q4_0 dequant dispatch sizes

Fix backend free bug

* Optimize dequant shaders for q4_1, q5_0, q5_1 and q8_0

* Add unary and binary op shader templates

* Fix Vulkan check results

* Enable non-contiguous support for simple ops

* Add argsort

Basic q4_0 mmq shader and unit test

* Speed up q4_0 dequant code, enable mmq for q4_0

* Rework matmul pipeline selection

* Add soft_max alibi support

* Add q4_1, q5_0, q5_1 and q8_0 dequant mat mat mul shaders

* Add environment variable GGML_VK_FORCE_MAX_ALLOCATION_SIZE to limit max buffer size

Rename GGML_VULKAN_DISABLE_F16 to GGML_VK_DISABLE_F16 for consistency
---
 llama.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'llama.cpp')

diff --git a/llama.cpp b/llama.cpp
index 76afcbc1..e9192b4f 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -5014,8 +5014,8 @@ static struct ggml_tensor * llm_build_kqv(
         ggml_mul_mat_set_prec(kq, GGML_PREC_F32);
     }
 
-#if defined(GGML_USE_VULKAN) || defined(GGML_USE_KOMPUTE)
-#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Vulkan, and Kompute")
+#if defined(GGML_USE_KOMPUTE)
+#pragma message("TODO: ALiBi support in ggml_soft_max_ext is not implemented for Kompute")
 #pragma message("      Falling back to ggml_alibi(). Will become an error in Mar 2024")
 #pragma message("ref:  https://github.com/ggerganov/llama.cpp/pull/5488")
     if (hparams.f_max_alibi_bias > 0.0f) {
--
cgit v1.2.3
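
Note on the first commit bullet: q4_0 stores 32 weights per block as packed 4-bit values plus one scale, and dequantization maps each nibble q to (q - 8) * d. The scalar sketch below shows that math for reference; the commit's shaders compute the same thing vectorized on the GPU, and the real block stores its scale as fp16 (converted via ggml_fp16_to_fp32), taken as a plain float here to keep the sketch self-contained.

#include <cstdint>

constexpr int QK4_0 = 32; // weights per q4_0 block, matching ggml's QK4_0

// Scalar q4_0 dequantization: low nibbles fill the first half of the output,
// high nibbles the second half, each shifted to [-8, 7] and scaled by d.
static void dequantize_block_q4_0(const uint8_t qs[QK4_0 / 2], float d, float out[QK4_0]) {
    for (int i = 0; i < QK4_0 / 2; ++i) {
        const int x0 = (qs[i] & 0x0F) - 8; // low nibble
        const int x1 = (qs[i] >> 4)   - 8; // high nibble
        out[i]             = x0 * d;
        out[i + QK4_0 / 2] = x1 * d;
    }
}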
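The diff itself narrows the #pragma fallback to Kompute only, because this commit adds ALiBi support to the Vulkan ggml_soft_max_ext path. For context, ALiBi biases each head's KQ scores with a per-head slope; the sketch below follows the power-of-two slope scheme from the ALiBi paper, which is an assumption about the scheme rather than a quote of the Vulkan shader.

#include <cmath>
#include <cstdint>
#include <vector>

// Per-head ALiBi slopes; max_bias corresponds to hparams.f_max_alibi_bias in
// the diff above. Heads beyond the largest power-of-two count interpolate at
// half steps, per the ALiBi paper.
static std::vector<float> alibi_slopes(uint32_t n_head, float max_bias) {
    const uint32_t n_head_log2 = 1u << (uint32_t) std::floor(std::log2((double) n_head));
    const float m0 = std::pow(2.0f, -max_bias        / n_head_log2);
    const float m1 = std::pow(2.0f, -max_bias / 2.0f / n_head_log2);

    std::vector<float> slopes(n_head);
    for (uint32_t h = 0; h < n_head; ++h) {
        slopes[h] = h < n_head_log2
            ? std::pow(m0, (float) (h + 1))
            : std::pow(m1, (float) (2 * (h - n_head_log2) + 1));
    }
    return slopes; // slopes[h] scales the linear position bias added to the KQ scores
}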
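Finally, the new GGML_VK_FORCE_MAX_ALLOCATION_SIZE variable caps the size of individual Vulkan buffer allocations. A minimal sketch of how such an override could be applied at device init follows; only the variable name comes from the commit message, while the helper, the default behavior, and the byte-count parsing are assumptions.

#include <algorithm>
#include <cstdint>
#include <cstdlib>
#include <string>

// Hypothetical helper: clamp the device's reported max allocation size by the
// environment override, if one is set.
static uint64_t vk_effective_max_alloc(uint64_t device_max_alloc) {
    const char * env = std::getenv("GGML_VK_FORCE_MAX_ALLOCATION_SIZE");
    if (env == nullptr) {
        return device_max_alloc;              // no override: use the device limit
    }
    const uint64_t forced = std::stoull(env); // assumed to be a byte count
    return std::min(device_max_alloc, forced);
}

Under that reading, running with e.g. GGML_VK_FORCE_MAX_ALLOCATION_SIZE=1073741824 would cap single buffers at 1 GiB.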