From fd20638bbcb4b1ba69783312bb78545fa418d3f2 Mon Sep 17 00:00:00 2001
From: Kawrakow
Date: Sun, 29 Sep 2024 09:03:52 +0300
Subject: Allow bf16 kv-cache (#69)

On the CPU I get the exact same PPL with and without FA using bf16 for
kv-cache. But on CUDA the bf16 kv-cache result is about the same as the
fp16 kv-cache CPU result, so I'm missing some conversion somewhere.

Co-authored-by: Iwan Kawrakow
---
 src/llama.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'src')

diff --git a/src/llama.cpp b/src/llama.cpp
index 94a939d8..dca03ade 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -16816,7 +16816,7 @@ struct llama_context * llama_new_context_with_model(
         params.flash_attn = false;
     }

-    if (params.type_v != GGML_TYPE_F16 && !params.flash_attn) {
+    if (params.type_v != GGML_TYPE_F16 && params.type_v != GGML_TYPE_BF16 && !params.flash_attn) {
         LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__);
         return nullptr;
     }
--
cgit v1.2.3
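
Usage note (not part of the patch): a minimal sketch of how a caller could now request a bf16 V cache with flash attention disabled, which the relaxed check permits. It uses the llama.cpp C API as commonly exposed (llama_context_default_params(), llama_new_context_with_model()); the model loading around it is assumed.

    // Sketch under the assumptions above: `model` is a llama_model * that was
    // already loaded (e.g. via llama_load_model_from_file()).
    struct llama_context_params cparams = llama_context_default_params();
    cparams.type_k     = GGML_TYPE_BF16; // K cache type is not restricted by this check
    cparams.type_v     = GGML_TYPE_BF16; // now accepted even though...
    cparams.flash_attn = false;          // ...flash attention is disabled
    struct llama_context * ctx = llama_new_context_with_model(model, cparams);
    if (ctx == NULL) {
        // Before this patch, any non-f16 type_v combined with flash_attn == false
        // failed here with "V cache quantization requires flash_attn".
    }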