summaryrefslogtreecommitdiff
path: root/include
diff options
context:
space:
mode:
authorKawrakow <iwankawrakow@gmail.com>2024-12-15 09:54:21 +0100
committerGitHub <noreply@github.com>2024-12-15 09:54:21 +0100
commit85c5a1a99569ccc00c280835fe3a69b4af02c43b (patch)
treeda421487d5ddd0467b2bfd6cbbfb2666406c46f1 /include
parent20758edcae65213b2f575b6d23dfea67ad9dd0e0 (diff)
BF16_R16 - 16 interleaved bf16 rows (#142)
* Not working bf16_r4 * Adding bf16_r8 Small performance gain compared to bf16 - 258 t/s vs 234 t/s. I guess, this is still sub-optimal. * bf16_rx: Very slightly faster by interleaving 16 rows 258 t/s -> 263 t/s * Rename bf16_r4 to bf16_r16 We are interleaving 16 rows now. * Cleanup unused stuff --------- Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'include')
-rw-r--r--include/llama.h1
1 file changed, 1 insertion, 0 deletions
diff --git a/include/llama.h b/include/llama.h
index e4d6ed3d..988ffec7 100644
--- a/include/llama.h
+++ b/include/llama.h
@@ -191,6 +191,7 @@ extern "C" {
LLAMA_FTYPE_MOSTLY_IQ4_NL_R4 = 225, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_XS_R4 = 230, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q6_0_R4 = 335, // except 1d tensors
+ LLAMA_FTYPE_MOSTLY_BF16_R16 = 232, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ2_BN_R4 = 337, // except 1d tensors
LLAMA_FTYPE_MOSTLY_IQ4_K_R4 = 340, // except 1d tensors
LLAMA_FTYPE_MOSTLY_Q8_K_R8 = 399, // except 1d tensors