author     Kawrakow <iwankawrakow@gmail.com>          2024-12-15 09:54:21 +0100
committer  GitHub <noreply@github.com>                2024-12-15 09:54:21 +0100
commit     85c5a1a99569ccc00c280835fe3a69b4af02c43b (patch)
tree       da421487d5ddd0467b2bfd6cbbfb2666406c46f1 /src
parent     20758edcae65213b2f575b6d23dfea67ad9dd0e0 (diff)
BF16_R16 - 16 interleaved bf16 rows (#142)
* Not working bf16_r4
* Adding bf16_r8
Small performance gain compared to bf16: 258 t/s vs 234 t/s.
I guess this is still sub-optimal.
* bf16_rx: Very slightly faster by interleaving 16 rows
258 t/s -> 263 t/s
* Rename bf16_r4 to bf16_r16
We are interleaving 16 rows now.
* Clean up unused code
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
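
The ggml-side repacking kernel that actually implements BF16_R16 is not part of this src/ diff. As a rough illustration only (a sketch, not the repository's implementation), interleaving 16 bf16 rows can be pictured as follows: within each group of 16 consecutive rows, elements are stored column by column, so a kernel can fetch one element from each of the 16 rows with a single contiguous load.

#include <cstdint>

// Illustrative sketch only (hypothetical helper, not the actual ggml kernel):
// repack an nrows x ncols bf16 matrix so that each group of 16 consecutive
// rows is interleaved column-wise. Element (r, c) of a group moves to offset
// c*16 + r inside that group, so the 16 row values of one column sit
// contiguously. nrows is assumed to be a multiple of 16; the diff below falls
// back to plain BF16 when it is not.
typedef uint16_t bf16_t;   // raw bf16 bit pattern

static void repack_bf16_r16(const bf16_t * src, bf16_t * dst,
                            int64_t nrows, int64_t ncols) {
    for (int64_t row0 = 0; row0 < nrows; row0 += 16) {
        const bf16_t * s = src + row0*ncols;
        bf16_t       * d = dst + row0*ncols;
        for (int64_t c = 0; c < ncols; ++c) {
            for (int64_t r = 0; r < 16; ++r) {
                d[c*16 + r] = s[r*ncols + c];
            }
        }
    }
}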
Diffstat (limited to 'src')
-rw-r--r--  src/llama.cpp | 13
1 file changed, 13 insertions(+), 0 deletions(-)
diff --git a/src/llama.cpp b/src/llama.cpp
index 035e5b1a..536b2f97 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -3828,6 +3828,7 @@ struct llama_model_loader {
             case GGML_TYPE_F32:     ftype = LLAMA_FTYPE_ALL_F32;        break;
             case GGML_TYPE_F16:     ftype = LLAMA_FTYPE_MOSTLY_F16;     break;
             case GGML_TYPE_BF16:    ftype = LLAMA_FTYPE_MOSTLY_BF16;    break;
+            case GGML_TYPE_BF16_R16:ftype = LLAMA_FTYPE_MOSTLY_BF16_R16;break;
             case GGML_TYPE_Q4_0:    ftype = LLAMA_FTYPE_MOSTLY_Q4_0;    break;
             case GGML_TYPE_Q4_1:    ftype = LLAMA_FTYPE_MOSTLY_Q4_1;    break;
             case GGML_TYPE_Q5_0:    ftype = LLAMA_FTYPE_MOSTLY_Q5_0;    break;
@@ -4540,6 +4541,7 @@ static std::string llama_model_ftype_name(llama_ftype ftype) {
         case LLAMA_FTYPE_ALL_F32:     return "all F32";
         case LLAMA_FTYPE_MOSTLY_F16:  return "F16";
         case LLAMA_FTYPE_MOSTLY_BF16: return "BF16";
+        case LLAMA_FTYPE_MOSTLY_BF16_R16: return "BF16_R16";
         case LLAMA_FTYPE_MOSTLY_Q4_0: return "Q4_0";
         case LLAMA_FTYPE_MOSTLY_Q4_1: return "Q4_1";
         case LLAMA_FTYPE_MOSTLY_Q5_0: return "Q5_0";
@@ -15833,6 +15835,9 @@ static ggml_type llama_tensor_get_type(quantize_state_internal & qs, ggml_type n
             else if (new_type == GGML_TYPE_Q8_0_R4) {
                 new_type = GGML_TYPE_Q8_0;
             }
+            else if (new_type == GGML_TYPE_BF16_R16) {
+                new_type = GGML_TYPE_BF16;
+            }
         }
     } else if (ftype == LLAMA_FTYPE_MOSTLY_IQ2_XXS || ftype == LLAMA_FTYPE_MOSTLY_IQ2_XS || ftype == LLAMA_FTYPE_MOSTLY_IQ1_S ||
               ftype == LLAMA_FTYPE_MOSTLY_IQ2_S || ftype == LLAMA_FTYPE_MOSTLY_IQ2_M || ftype == LLAMA_FTYPE_MOSTLY_IQ1_M ||
@@ -16228,6 +16233,7 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         case LLAMA_FTYPE_MOSTLY_Q8_0: default_type = GGML_TYPE_Q8_0; break;
         case LLAMA_FTYPE_MOSTLY_F16:  default_type = GGML_TYPE_F16;  break;
         case LLAMA_FTYPE_MOSTLY_BF16: default_type = GGML_TYPE_BF16; break;
+        case LLAMA_FTYPE_MOSTLY_BF16_R16: default_type = GGML_TYPE_BF16_R16; break;
         case LLAMA_FTYPE_ALL_F32:     default_type = GGML_TYPE_F32;  break;

         // K-quants
@@ -16520,6 +16526,9 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
         if (quantize) {
             new_type = default_type;

+            if (new_type == GGML_TYPE_BF16_R16 && strcmp(tensor->name, "token_embd.weight") == 0) {
+                new_type = GGML_TYPE_BF16;
+            }
             // get more optimal quantization type based on the tensor shape, layer, etc.
             if (!params->pure && ggml_is_quantized(default_type)) {
@@ -16680,6 +16689,10 @@ static void llama_model_quantize_internal(const std::string & fname_inp, const s
                 if (tensor->ne[1] % 4 != 0) new_type = GGML_TYPE_IQ4_K;
                 else chunk_size_multiplier = 4;
             }
+            else if (new_type == GGML_TYPE_BF16_R16) {
+                if (tensor->ne[1] % 16 != 0) new_type = GGML_TYPE_BF16;
+                else chunk_size_multiplier = 16;
+            }

             LLAMA_LOG_INFO("converting to %s .. ", ggml_type_name(new_type));
             fflush(stdout);
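
Taken together, the quantization-side changes above amount to a small selection rule: token_embd.weight stays plain BF16, any tensor whose row count (ne[1]) is not a multiple of 16 also falls back to BF16, and everything else is converted to BF16_R16 with a chunk size multiplier of 16. A condensed sketch of that rule (hypothetical standalone helper, not a function in llama.cpp):

#include <cstdint>
#include <cstring>

// Condensed restatement of the fallback logic added in this commit
// (hypothetical helper for illustration; the real checks live inline in
// llama_model_quantize_internal). The enum stands in for the ggml types.
enum simple_type { TYPE_BF16, TYPE_BF16_R16 };

static simple_type pick_bf16_r16(const char * tensor_name, int64_t ne1,
                                 int & chunk_size_multiplier) {
    // the token embedding is never interleaved
    if (std::strcmp(tensor_name, "token_embd.weight") == 0) return TYPE_BF16;
    // rows are interleaved in groups of 16, so ne[1] must be divisible by 16
    if (ne1 % 16 != 0) return TYPE_BF16;
    chunk_size_multiplier = 16;
    return TYPE_BF16_R16;
}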