author    Kawrakow <iwankawrakow@gmail.com>  2025-01-10 18:18:04 +0200
committer GitHub <noreply@github.com>        2025-01-10 18:18:04 +0200
commit    7553989dd88749de028853f9c0ea39651aad92a3 (patch)
tree      71783a2c138437c940492ca2b6f0e218f20a7b4c /src
parent    b1363b6177661556750c110cf876e044e61af365 (diff)
Be able to re-quantize MS BitNet I2_S models (#169)
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'src')
-rw-r--r--  src/llama.cpp  6
1 file changed, 6 insertions, 0 deletions
diff --git a/src/llama.cpp b/src/llama.cpp
index 54b9b118..d330904f 100644
--- a/src/llama.cpp
+++ b/src/llama.cpp
@@ -15698,6 +15698,12 @@ static void llama_tensor_dequantize_internal(
throw std::runtime_error(format("cannot dequantize/convert tensor type %s", ggml_type_name(tensor->type)));
}
+ if (tensor->type == GGML_TYPE_I2_S) {
+ // we need to dequantize the entire tensor for I2_S
+ qtype.to_float(tensor->data, f32_output, nelements);
+ return;
+ }
+
if (nthread < 2) {
if (tensor->type == GGML_TYPE_F16) {
ggml_fp16_to_fp32_row((ggml_fp16_t *)tensor->data, f32_output, nelements);
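
Note (not part of the patch): the new branch returns early for GGML_TYPE_I2_S, converting the whole tensor with a single to_float call instead of falling through to the per-block, multi-threaded conversion below it. A minimal sketch of the same whole-tensor conversion as a standalone helper follows; it assumes the ggml_internal_get_type_traits()/ggml_type_traits_t interface that llama_tensor_dequantize_internal already uses for quantized types, and the helper name is hypothetical.

#include <cstdint>
#include <stdexcept>
#include <vector>

#include "ggml.h"

// Hypothetical helper (sketch only): dequantize an entire tensor to f32 in a
// single to_float call, mirroring the I2_S branch added by this patch.
static std::vector<float> dequantize_whole_tensor(const struct ggml_tensor * tensor) {
    const int64_t nelements = ggml_nelements(tensor);
    std::vector<float> f32_output(nelements);

    const ggml_type_traits_t qtype = ggml_internal_get_type_traits(tensor->type);
    if (qtype.to_float == nullptr) {
        throw std::runtime_error("tensor type has no to_float conversion");
    }

    // Convert the whole tensor at once rather than splitting it into
    // per-thread blocks, which is what the patched code does for I2_S.
    qtype.to_float(tensor->data, f32_output.data(), nelements);
    return f32_output;
}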