path: root/ggml/src/ggml.c
author    Kawrakow <iwankawrakow@gmail.com>  2024-10-25 13:08:43 +0200
committer GitHub <noreply@github.com>        2024-10-25 13:08:43 +0200
commit    6b968f38946117552ffed300771c44ba9b39d3e4 (patch)
tree      dc6b0df69f31ea77d9941d6798a4ef411c688080 /ggml/src/ggml.c
parent    9114078959b404899fd67e1af45f0dcbee51b47f (diff)
Bitnet changes (#106)
* Adapting iq2_bn to work without separate scale tensors

  Why? It is becoming burdensome to maintain the special Bitnet conversion in convert_hf_to_gguf.py, so I think it is better to make iq1_bn and iq2_bn just work with the mainline conversion script (which does not generate scales).

* Adapting iq1_bn to work without separate scale tensors

* Adapting iq2_bn: CUDA dequantize

* Adapting iq2_bn: CUDA works

* Adapting iq1_bn: CUDA works

* Adapting iq1_bn, iq2_bn: NEON

* Adapting iq1_bn, iq2_bn: Metal

  Dequantize works, but there is still something wrong with the dot products.

* WIP

  Absolutely don't see what is wrong with the iq1_bn and iq2_bn vector dot product kernels.

* Remove iq1_tn and iq2_tn - Part 1

  Now that iq1_bn and iq2_bn have per-row scales, there is no reason to also have iq1_tn and iq2_tn.

* Remove iq1_tn and iq2_tn - Part 2

* Bitnet: use the standard llm_build_kv to build self attention

  My main motivation was to enable FA. But FA does not work anyway because head size is 100 for the Bitnet ternary models (and I had forgotten this little detail).

* Revert "Avoid rebuild of GGML graph for each token (#98)"

  This reverts commit f2d315b46f7aacc7df4b86bd8acba387b30e11ca. As far as I can tell, the commit breaks Metal TG.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
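The core of the change is visible in the `type_traits` hunks below: the scale that used to live in a separate tensor moves into a small per-row header, with `row_meta_size` going from 0 to 2 bytes for iq1_bn (and to 4 for iq2_bn). Here is a minimal sketch of how such a layout would be consumed, assuming the 2-byte meta is a single fp16 scale stored in front of the row's quantized blocks; the helper names and the conversion routine are illustrative, not this fork's actual symbols (real ggml code uses GGML_FP16_TO_FP32):

```c
#include <stdint.h>
#include <string.h>

// fp16 -> fp32 for normal values; subnormals flush to zero, inf/NaN not
// handled (sketch only -- real ggml code uses GGML_FP16_TO_FP32)
static float fp16_to_fp32(uint16_t h) {
    const uint32_t sign = (uint32_t)(h & 0x8000) << 16;
    const uint32_t exp  = (h >> 10) & 0x1F;
    const uint32_t man  =  h        & 0x3FF;
    uint32_t bits = sign;
    if (exp != 0) {
        bits |= ((exp + 112) << 23) | (man << 13);  // rebias 15 -> 127
    }
    float f;
    memcpy(&f, &bits, sizeof f);
    return f;
}

// Bytes per quantized row: the per-row metadata plus the packed blocks.
// This mirrors what a row_meta_size-aware row-size computation would do.
static size_t row_size(size_t row_meta_size, size_t type_size,
                       int64_t blck_size, int64_t ne) {
    return row_meta_size + (size_t)(ne / blck_size) * type_size;
}

// Read the per-row scale and return a pointer to the quantized payload.
// Assumed layout: [2-byte fp16 scale][blocks ...]; decoding of the packed
// ternary weights themselves is elided.
static const uint8_t * read_row_meta(const uint8_t * row, float * scale) {
    uint16_t h;
    memcpy(&h, row, sizeof h);
    *scale = fp16_to_fp32(h);
    return row + sizeof h;
}
```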
Diffstat (limited to 'ggml/src/ggml.c')
-rw-r--r--  ggml/src/ggml.c  46
1 file changed, 1 insertion(+), 45 deletions(-)
diff --git a/ggml/src/ggml.c b/ggml/src/ggml.c
index 35ed68d0..5570b1fc 100644
--- a/ggml/src/ggml.c
+++ b/ggml/src/ggml.c
@@ -1016,7 +1016,7 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.vec_dot = ggml_vec_dot_iq1_bn_q8_K64,
.vec_dot_type = GGML_TYPE_Q8_K64,
.nrows = 1,
- .row_meta_size = 0,
+ .row_meta_size = 2,
},
[GGML_TYPE_IQ2_BN] = {
.type_name = "iq2_bn",
@@ -1029,34 +1029,8 @@ static const ggml_type_traits_t type_traits[GGML_TYPE_COUNT] = {
.vec_dot = ggml_vec_dot_iq2_bn_q8_K64,
.vec_dot_type = GGML_TYPE_Q8_K64,
.nrows = 1,
- .row_meta_size = 0,
- },
- [GGML_TYPE_IQ2_TN] = {
- .type_name = "iq2_tn",
- .blck_size = QK_K,
- .type_size = sizeof(block_iq2_tn),
- .is_quantized = true,
- .to_float = (ggml_to_float_t) dequantize_row_iq2_tn,
- .from_float = quantize_row_iq2_tn,
- .from_float_ref = (ggml_from_float_t)quantize_row_iq2_tn_ref,
- .vec_dot = vec_dot_iq2_tn_q8_k,
- .vec_dot_type = GGML_TYPE_Q8_K,
- .nrows = 1,
.row_meta_size = 4,
},
- [GGML_TYPE_IQ1_TN] = {
- .type_name = "iq1_tn",
- .blck_size = QK_K,
- .type_size = sizeof(block_iq1_tn),
- .is_quantized = true,
- .to_float = (ggml_to_float_t) dequantize_row_iq1_tn,
- .from_float = quantize_row_iq1_tn,
- .from_float_ref = (ggml_from_float_t)quantize_row_iq1_tn_ref,
- .vec_dot = vec_dot_iq1_tn_q8_k,
- .vec_dot_type = GGML_TYPE_Q8_K64,
- .nrows = 1,
- .row_meta_size = 2,
- },
[GGML_TYPE_IQ4_NL] = {
.type_name = "iq4_nl",
.blck_size = QK4_NL,
@@ -3926,8 +3900,6 @@ enum ggml_type ggml_ftype_to_ggml_type(enum ggml_ftype ftype) {
case GGML_FTYPE_MOSTLY_IQ1_M: wtype = GGML_TYPE_IQ1_M; break;
case GGML_FTYPE_MOSTLY_IQ1_BN: wtype = GGML_TYPE_IQ1_BN; break;
case GGML_FTYPE_MOSTLY_IQ2_BN: wtype = GGML_TYPE_IQ2_BN; break;
- case GGML_FTYPE_MOSTLY_IQ2_TN: wtype = GGML_TYPE_IQ2_TN; break;
- case GGML_FTYPE_MOSTLY_IQ1_TN: wtype = GGML_TYPE_IQ1_TN; break;
case GGML_FTYPE_MOSTLY_IQ4_NL: wtype = GGML_TYPE_IQ4_NL; break;
case GGML_FTYPE_MOSTLY_IQ4_XS: wtype = GGML_TYPE_IQ4_XS; break;
case GGML_FTYPE_MOSTLY_IQ4_KS: wtype = GGML_TYPE_IQ4_KS; break;
@@ -10428,8 +10400,6 @@ static void ggml_compute_forward_add(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
- case GGML_TYPE_IQ2_TN:
- case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -10819,8 +10789,6 @@ static void ggml_compute_forward_add1(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
- case GGML_TYPE_IQ2_TN:
- case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -10960,8 +10928,6 @@ static void ggml_compute_forward_acc(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
- case GGML_TYPE_IQ2_TN:
- case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -14147,8 +14113,6 @@ static void ggml_compute_forward_out_prod(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
- case GGML_TYPE_IQ2_TN:
- case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -14528,8 +14492,6 @@ static void ggml_compute_forward_set(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
- case GGML_TYPE_IQ2_TN:
- case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -14803,8 +14765,6 @@ static void ggml_compute_forward_get_rows(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
- case GGML_TYPE_IQ2_TN:
- case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -15405,8 +15365,6 @@ static void ggml_compute_forward_clamp(
case GGML_TYPE_IQ1_M:
case GGML_TYPE_IQ1_BN:
case GGML_TYPE_IQ2_BN:
- case GGML_TYPE_IQ2_TN:
- case GGML_TYPE_IQ1_TN:
case GGML_TYPE_IQ4_NL:
case GGML_TYPE_IQ4_XS:
case GGML_TYPE_IQ4_KS:
@@ -22224,8 +22182,6 @@ size_t ggml_quantize_chunk(
case GGML_TYPE_IQ1_M: result = quantize_iq1_m (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ1_BN: result = quantize_iq1_bn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ2_BN: result = quantize_iq2_bn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
- case GGML_TYPE_IQ2_TN: result = quantize_iq2_tn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
- case GGML_TYPE_IQ1_TN: result = quantize_iq1_tn (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_NL: result = quantize_iq4_nl (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_XS: result = quantize_iq4_xs (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
case GGML_TYPE_IQ4_KS: result = quantize_iq4_ks (src + start, (char *) dst + start_row * row_size, nrows, n_per_row, imatrix); break;
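
With GGML_TYPE_IQ1_TN and GGML_TYPE_IQ2_TN removed from ggml_quantize_chunk (and from the type traits entirely), GGUF files quantized with those types presumably have to be re-quantized to the _bn types. A short usage sketch of the surviving IQ2_BN path; quantize_matrix_iq2_bn is a hypothetical helper, but ggml_row_size and ggml_quantize_chunk are real ggml entry points:

```c
#include "ggml.h"
#include <stdlib.h>

// Hypothetical helper: quantize an nrows x n_per_row f32 matrix to IQ2_BN.
// After this change the per-row scale lives inside each quantized row, so
// no companion scale tensor is produced.
static void * quantize_matrix_iq2_bn(const float * src,
                                     int64_t nrows, int64_t n_per_row) {
    // row size in bytes; in this fork it should include row_meta_size
    const size_t row_sz = ggml_row_size(GGML_TYPE_IQ2_BN, n_per_row);
    void * dst = malloc((size_t) nrows * row_sz);
    if (dst != NULL) {
        // quantize all rows in one chunk, no importance matrix
        ggml_quantize_chunk(GGML_TYPE_IQ2_BN, src, dst,
                            /*start   =*/ 0, nrows, n_per_row,
                            /*imatrix =*/ NULL);
    }
    return dst;
}
```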