| author | Kawrakow <48489457+ikawrakow@users.noreply.github.com> | 2024-09-09 14:56:34 +0300 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-09-09 14:56:34 +0300 |
| commit | 8c86231f9306c81dc291c4c4a16f88bbc7c97793 (patch) | |
| tree | d49325de2775076e1f71ddf94667d0cd02db3cc5 /ggml/include/ggml.h | |
| parent | bf4b19b474b78a6ddfa1f0fe19f76f3c7ac92030 (diff) | |
Adding IQ1_TN - 1.6875 bpw for TriLM ternary models (#44)
* Adding iq1_tn - 1.6875 bpw for TriLM ternary models
* iq1_tn: NEON
* iq1_tn: faster NEON
* iq2_bn: improve performance on NEON
We now get TG-128 = 100 t/s for Bitnet-3B-1.58b!
* iq1_tn: improve AVX2
PP-512 goes to 533 t/s, up from 455.
TG-128 @ 2 threads goes to 16.6 t/s, up from 14.2.
However, we seem to have a bottleneck somewhere, as TG saturates at 8 threads.
* iq1_tn: improve Zen4
PP-512 goes to 485 t/s, up from 352; with FA (flash attention) we get 545 t/s, up from 380.
TG-128 @ 1 thread goes to 12.4 t/s, up from 10.4.
However, we seem to have a bottleneck somewhere, as TG saturates at 8 threads.
* iq2_bn: improve on Zen4
We now get PP-512 = 614 t/s, up from 542 t/s.
* iq2_bn: improve AVX2 implementation
We now get PP-512 = 753 t/s, up from 680 t/s.
* Remove unnecessary barrier in ggml_compute_forward_mul_mat
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
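
For context on the 1.6875 bpw figure in the title: it is consistent with packing the ternary weights at 1.625 bpw plus one 16-bit scale per 256-weight block, i.e. 54 bytes per block. The block layout below is an illustrative assumption, not read from this commit; only the arithmetic is being checked.

```c
/* Hedged sketch of the 1.6875 bpw arithmetic. The block layout (256 weights,
 * 52 payload bytes, one fp16 scale) is an assumption for illustration and is
 * not taken from the ggml sources. */
#include <stdio.h>

int main(void) {
    const int weights_per_block = 256;  /* assumed block size */
    const int payload_bytes     = 52;   /* 1.625 bpw of packed ternary data */
    const int scale_bytes       = 2;    /* one assumed fp16 scale per block */

    const int block_bytes = payload_bytes + scale_bytes;       /* 54 */
    const double bpw = 8.0 * block_bytes / weights_per_block;  /* 1.6875 */
    printf("bytes per block = %d, bits per weight = %.4f\n", block_bytes, bpw);

    /* Rough size of a TriLM/Bitnet-class 3B model at this rate. */
    const double gib = 3.0e9 * bpw / 8.0 / (1024.0 * 1024.0 * 1024.0);
    printf("~3B parameters at %.4f bpw ~= %.2f GiB\n", bpw, gib);
    return 0;
}
```

At roughly 0.6 GiB of weights, token generation quickly becomes memory-bandwidth bound, which would be one plausible reading of the "TG saturates at 8 threads" observations above.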
Diffstat (limited to 'ggml/include/ggml.h')
-rw-r--r-- | ggml/include/ggml.h | 38 |
1 file changed, 21 insertions, 17 deletions
```diff
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index ab6d172d..5b46a70d 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -391,15 +391,17 @@ extern "C" {
         GGML_TYPE_Q4_0_4_4 = 31,
         GGML_TYPE_Q4_0_4_8 = 32,
         GGML_TYPE_Q4_0_8_8 = 33,
-        GGML_TYPE_IQ1_BN   = 34,
-        GGML_TYPE_IQ2_BN   = 35,
-        GGML_TYPE_Q8_K64   = 36,
-        GGML_TYPE_IQ2_K    = 37,
-        GGML_TYPE_IQ3_K    = 38,
-        GGML_TYPE_IQ4_K    = 39,
-        GGML_TYPE_IQ5_K    = 40,
-        GGML_TYPE_IQ6_K    = 41,
-        GGML_TYPE_IQ2_TN   = 42,
+        //
+        GGML_TYPE_IQ1_BN   = 134,
+        GGML_TYPE_IQ2_BN   = 135,
+        GGML_TYPE_Q8_K64   = 136,
+        GGML_TYPE_IQ2_K    = 137,
+        GGML_TYPE_IQ3_K    = 138,
+        GGML_TYPE_IQ4_K    = 139,
+        GGML_TYPE_IQ5_K    = 140,
+        GGML_TYPE_IQ6_K    = 141,
+        GGML_TYPE_IQ2_TN   = 142,
+        GGML_TYPE_IQ1_TN   = 143,
         GGML_TYPE_COUNT,
     };

@@ -444,14 +446,16 @@ extern "C" {
         GGML_FTYPE_MOSTLY_Q4_0_4_4 = 25, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q4_0_4_8 = 26, // except 1d tensors
         GGML_FTYPE_MOSTLY_Q4_0_8_8 = 27, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ1_BN   = 28, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ2_BN   = 29, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ2_K    = 30, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ3_K    = 31, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ4_K    = 32, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ5_K    = 33, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ6_K    = 34, // except 1d tensors
-        GGML_FTYPE_MOSTLY_IQ2_TN   = 35, // except 1d tensors
+        //
+        GGML_FTYPE_MOSTLY_IQ1_BN   = 128, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ2_BN   = 129, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ2_K    = 130, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ3_K    = 131, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ4_K    = 132, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ5_K    = 133, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ6_K    = 134, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ2_TN   = 135, // except 1d tensors
+        GGML_FTYPE_MOSTLY_IQ1_TN   = 136, // except 1d tensors
     };

     // available tensor operations:
```
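
A side effect visible in this hunk is that the fork-specific types are renumbered from 34–42 to 134–143, presumably to keep them clear of ids that upstream may assign. Since GGML_TYPE_COUNT is the enumerator right after GGML_TYPE_IQ1_TN, it now evaluates to 144 in this header, and anything indexed by ggml_type has to span the unused 34–133 range. The snippet below is a hypothetical, trimmed stand-in to illustrate that consequence, not the real ggml tables.

```c
/* Hypothetical sketch of why the jump to 134+ matters: tables indexed by the
 * type enum must be sized by the COUNT value, which now also spans the unused
 * gap 34..133. The enum below is a trimmed stand-in, not the real ggml_type. */
#include <stdio.h>

enum demo_type {
    DEMO_TYPE_Q4_0_8_8 = 33,
    DEMO_TYPE_IQ1_BN   = 134,
    DEMO_TYPE_IQ1_TN   = 143,   /* the type added by this commit */
    DEMO_TYPE_COUNT             /* 144 */
};

static const char * const demo_names[DEMO_TYPE_COUNT] = {
    [DEMO_TYPE_Q4_0_8_8] = "q4_0_8_8",
    [DEMO_TYPE_IQ1_BN]   = "iq1_bn",
    [DEMO_TYPE_IQ1_TN]   = "iq1_tn",
    /* entries 34..133 stay NULL: lookups must handle unused/legacy ids */
};

int main(void) {
    printf("IQ1_TN id = %d, table entries = %d\n", DEMO_TYPE_IQ1_TN, DEMO_TYPE_COUNT);
    printf("name(143) = %s, name(40) = %s\n",
           demo_names[DEMO_TYPE_IQ1_TN],
           demo_names[40] ? demo_names[40] : "(unused slot)");
    return 0;
}
```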