path: root/ggml/include
author     Kawrakow <iwankawrakow@gmail.com>       2024-10-25 13:08:43 +0200
committer  GitHub <noreply@github.com>             2024-10-25 13:08:43 +0200
commit     6b968f38946117552ffed300771c44ba9b39d3e4 (patch)
tree       dc6b0df69f31ea77d9941d6798a4ef411c688080 /ggml/include
parent     9114078959b404899fd67e1af45f0dcbee51b47f (diff)
Bitnet changes (#106)
* Adapting iq2_bn to work without separate scale tensors

  Why? It is becoming burdensome to maintain the special Bitnet conversion in convert_hf_to_gguf.py, so I think it is better to make iq1_bn and iq2_bn just work with the mainline conversion script (which does not generate scales).

* Adapting iq1_bn to work without separate scale tensors

* Adapting iq2_bn: CUDA dequantize

* Adapting iq2_bn: CUDA works

* Adapting iq1_bn: CUDA works

* Adapting iq1_bn, iq2_bn: NEON

* Adapting iq1_bn, iq2_bn: Metal

  Dequantize works, but there is still something wrong with the dot products.

* WIP

  I absolutely don't see what is wrong with the iq1_bn and iq2_bn vector dot product kernels.

* Remove iq1_tn and iq2_tn - Part 1

  Now that iq1_bn and iq2_bn have per-row scales, there is no reason to also have iq1_tn and iq2_tn.

* Remove iq1_tn and iq2_tn - Part 2

* Bitnet: use the standard llm_build_kv to build self attention

  My main motivation was to enable FA. But FA does not work anyway because the head size is 100 for the Bitnet ternary models (and I had forgotten this little detail).

* Revert "Avoid rebuild of GGML graph for each token (#98)"

  This reverts commit f2d315b46f7aacc7df4b86bd8acba387b30e11ca. As far as I can tell, the commit breaks Metal TG.

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
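Editor's note: the central idea of the commit is that iq1_bn and iq2_bn now carry their scale per row instead of relying on a separate scale tensor produced by a special conversion path. The snippet below is only a minimal sketch of that idea; the struct name, row size, and packing scheme are hypothetical and do not reproduce the actual ik_llama.cpp block layout.

/* Sketch: one float scale stored per row, next to packed ternary weights.
 * Names and packing are hypothetical, chosen only to illustrate the idea. */
#include <stdint.h>
#include <stdio.h>

#define ROW_SIZE 256                 /* hypothetical number of weights per row */

typedef struct {
    float   d;                       /* per-row scale */
    uint8_t qs[ROW_SIZE / 4];        /* 4 ternary values (-1, 0, +1) per byte */
} ternary_row_t;

/* Dequantize one weight: unpack the 2-bit code and apply the row scale. */
static float ternary_row_get(const ternary_row_t * row, int i) {
    int code = (row->qs[i / 4] >> (2 * (i % 4))) & 0x3;   /* 0, 1 or 2 */
    return row->d * (float)(code - 1);                    /* -d, 0 or +d */
}

int main(void) {
    ternary_row_t row = { .d = 0.125f };
    row.qs[0] = 0x2 | (0x0 << 2) | (0x1 << 4) | (0x2 << 6);   /* +1, -1, 0, +1 */
    for (int i = 0; i < 4; ++i) {
        printf("w[%d] = %f\n", i, ternary_row_get(&row, i));
    }
    return 0;
}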
Diffstat (limited to 'ggml/include')
-rw-r--r--  ggml/include/ggml-backend.h    6
-rw-r--r--  ggml/include/ggml.h           11
2 files changed, 2 insertions, 15 deletions
diff --git a/ggml/include/ggml-backend.h b/ggml/include/ggml-backend.h
index 621620bc..5f3f1e28 100644
--- a/ggml/include/ggml-backend.h
+++ b/ggml/include/ggml-backend.h
@@ -232,12 +232,6 @@ extern "C" {
GGML_API void ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr);
GGML_API void ggml_backend_view_init(struct ggml_tensor * tensor);
- // Utility to query whether cached GGML graph is in use
- GGML_API bool ggml_use_cached_graph(ggml_backend_sched_t sched);
-
- // Set whether or not to use GGML graph caching
- GGML_API void ggml_set_cached_graph(ggml_backend_sched_t sched, bool set_value);
-
#ifdef __cplusplus
}
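Editor's note: the two declarations removed above belonged to the graph-caching feature introduced by "Avoid rebuild of GGML graph for each token (#98)", which this commit reverts. The snippet below is a hypothetical caller-side sketch, not code from the repository: it shows how an application could have toggled graph caching against the pre-revert header, and hence the kind of call site that no longer compiles after this change.

#include <stdbool.h>
#include "ggml-backend.h"

/* Hypothetical usage of the API removed by this commit.
 * Compiles only against the pre-revert ggml-backend.h. */
static void enable_graph_caching(ggml_backend_sched_t sched) {
    ggml_set_cached_graph(sched, true);       /* removed in this commit */
    if (ggml_use_cached_graph(sched)) {       /* removed in this commit */
        /* scheduler would reuse the previously built GGML graph */
    }
}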
diff --git a/ggml/include/ggml.h b/ggml/include/ggml.h
index a99dc6b5..5ba77012 100644
--- a/ggml/include/ggml.h
+++ b/ggml/include/ggml.h
@@ -401,8 +401,8 @@ extern "C" {
GGML_TYPE_IQ4_K = 139,
GGML_TYPE_IQ5_K = 140,
GGML_TYPE_IQ6_K = 141,
- GGML_TYPE_IQ2_TN = 142,
- GGML_TYPE_IQ1_TN = 143,
+ // deprecated: GGML_TYPE_IQ2_TN = 142,
+ // deprecated: GGML_TYPE_IQ1_TN = 143,
GGML_TYPE_IQ4_KS = 144,
GGML_TYPE_IQ2_KS = 145,
GGML_TYPE_IQ4_KSS = 146,
@@ -597,13 +597,6 @@ extern "C" {
GGML_TENSOR_FLAG_PARAM = 4,
};
- // Flag (used on GGML_OP_CPY nodes) on whether node is associated with K or V cache
- enum ggml_kv_cache_flag {
- GGML_KV_CACHE_FLAG_NONE = 0,
- GGML_KV_CACHE_FLAG_K = 1,
- GGML_KV_CACHE_FLAG_V = 2
- };
-
// ggml object
struct ggml_object {
size_t offs;
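Editor's note on the ggml.h hunk above: the two ternary types are commented out rather than deleted, and the later entries keep their explicit numbers. Since ggml_type values are written into GGUF files, renumbering IQ4_KS and the entries after it would silently change the meaning of existing files. The excerpt below uses hypothetical placeholder names (not the real enum) purely to illustrate why the 142/143 slots stay reserved.

/* Illustrative excerpt with placeholder names: removing an enumerator without
 * reserving its value would shift every later value and change the on-disk
 * meaning of tensors in existing GGUF files. */
enum example_ggml_type {
    EXAMPLE_TYPE_IQ6_K  = 141,
    /* 142 and 143 stay reserved for the removed IQ2_TN / IQ1_TN */
    EXAMPLE_TYPE_IQ4_KS = 144,   /* keeps its value because the gap is preserved */
    EXAMPLE_TYPE_IQ2_KS = 145,
};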