summaryrefslogtreecommitdiff
path: root/llama.cpp
diff options
context:
space:
mode:
authorautomaticcat <daogiatuank54@gmail.com>2023-12-30 15:07:48 +0700
committerGitHub <noreply@github.com>2023-12-30 10:07:48 +0200
commit24a447e20af425fa44cf10feaa632b6bb596c80f (patch)
tree66612870e12dd4be3b05f47371c3a0c5e8346da7 /llama.cpp
parenta20f3c7465d6d1b33767757c2760643b799a81bf (diff)
ggml : add ggml_cpu_has_avx_vnni() (#4589)
* feat: add avx_vnni based on intel documents * ggml: add avx vnni based on intel document * llama: add avx vnni information display * docs: add more details about using oneMKL and oneAPI for intel processors * docs: add more details about using oneMKL and oneAPI for intel processors * docs: add more details about using oneMKL and oneAPI for intel processors * docs: add more details about using oneMKL and oneAPI for intel processors * docs: add more details about using oneMKL and oneAPI for intel processors * Update ggml.c Fix indentation update Co-authored-by: Georgi Gerganov <ggerganov@gmail.com> --------- Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'llama.cpp')
-rw-r--r--llama.cpp1
1 file changed, 1 insertion, 0 deletions
diff --git a/llama.cpp b/llama.cpp
index 68c7cced..a833d4c1 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -10780,6 +10780,7 @@ const char * llama_print_system_info(void) {
s = "";
s += "AVX = " + std::to_string(ggml_cpu_has_avx()) + " | ";
+ s += "AVX_VNNI = " + std::to_string(ggml_cpu_has_avx_vnni()) + " | ";
s += "AVX2 = " + std::to_string(ggml_cpu_has_avx2()) + " | ";
s += "AVX512 = " + std::to_string(ggml_cpu_has_avx512()) + " | ";
s += "AVX512_VBMI = " + std::to_string(ggml_cpu_has_avx512_vbmi()) + " | ";