summary refs log tree commit diff
path: root/llama.cpp
diff options
context:
space:
mode:
authorMasaya, Kato <62578291+msy-kato@users.noreply.github.com>2024-05-25 17:42:31 +0900
committerGitHub <noreply@github.com>2024-05-25 11:42:31 +0300
commitfaa0e6979a11dcb731e9d778ad42ceaa0302015e (patch)
tree88aeb420c934fa26eb4ca08fa629300b5e171ea0 /llama.cpp
parent9791f402580838d7f8543ae7bc633ef265e436f0 (diff)
ggml: aarch64: SVE kernels for q8_0_q8_0, q4_0_q8_0 vector dot (#7433)
* Add SVE support for q4_0_q8_0 q8_0_q8_0 * remove ifdef
Diffstat (limited to 'llama.cpp')
-rw-r--r--llama.cpp1
1 files changed, 1 insertions, 0 deletions
diff --git a/llama.cpp b/llama.cpp
index 3c9fe15b..85cb3140 100644
--- a/llama.cpp
+++ b/llama.cpp
@@ -18337,6 +18337,7 @@ const char * llama_print_system_info(void) {
s += "AVX512_BF16 = " + std::to_string(ggml_cpu_has_avx512_bf16()) + " | ";
s += "FMA = " + std::to_string(ggml_cpu_has_fma()) + " | ";
s += "NEON = " + std::to_string(ggml_cpu_has_neon()) + " | ";
+ s += "SVE = " + std::to_string(ggml_cpu_has_sve()) + " | ";
s += "ARM_FMA = " + std::to_string(ggml_cpu_has_arm_fma()) + " | ";
s += "F16C = " + std::to_string(ggml_cpu_has_f16c()) + " | ";
s += "FP16_VA = " + std::to_string(ggml_cpu_has_fp16_va()) + " | ";