summaryrefslogtreecommitdiff
path: root/Package.swift
diff options
context:
space:
mode:
author: Kawrakow <48489457+ikawrakow@users.noreply.github.com> 2024-07-27 07:55:01 +0200
committer: GitHub <noreply@github.com> 2024-07-27 07:55:01 +0200
commit: 154e0d75fccf1784fe9ff6fd76a630b66563da3d (patch)
tree: 81ce6dbb5b1900c1aa78a879f0593c694cab9d27 /Package.swift
parent: 0684c3e9c70d49323b4fc517128cbe222cab7f96 (diff)
Merge mainline llama.cpp (#3)
* Merging mainline - WIP * Merging mainline - WIP AVX2 and CUDA appear to work. CUDA performance seems slightly (~1-2%) lower as it is so often the case with llama.cpp/ggml after some "improvements" have been made. * Merging mainline - fix Metal * Remove check --------- Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'Package.swift')
-rw-r--r--Package.swift25
1 file changed, 13 insertions(+), 12 deletions(-)
diff --git a/Package.swift b/Package.swift
index 183e6475..1d90b47b 100644
--- a/Package.swift
+++ b/Package.swift
@@ -3,14 +3,17 @@
import PackageDescription
var sources = [
- "ggml.c",
- "sgemm.cpp",
- "llama.cpp",
- "unicode.cpp",
- "unicode-data.cpp",
- "ggml-alloc.c",
- "ggml-backend.c",
- "ggml-quants.c",
+ "src/llama.cpp",
+ "src/llama-vocab.cpp",
+ "src/llama-grammar.cpp",
+ "src/llama-sampling.cpp",
+ "src/unicode.cpp",
+ "src/unicode-data.cpp",
+ "ggml/src/ggml.c",
+ "ggml/src/ggml-alloc.c",
+ "ggml/src/ggml-backend.c",
+ "ggml/src/ggml-quants.c",
+ "ggml/src/ggml-aarch64.c",
]
var resources: [Resource] = []
@@ -26,8 +29,8 @@ var cSettings: [CSetting] = [
]
#if canImport(Darwin)
-sources.append("ggml-metal.m")
-resources.append(.process("ggml-metal.metal"))
+sources.append("ggml/src/ggml-metal.m")
+resources.append(.process("ggml/src/ggml-metal.metal"))
linkerSettings.append(.linkedFramework("Accelerate"))
cSettings.append(
contentsOf: [
@@ -63,8 +66,6 @@ let package = Package(
"models",
"tests",
"CMakeLists.txt",
- "ggml-cuda.cu",
- "ggml-cuda.h",
"Makefile"
],
sources: sources,