blob: fe35381e550eecc984dd28ac8483af13cb80caf5 (
plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
|
# Overlay providing a bleeding-edge llama.cpp built from upstream master,
# with Vulkan and BLAS backends enabled and CPU-specific tuning flags.
{ inputs }:
self: super: {
  llama-cpp-vulkan-git =
    (super.llama-cpp.override {
      # stdenv = super.llvmPackages.stdenv;
      vulkanSupport = true;
      blasSupport = true;
    }).overrideAttrs
      (old: {
        # Track upstream master rather than the nixpkgs-pinned release.
        version = "git";
        # NOTE(review): no `rev` is pinned, so this follows the moving
        # `master` branch and the build is not reproducible — confirm intended.
        src = builtins.fetchGit {
          url = "https://github.com/ggerganov/llama.cpp";
          ref = "master";
          submodules = true;
          # rev = "";
        };
        # buildInputs = old.buildInputs ++ [ vpl-gpu-rt libvpl ];
        # patches = [ /home/sss/.config/nixpkgs/patches/libaom-cpp-revert_dry_sampler.patch ];
        # Disable the nixpkgs patch/install hooks, which target the pinned
        # release and may not apply to master.
        postPatch = "";
        postInstall = "";
        # Host-CPU tuning; -fno-finite-math-only re-enables NaN/Inf semantics
        # on top of -O3.
        NIX_CFLAGS_COMPILE = "-mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only -O3";
        # NIX_CXXFLAGS_COMPILE = "-std=c++20 -Ofast -mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only";
        # NOTE(review): unsure the cc-wrapper honors NIX_CXXFLAGS_COMPILE
        # (it reads NIX_CFLAGS_COMPILE for both C and C++) — verify this
        # variable actually reaches the C++ compiler.
        NIX_CXXFLAGS_COMPILE = "-mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only -O3";
        cmakeFlags = old.cmakeFlags ++ [
          "-DGGML_VULKAN:BOOL=TRUE"
          "-DGGML_BLAS:BOOL=TRUE"
          # "-DGGML_SYCL:BOOL=TRUE"
          # "-DGGML_SYCL_F16:BOOL=TRUE"
        ];
        # shaderc_my is taken from the final overlay fixpoint (self) so a
        # later overlay may still override it; openmp comes from LLVM.
        buildInputs = old.buildInputs ++ [
          self.shaderc_my
          super.llvmPackages.openmp
        ];
      });
}
|