author     sss <sss@dark-alexandr.net>              2025-07-01 02:03:23 +0300
committer  sss <sss@dark-alexandr.net>              2025-07-01 02:03:23 +0300
commit     188f69443c0873970d41217b52e11c46d584c06d (patch)
tree       eab9f51137f212c7bfe7e512490748dae371b698 /llama-cpp-cpu.nix
nix common overlays initial commit
Diffstat (limited to 'llama-cpp-cpu.nix')
-rw-r--r--  llama-cpp-cpu.nix  42
1 file changed, 42 insertions, 0 deletions
diff --git a/llama-cpp-cpu.nix b/llama-cpp-cpu.nix
new file mode 100644
index 0000000..e41c112
--- /dev/null
+++ b/llama-cpp-cpu.nix
@@ -0,0 +1,42 @@
+{ inputs }:
+self: super: {
+  llama-cpp-cpu-git =
+    with super;
+    (llama-cpp.override {
+      # stdenv = llvmPackages.stdenv;
+      vulkanSupport = false;
+      blasSupport = true;
+      rpcSupport = true;
+
+    }).overrideAttrs
+      (old: rec {
+        version = "git";
+        src = fetchGit {
+          url = "https://github.com/ggerganov/llama.cpp";
+          ref = "master";
+          submodules = true;
+          # rev = "";
+        };
+        # buildInputs = old.buildInputs ++ [ vpl-gpu-rt libvpl ];
+        # patches = [ /home/sss/.config/nixpkgs/patches/libaom-cpp-revert_dry_sampler.patch ];
+        postPatch = "";
+        postInstall = "";
+        NIX_CFLAGS_COMPILE = "-mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only -O3";
+        # NIX_CXXFLAGS_COMPILE = "-std=c++20 -Ofast -mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only";
+        NIX_CXXFLAGS_COMPILE = "-mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only -O3";
+        cmakeFlags = old.cmakeFlags ++ [
+          "-DGGML_VULKAN:BOOL=FALSE"
+          "-DGGML_BLAS:BOOL=TRUE"
+          "-DGGML_CPU_AARCH64=OFF"
+          # "-DGGML_SYCL:BOOL=TRUE"
+          # "-DGGML_SYCL_F16:BOOL=TRUE"
+        ];
+        buildInputs =
+          with llvmPackages;
+          old.buildInputs
+          ++ [
+            shaderc
+            openmp
+          ];
+      });
+}
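
The overlay above is curried: it takes { inputs } first and only then the usual self: super: pair, so it must be applied to an inputs attrset before nixpkgs can consume it. Below is a minimal usage sketch for a flake; the input name common-overlays, the example.com URL, and the output attribute path are illustrative assumptions and are not part of this commit.

# Minimal usage sketch (assumption: the overlays repo is consumed as a flake
# input named "common-overlays" at a hypothetical URL; adjust to your setup).
{
  inputs = {
    nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable";
    common-overlays.url = "git+https://example.com/nix-common-overlays"; # hypothetical
  };

  outputs = { self, nixpkgs, common-overlays, ... }@inputs:
    let
      system = "x86_64-linux";
      pkgs = import nixpkgs {
        inherit system;
        overlays = [
          # Apply the curried file to { inputs } first to obtain the
          # self/super overlay function that nixpkgs expects.
          (import (common-overlays + "/llama-cpp-cpu.nix") { inherit inputs; })
        ];
      };
    in
    {
      # The overlay adds llama-cpp-cpu-git on top of the stock llama-cpp.
      packages.${system}.llama-cpp-cpu-git = pkgs.llama-cpp-cpu-git;
    };
}

Note that src uses fetchGit with only ref = "master" and no pinned rev, so the derivation tracks the branch head; pure flake evaluation will generally reject the unpinned fetch, so either fill in the commented-out rev or evaluate with --impure.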