# llama-cpp-cpu.nix
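# Nixpkgs overlay that builds llama.cpp from git master as a CPU-only
# package, `llama-cpp-cpu-git`: Vulkan off, BLAS and the ggml RPC backend
# on, and x86-64 SIMD flags forced via NIX_CFLAGS_COMPILE. The flake
# `inputs` argument is currently unused.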
{ inputs }:
self: super: {
  llama-cpp-cpu-git =
    with super;
    (llama-cpp.override {
      # stdenv = llvmPackages.stdenv;
      vulkanSupport = false; # CPU-only: no Vulkan backend
      blasSupport = true; # BLAS speeds up prompt (batch) processing
      rpcSupport = true; # ggml RPC backend for distributed inference
    }).overrideAttrs
      (old: {
        version = "git";
        # fetchGit without a pinned `rev` is impure and tracks the tip of
        # master at evaluation time; set `rev` to pin a known-good commit.
        src = fetchGit {
          url = "https://github.com/ggerganov/llama.cpp";
          ref = "master";
          submodules = true;
          # rev = "";
        };
        # buildInputs = old.buildInputs ++ [ vpl-gpu-rt libvpl ];
        # patches = [ /home/sss/.config/nixpkgs/patches/libaom-cpp-revert_dry_sampler.patch ];
        # Clear the nixpkgs postPatch/postInstall hooks, which target the
        # pinned release and may not apply cleanly to git master.
        postPatch = "";
        postInstall = "";
        # Force x86-64 SIMD (AVX/AVX2/SSSE3/FMA/F16C) and -O3 for both C and
        # C++. -fno-finite-math-only keeps NaN/Inf semantics intact, which
        # llama.cpp relies on (e.g. -inf masking in attention softmax).
        NIX_CFLAGS_COMPILE = "-mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only -O3";
        # NIX_CXXFLAGS_COMPILE = "-std=c++20 -Ofast -mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only";
        NIX_CXXFLAGS_COMPILE = "-mavx -mavx2 -mssse3 -mfma -mf16c -fno-finite-math-only -O3";
        cmakeFlags = old.cmakeFlags ++ [
          "-DGGML_VULKAN:BOOL=FALSE" # match vulkanSupport = false above
          "-DGGML_BLAS:BOOL=TRUE"
          "-DGGML_CPU_AARCH64=OFF" # weight repacking for aarch64 kernels; not wanted on x86
          # "-DGGML_SYCL:BOOL=TRUE"
          # "-DGGML_SYCL_F16:BOOL=TRUE"
        ];
        # `shaderc` resolves from the outer `with super`; only `openmp` lives
        # in llvmPackages, so name it explicitly instead of a second `with`.
        buildInputs = old.buildInputs ++ [
          shaderc
          llvmPackages.openmp
        ];
      });
}
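
# A minimal usage sketch, assuming a flake-based configuration that passes its
# `inputs` attrset down to modules (the relative path below is hypothetical):
#
#   nixpkgs.overlays = [ (import ./llama-cpp-cpu.nix { inherit inputs; }) ];
#   environment.systemPackages = [ pkgs.llama-cpp-cpu-git ];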