summary refs log tree commit diff
path: root/flake.nix
diff options
context:
space:
mode:
author Erik Scholz <Green-Sky@users.noreply.github.com> 2023-10-28 16:41:07 +0200
committer GitHub <noreply@github.com> 2023-10-28 16:41:07 +0200
commit ff3bad83e29e3009010cbc923bebd769055eaa7f (patch)
tree 39be8f3dc9082ee6bfe685a92e38a6d1b623d09a /flake.nix
parent 82a6646e0221216c41edcdf99f5a44bb051391f5 (diff)
flake : update flake.lock for newer transformers version + provide extra dev shell (#3797)
* flake : update flake.lock for newer transformers version + provide extra dev shell with torch and transformers (for most convert-xxx.py scripts)
Diffstat (limited to 'flake.nix')
-rw-r--r-- flake.nix 7
1 file changed, 7 insertions(+), 0 deletions(-)
diff --git a/flake.nix b/flake.nix
index cfc4776a..fa34394b 100644
--- a/flake.nix
+++ b/flake.nix
@@ -51,6 +51,9 @@
};
llama-python =
pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece ]);
+ # TODO(Green-Sky): find a better way to opt-into the heavy ml python runtime
+ llama-python-extra =
+ pkgs.python3.withPackages (ps: with ps; [ numpy sentencepiece torchWithoutCuda transformers ]);
postPatch = ''
substituteInPlace ./ggml-metal.m \
--replace '[bundle pathForResource:@"ggml-metal" ofType:@"metal"];' "@\"$out/bin/ggml-metal.metal\";"
@@ -126,5 +129,9 @@
buildInputs = [ llama-python ];
packages = nativeBuildInputs ++ osSpecific;
};
+ devShells.extra = pkgs.mkShell {
+ buildInputs = [ llama-python-extra ];
+ packages = nativeBuildInputs ++ osSpecific;
+ };
});
}