diff options
author | afrideva <95653597+afrideva@users.noreply.github.com> | 2023-11-13 17:03:40 -0800 |
---|---|---|
committer | GitHub <noreply@github.com> | 2023-11-13 18:03:40 -0700 |
commit | b46d12f86d56bef3dc8b596dfb3d22f3b08102be (patch) | |
tree | 3ed6dde7eccf57cfd56484f064ada3aec332e04a | |
parent | bd90eca237b498dd106d315dcb9ad3e6fae3906f (diff) |
convert.py: also look for plain model.safetensors (#4043)
* add safetensors to convert.py help message
* Check for single-file safetensors model
* Update convert.py "model" option help message
* revert convert.py help message change
-rwxr-xr-x | convert.py | 5 |
1 file changed, 3 insertions(+), 2 deletions(-)
@@ -1036,7 +1036,8 @@ def load_some_model(path: Path) -> ModelPlus:
     # Be extra-friendly and accept either a file or a directory:
     if path.is_dir():
         # Check if it's a set of safetensors files first
-        files = list(path.glob("model-00001-of-*.safetensors"))
+        globs = ["model-00001-of-*.safetensors", "model.safetensors"]
+        files = [file for glob in globs for file in path.glob(glob)]
         if not files:
             # Try the PyTorch patterns too, with lower priority
             globs = ["consolidated.00.pth", "pytorch_model-00001-of-*.bin", "*.pt", "pytorch_model.bin"]
@@ -1123,7 +1124,7 @@ def main(args_in: list[str] | None = None) -> None:
     parser.add_argument("--outtype", choices=output_choices, help="output format - note: q8_0 may be very slow (default: f16 or f32 based on input)")
     parser.add_argument("--vocab-dir", type=Path, help="directory containing tokenizer.model, if separate from model file")
     parser.add_argument("--outfile", type=Path, help="path to write to; default: based on input")
-    parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin)")
+    parser.add_argument("model", type=Path, help="directory containing model file, or model file itself (*.pth, *.pt, *.bin, *.safetensors)")
     parser.add_argument("--vocabtype", choices=["spm", "bpe"], help="vocab format (default: spm)", default="spm")
     parser.add_argument("--ctx", type=int, help="model training context (default: based on input)")
     parser.add_argument("--concurrency", type=int, help=f"concurrency used for conversion (default: {DEFAULT_CONCURRENCY})", default = DEFAULT_CONCURRENCY)