author    Abhishek Gopinath K <31348521+overtunned@users.noreply.github.com>  2024-04-03 21:12:52 +0530
committer GitHub <noreply@github.com>                                         2024-04-03 11:42:52 -0400
commit    db214fa578e00b01e0884fc2725c9349608bdab5
tree      36134d290b3828b781dac2ca853bb9b85d273abb /convert-hf-to-gguf.py
parent    1ff4d9f3d683f02ef8a12e04bfac84300c44bd3a
Missing tokenizer.model error during gguf conversion (#6443)
Co-authored-by: Jared Van Bortel <jared@nomic.ai>
Diffstat (limited to 'convert-hf-to-gguf.py')
-rwxr-xr-x  convert-hf-to-gguf.py | 3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/convert-hf-to-gguf.py b/convert-hf-to-gguf.py
index afa034a8..bca1c2d7 100755
--- a/convert-hf-to-gguf.py
+++ b/convert-hf-to-gguf.py
@@ -323,8 +323,7 @@ class Model(ABC):
         toktypes: list[int] = []

         if not tokenizer_path.is_file():
-            print(f'Error: Missing {tokenizer_path}', file=sys.stderr)
-            sys.exit(1)
+            raise FileNotFoundError(f"File not found: {tokenizer_path}")

         tokenizer = SentencePieceProcessor(str(tokenizer_path))
         vocab_size = self.hparams.get('vocab_size', tokenizer.vocab_size())
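For context, the change swaps a hard process exit (print to stderr, then sys.exit(1)) for a raised FileNotFoundError, so a missing tokenizer.model surfaces as a catchable exception with a traceback rather than a bare exit code. A minimal sketch of the resulting behaviour; the load_tokenizer helper and the example path below are hypothetical and not part of the repository:

    from pathlib import Path

    def load_tokenizer(tokenizer_path: Path) -> None:
        # Mirrors the post-change behaviour: a missing tokenizer.model now
        # raises a catchable FileNotFoundError instead of calling sys.exit(1).
        if not tokenizer_path.is_file():
            raise FileNotFoundError(f"File not found: {tokenizer_path}")
        # ... real code would continue with SentencePieceProcessor(str(tokenizer_path)) ...

    try:
        load_tokenizer(Path("models/example/tokenizer.model"))  # hypothetical path
    except FileNotFoundError as err:
        print(f"conversion aborted: {err}")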