| author | Kawrakow <48489457+ikawrakow@users.noreply.github.com> | 2024-07-27 07:55:01 +0200 |
|---|---|---|
| committer | GitHub <noreply@github.com> | 2024-07-27 07:55:01 +0200 |
| commit | 154e0d75fccf1784fe9ff6fd76a630b66563da3d (patch) | |
| tree | 81ce6dbb5b1900c1aa78a879f0593c694cab9d27 /examples/llava/llava-surgery.py | |
| parent | 0684c3e9c70d49323b4fc517128cbe222cab7f96 (diff) | |
Merge mainline llama.cpp (#3)
* Merging mainline - WIP
* Merging mainline - WIP
AVX2 and CUDA appear to work.
CUDA performance seems slightly (~1-2%) lower, as is so often
the case with llama.cpp/ggml after some "improvements" have been made.
* Merging mainline - fix Metal
* Remove check
---------
Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
Diffstat (limited to 'examples/llava/llava-surgery.py')
| -rw-r--r-- | examples/llava/llava-surgery.py | 38 |
|---|---|---|
1 file changed, 0 insertions, 38 deletions
```diff
diff --git a/examples/llava/llava-surgery.py b/examples/llava/llava-surgery.py
deleted file mode 100644
index 4f2da3be..00000000
--- a/examples/llava/llava-surgery.py
+++ /dev/null
@@ -1,38 +0,0 @@
-import argparse
-import glob
-import os
-import torch
-
-
-ap = argparse.ArgumentParser()
-ap.add_argument("-m", "--model", help="Path to LLaVA v1.5 model")
-args = ap.parse_args()
-
-# find the model part that includes the the multimodal projector weights
-path = sorted(glob.glob(f"{args.model}/pytorch_model*.bin"))[-1]
-checkpoint = torch.load(path)
-
-# get a list of mm tensor names
-mm_tensors = [k for k, v in checkpoint.items() if k.startswith("model.mm_projector")]
-
-# store these tensors in a new dictionary and torch.save them
-projector = {name: checkpoint[name].float() for name in mm_tensors}
-torch.save(projector, f"{args.model}/llava.projector")
-
-# BakLLaVA models contain CLIP tensors in it
-clip_tensors = [k for k, v in checkpoint.items() if k.startswith("model.vision_tower")]
-if len(clip_tensors) > 0:
-    clip = {name.replace("vision_tower.vision_tower.", ""): checkpoint[name].float() for name in clip_tensors}
-    torch.save(clip, f"{args.model}/llava.clip")
-
-
-    # added tokens should be removed to be able to convert Mistral models
-    if os.path.exists(f"{args.model}/added_tokens.json"):
-        with open(f"{args.model}/added_tokens.json", "w") as f:
-            f.write("{}\n")
-
-
-
-print("Done!")
-print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
-print(f"Also, use {args.model}/llava.projector to prepare a llava-encoder.gguf file.")
```
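For context, the removed script was a standalone preprocessing step run before GGUF conversion. A minimal usage sketch follows, assuming a local LLaVA v1.5 checkpoint directory (the path is illustrative) and the repository state before this commit; the expected outputs are taken from the script's own `argparse` flag, glob pattern, and `torch.save` calls above.

```python
# Hypothetical invocation of the removed llava-surgery.py; the model path
# "models/llava-v1.5-7b" is illustrative, not taken from the commit.
import subprocess

model_dir = "models/llava-v1.5-7b"  # assumed: directory holding pytorch_model*.bin shards

# Run the surgery script as it existed before this commit.
subprocess.run(
    ["python", "examples/llava/llava-surgery.py", "-m", model_dir],
    check=True,
)

# Per the script's code, it writes alongside the checkpoint shards:
#   {model_dir}/llava.projector  -- the model.mm_projector tensors, as float32
#   {model_dir}/llava.clip       -- CLIP tensors (BakLLaVA checkpoints only)
```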
