author     John <78893154+cmp-nct@users.noreply.github.com>    2024-02-14 08:38:35 +0100
committer  GitHub <noreply@github.com>                         2024-02-14 09:38:35 +0200
commit     aa2341298924ac89778252015efcb792f2df1e20 (patch)
tree       1b7702dd6cf16b25495b6acf87467106ab2b75e0 /examples/llava/convert-image-encoder-to-gguf.py
parent     f5ca054855dea83f424003162f26de376e5643f6 (diff)
llava : support v1.6 (#5267)
* Create llava-survery-v2.py
* Update convert-image-encoder-to-gguf.py
* Update convert-image-encoder-to-gguf.py
* Rename llava-survery-v2.py to llava-surgery-v2.py
* Update convert-image-encoder-to-gguf.py: will now search for projector
* Update convert-image-encoder-to-gguf.py: whoops
* Update llava-surgery-v2.py
* Clip: bugfix for normalization (it did not load the 3 std and mean values)
  Clip: bicubic resize function
  Clip: added save-to-bmp/pil for debugging and conversion from/to 32/8 images
  Clip: added normalization with FP16 precision simulation (image tensors match HF implementation, can be switched off, only used for llava-1.6)
  Clip: added newline tensor, mergetype kv, image-grid kv, new resize-pad function with resolution from gridpoints
  Clip: clip_image_preprocess now returns a float * vector instead of float, this way llava 1.5 and 1.6 are supported
  llava: added ggml cpu graph for embedding patching, added spatial_unpad preliminary support, added a lot of comments that need to be cleaned when all is final
  convert-image-encoder: fixed image-grid flattening
* whitespace corrections
* ws
* Tensors are now properly permuted. Before, the embeddings were inserted 1:1; now they are split into the 24x24 patches as in the reference.
* ws
* added verbose_prompt support into cli; added stopwords for llava-1.6 into cli
* moved llava functions to llava.cpp, made clip.h a C-compatible API, replaced vector-style functions with pointers, added a debug define to remove functions from compilation while not needed
* ws
* convert : skip unknown tensors (needed for LLaVA)
* llava : update readme
* llava : fix compile warnings
* llava : style
* convert : add --skip-unknown CLI arg
* server : remove clip structs
* bugfix for non-llava-1.6: it should now work with llava-1.5 as well
* clip : minor code rearrange
* llava : update readme a bit

---------

Co-authored-by: John <cmt-nct@users.noreply.github.com>
Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
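For context on the "normalization with FP16 precision simulation" item above, here is a minimal NumPy sketch of the idea (a hypothetical helper for illustration, not the clip.cpp implementation): the per-channel mean/std normalization is computed in float16 so the resulting image tensors line up with an FP16 pipeline. It assumes an HWC float32 image already scaled to [0, 1].

    import numpy as np

    def normalize_fp16(image: np.ndarray, mean, std) -> np.ndarray:
        # Simulate an FP16 pipeline: store the inputs and do the arithmetic in
        # float16, then hand back float32 for the rest of the conversion.
        mean = np.asarray(mean, dtype=np.float16)
        std = np.asarray(std, dtype=np.float16)
        x = image.astype(np.float16)
        x = (x - mean) / std
        return x.astype(np.float32)

    # Example with the CLIP defaults used elsewhere in this script.
    img = np.random.rand(336, 336, 3).astype(np.float32)
    out = normalize_fp16(img, [0.48145466, 0.4578275, 0.40821073],
                              [0.26862954, 0.26130258, 0.27577711])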
Diffstat (limited to 'examples/llava/convert-image-encoder-to-gguf.py')
-rw-r--r--  examples/llava/convert-image-encoder-to-gguf.py  66
1 file changed, 59 insertions(+), 7 deletions(-)
diff --git a/examples/llava/convert-image-encoder-to-gguf.py b/examples/llava/convert-image-encoder-to-gguf.py
index e204b56b..c69f89ac 100644
--- a/examples/llava/convert-image-encoder-to-gguf.py
+++ b/examples/llava/convert-image-encoder-to-gguf.py
@@ -78,18 +78,19 @@ ap.add_argument("--text-only", action="store_true", required=False,
help="Save a text-only model. It can't be used to encode images")
ap.add_argument("--vision-only", action="store_true", required=False,
help="Save a vision-only model. It can't be used to encode texts")
-ap.add_argument("--clip_model_is_vision", action="store_true", required=False,
+ap.add_argument("--clip-model-is-vision", action="store_true", required=False,
help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")
+ap.add_argument("--clip-model-is-openclip", action="store_true", required=False,
+ help="The clip model is from openclip (for ViT-SO400M type))")
ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp", choices=["mlp", "ldp"], default="mlp")
-ap.add_argument("--image-mean", nargs=3, type=float, required=False, help="Override image mean values")
-ap.add_argument("--image-std", nargs=3, type=float, required=False, help="Override image std values")
ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
# Example --image-mean 0.48145466 0.4578275 0.40821073 --image-std 0.26862954 0.26130258 0.27577711
+# Example --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5
default_image_mean = [0.48145466, 0.4578275, 0.40821073]
default_image_std = [0.26862954, 0.26130258, 0.27577711]
-ap.add_argument('--image_mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
-ap.add_argument('--image_std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)
+ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor) ', default=None)
+ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)
# with proper
args = ap.parse_args()
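One thing worth noting about the flag renames above: argparse turns dashes in long option names into underscores on the parsed namespace, so the rest of the script can keep reading args.clip_model_is_vision and args.image_mean unchanged. A self-contained sketch of that behaviour:

    import argparse

    ap = argparse.ArgumentParser()
    ap.add_argument("--clip-model-is-vision", action="store_true")
    ap.add_argument("--image-mean", type=float, nargs="+", default=None)

    args = ap.parse_args(["--clip-model-is-vision", "--image-mean", "0.5", "0.5", "0.5"])
    print(args.clip_model_is_vision)  # True   (dashes become underscores)
    print(args.image_mean)            # [0.5, 0.5, 0.5]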
@@ -105,7 +106,7 @@ if args.use_f32:
# output in the same directory as the model if output_dir is None
dir_model = args.model_dir
-if args.clip_model_is_vision:
+if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
    vocab = None
    tokens = None
else:
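The widened condition above boils down to one question: is there a text tokenizer to convert at all? A standalone illustration of the same check (the helper name and its arguments are hypothetical):

    import os

    def should_skip_text_vocab(model_dir: str, is_vision_only: bool, is_openclip: bool) -> bool:
        # No text tokenizer to convert if the model is declared vision-only,
        # is an OpenCLIP export, or simply ships no vocab.json on disk.
        return (
            is_vision_only
            or is_openclip
            or not os.path.exists(os.path.join(model_dir, "vocab.json"))
        )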
@@ -133,7 +134,7 @@ ftype = 1
if args.use_f32:
ftype = 0
-if args.clip_model_is_vision:
+if args.clip_model_is_vision or args.clip_model_is_openclip:
    model = CLIPVisionModel.from_pretrained(dir_model)
    processor = None
else:
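The hunk only shows the vision-only branch. For contrast, a rough sketch of both load paths as they look from the transformers side (the full-model branch here is an assumption for illustration, not necessarily the script's exact else clause):

    from transformers import CLIPModel, CLIPProcessor, CLIPVisionModel

    # Vision-only checkpoints (a ShareGPT4V vision extract or an OpenCLIP export)
    # have no text tower and no processor config, so only the vision model loads.
    model = CLIPVisionModel.from_pretrained("path/to/vision-only-checkpoint")
    processor = None

    # A regular CLIP checkpoint ships both towers plus its preprocessing config.
    model = CLIPModel.from_pretrained("openai/clip-vit-large-patch14-336")
    processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14-336")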
@@ -202,6 +203,57 @@ if has_vision_encoder:
    fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), v_hparams["layer_norm_eps"])
    block_count = v_hparams["num_hidden_layers"] - 1 if has_llava_projector else v_hparams["num_hidden_layers"]
    fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), block_count)
+    # /**
+    #   "image_grid_pinpoints": [
+    #       [336,  672],
+    #       [672,  336],
+    #       [672,  672],
+    #       [1008, 336],
+    #       [336, 1008]
+    #   ],
+    #   Flattened:
+    #   [336, 672, 672, 336, 672, 672, 1008, 336, 336, 1008]
+    # */
+ if "image_grid_pinpoints" in v_hparams:
+ # flatten it
+ image_grid_pinpoints = []
+ for pinpoint in v_hparams["image_grid_pinpoints"]:
+ for p in pinpoint:
+ image_grid_pinpoints.append(p)
+ fout.add_array("clip.vision.image_grid_pinpoints", image_grid_pinpoints)
+ if "image_crop_resolution" in v_hparams:
+ fout.add_uint32("clip.vision.image_crop_resolution", v_hparams["image_crop_resolution"])
+ if "image_aspect_ratio" in v_hparams:
+ fout.add_string("clip.vision.image_aspect_ratio", v_hparams["image_aspect_ratio"])
+ if "image_split_resolution" in v_hparams:
+ fout.add_uint32("clip.vision.image_split_resolution", v_hparams["image_split_resolution"])
+ if "mm_patch_merge_type" in v_hparams:
+ fout.add_string("clip.vision.mm_patch_merge_type", v_hparams["mm_patch_merge_type"])
+ if "mm_projector_type" in v_hparams:
+ fout.add_string("clip.vision.mm_projector_type", v_hparams["mm_projector_type"])
+
    if processor is not None:
        image_mean = processor.image_processor.image_mean if args.image_mean is None or args.image_mean == default_image_mean else args.image_mean
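On the consumer side, the flattened clip.vision.image_grid_pinpoints array written above can be paired back up into (width, height) grid points. A hypothetical reader-side sketch, using the values from the comment in the diff (the real consumer is clip.cpp):

    flattened = [336, 672, 672, 336, 672, 672, 1008, 336, 336, 1008]

    # Re-pair consecutive values into (width, height) grid points.
    pinpoints = list(zip(flattened[0::2], flattened[1::2]))
    print(pinpoints)  # [(336, 672), (672, 336), (672, 672), (1008, 336), (336, 1008)]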