author    M. Yusuf Sarıgöz <yusufsarigoz@gmail.com>  2023-10-12 18:23:18 +0300
committer GitHub <noreply@github.com>                2023-10-12 18:23:18 +0300
commit    370359e5baf619f3a8d461023143d1494b1e8fde (patch)
tree      acfd94911cdb83780f7afc3a703b8abb31aa00e2 /examples/llava/convert-image-encoder-to-gguf.py
parent    9e24cc6e2e589d405bd1720c400f5b0b9d0ca3ee (diff)
examples: support LLaVA v1.5 (multimodal model) (#3436)
* WIP: start implementing LLaVA
* rm scratch buf for now, will revert after cleanup
* LLaVA image encoder is working. will combine with llama
* Add llava inference code, but it's buggy. debugging
* LLaVA is working e2e, needs to optimize memory allocation + cleanup
* Use ggml_allocr + rm unnecessary code
* fix: crlf -> lf
* fix: new line at EoF
* fix: trailing whitespace
* Add readme
* Update readme
* Some cleanup
* Are you happy editorconfig?
* rm unused batch image preprocessing
* rm unused import
* fix: rm designated initializers
* introduce pad-to-square mode for non-square images
* are you happy editorconfig?
* gitignore /llava
* Handle cases where image file does not exist
* add llava target to Makefile
* add support for 13b model variant
* Maybe seed is unlucky?
* Check if apples are compared to apples
* are you happy editorconfig?
* Use temperature = 0.1 by default
* command line: use gpt_params_parse()
* minor
* handle default n_predict
* fix typo
* llava : code formatting, rename files, fix compile warnings
* do not use Wno-cast-qual for MSVC

---------

Co-authored-by: Georgi Gerganov <ggerganov@gmail.com>
Diffstat (limited to 'examples/llava/convert-image-encoder-to-gguf.py')
-rw-r--r-- examples/llava/convert-image-encoder-to-gguf.py | 250
1 file changed, 250 insertions(+), 0 deletions(-)
diff --git a/examples/llava/convert-image-encoder-to-gguf.py b/examples/llava/convert-image-encoder-to-gguf.py
new file mode 100644
index 00000000..2f5eef19
--- /dev/null
+++ b/examples/llava/convert-image-encoder-to-gguf.py
@@ -0,0 +1,250 @@
+import argparse
+import os
+import json
+
+import torch
+import numpy as np
+from gguf import *
+from transformers import CLIPModel, CLIPProcessor
+
+TEXT = "clip.text"
+VISION = "clip.vision"
+
+
+def k(raw_key: str, arch: str) -> str:
+ return raw_key.format(arch=arch)
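+# The KEY_* constants imported from gguf below are "{arch}"-keyed format
+# strings (in the gguf-py of this era, KEY_CONTEXT_LENGTH should be
+# "{arch}.context_length"), so for example:
+#   k(KEY_CONTEXT_LENGTH, TEXT)  -> "clip.text.context_length"
+#   k(KEY_BLOCK_COUNT, VISION)   -> "clip.vision.block_count"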
+
+
+def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: bool) -> bool:
+ if name in (
+ "logit_scale",
+ "text_model.embeddings.position_ids",
+ "vision_model.embeddings.position_ids",
+ ):
+ return True
+
+ if has_llava and name in ["visual_projection.weight", "vision_model.post_layernorm.weight", "vision_model.post_layernorm.bias"]:
+ return True
+
+ if name.startswith("v") and not has_vision:
+ return True
+
+ if name.startswith("t") and not has_text:
+ return True
+
+ return False
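+# For example, when exporting an mmproj-only encoder (has_text=False,
+# has_vision=True, has_llava=True), "text_model.encoder.layers.0.mlp.fc1.weight"
+# is skipped by the "t"-prefix check and "visual_projection.weight" is skipped
+# explicitly.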
+
+
+def get_tensor_name(name: str) -> str:
+ if "projection" in name:
+ return name
+
+ if "mm_projector" in name:
+ return name.replace("model.mm_projector", "mm")
+
+ return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln")
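+# get_tensor_name() maps Hugging Face / LLaVA tensor names onto the GGML naming
+# scheme, e.g.:
+#   "vision_model.encoder.layers.0.self_attn.k_proj.weight" -> "v.blk.0.attn_k.weight"
+#   "model.mm_projector.0.weight"                           -> "mm.0.weight"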
+
+
+def bytes_to_unicode():
+ """
+ Returns list of utf-8 byte and a corresponding list of unicode strings.
+ The reversible bpe codes work on unicode strings.
+ This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
+ When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
+ This is a signficant percentage of your normal, say, 32K bpe vocab.
+ To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
+ And avoids mapping to whitespace/control characters the bpe code barfs on.
+ """
+ bs = (
+ list(range(ord("!"), ord("~") + 1))
+ + list(range(ord("¡"), ord("¬") + 1))
+ + list(range(ord("®"), ord("ÿ") + 1))
+ )
+ cs = bs[:]
+ n = 0
+ for b in range(2**8):
+ if b not in bs:
+ bs.append(b)
+ cs.append(2**8 + n)
+ n += 1
+ cs = [chr(n) for n in cs]
+ return dict(zip(bs, cs))
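+# For example, the space byte 0x20 is not in the printable set above, so it is
+# remapped to chr(256 + 32) == "\u0120" ("Ġ"), the familiar GPT-2 BPE space
+# marker.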
+
+
+ap = argparse.ArgumentParser(prog="convert-image-encoder-to-gguf.py")
+ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True)
+ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16")
+ap.add_argument("--text-only", action="store_true", required=False,
+ help="Save a text-only model. It can't be used to encode images")
+ap.add_argument("--vision-only", action="store_true", required=False,
+ help="Save a vision-only model. It can't be used to encode texts")
+ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
+ap.add_argument("--image-mean", nargs=3, type=float, required=False, help="Override image mean values")
+ap.add_argument("--image-std", nargs=3, type=float, required=False, help="Override image std values")
+ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
+
+args = ap.parse_args()
+
+
+if args.text_only and args.vision_only:
+ print("--text-only and --image-only arguments cannot be specified at the same time.")
+ exit(1)
+
+if args.use_f32:
+ print("WARNING: Weights for the convolution op is always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.")
+
+# output in the same directory as the model if output_dir is None
+dir_model = args.model_dir
+
+
+with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f:
+ vocab = json.load(f)
+ tokens = [key for key in vocab]
+
+with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
+ config = json.load(f)
+ v_hparams = config["vision_config"]
+ t_hparams = config["text_config"]
+
+# possible data types
+# ftype == 0 -> float32
+# ftype == 1 -> float16
+#
+# map from ftype to string
+ftype_str = ["f32", "f16"]
+
+ftype = 1
+if args.use_f32:
+ ftype = 0
+
+
+model = CLIPModel.from_pretrained(dir_model)
+processor = CLIPProcessor.from_pretrained(dir_model)
+
+fname_middle = None
+has_text_encoder = True
+has_vision_encoder = True
+has_llava_projector = False
+if args.text_only:
+ fname_middle = "text-"
+ has_vision_encoder = False
+elif args.vision_only:
+ fname_middle = "vision-"
+ has_text_encoder = False
+elif args.llava_projector is not None:
+ fname_middle = "mmproj-"
+ has_text_encoder = False
+ has_llava_projector = True
+else:
+ fname_middle = ""
+
+output_dir = args.output_dir if args.output_dir is not None else dir_model
+os.makedirs(output_dir, exist_ok=True)
+output_prefix = os.path.basename(output_dir).replace("ggml_", "")
+fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf")
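+# e.g. with --llava-projector and the default f16 this yields
+# "<output_dir>/mmproj-model-f16.gguf"; a full two-tower export is named
+# "model-f16.gguf"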
+fout = GGUFWriter(path=fname_out, arch="clip")
+
+fout.add_bool("clip.has_text_encoder", has_text_encoder)
+fout.add_bool("clip.has_vision_encoder", has_vision_encoder)
+fout.add_bool("clip.has_llava_projector", has_llava_projector)
+fout.add_file_type(ftype)
+model_name = config["_name_or_path"] if "_name_or_path" in config else os.path.basename(dir_model)
+fout.add_name(model_name)
+if args.text_only:
+ fout.add_description("text-only CLIP model")
+elif args.vision_only and not has_llava_projector:
+ fout.add_description("vision-only CLIP model")
+elif has_llava_projector:
+ fout.add_description("image encoder for LLaVA")
+else:
+ fout.add_description("two-tower CLIP model")
+
+if has_text_encoder:
+ # text_model hparams
+ fout.add_uint32(k(KEY_CONTEXT_LENGTH, TEXT), t_hparams["max_position_embeddings"])
+ fout.add_uint32(k(KEY_EMBEDDING_LENGTH, TEXT), t_hparams["hidden_size"])
+ fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, TEXT), t_hparams["intermediate_size"])
+ fout.add_uint32("clip.text.projection_dim", t_hparams.get("projection_dim", config["projection_dim"]))
+ fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, TEXT), t_hparams["num_attention_heads"])
+ fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, TEXT), t_hparams["layer_norm_eps"])
+ fout.add_uint32(k(KEY_BLOCK_COUNT, TEXT), t_hparams["num_hidden_layers"])
+ fout.add_token_list(tokens)
+
+if has_vision_encoder:
+ # vision_model hparams
+ fout.add_uint32("clip.vision.image_size", v_hparams["image_size"])
+ fout.add_uint32("clip.vision.patch_size", v_hparams["patch_size"])
+ fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), v_hparams["hidden_size"])
+ fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), v_hparams["intermediate_size"])
+ fout.add_uint32("clip.vision.projection_dim", v_hparams.get("projection_dim", config["projection_dim"]))
+ fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), v_hparams["num_attention_heads"])
+ fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), v_hparams["layer_norm_eps"])
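+ # LLaVA reads features from the penultimate vision layer, so one block is
+ # omitted from the exported count (the matching layer is popped further below).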
+ block_count = v_hparams["num_hidden_layers"] - 1 if has_llava_projector else v_hparams["num_hidden_layers"]
+ fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), block_count)
+
+ image_mean = processor.image_processor.image_mean if args.image_mean is None else args.image_mean
+ image_std = processor.image_processor.image_std if args.image_std is None else args.image_std
+ fout.add_array("clip.vision.image_mean", image_mean)
+ fout.add_array("clip.vision.image_std", image_std)
+
+use_gelu = v_hparams["hidden_act"] == "gelu"
+fout.add_bool("clip.use_gelu", use_gelu)
+
+
+if has_llava_projector:
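+ # llava.projector is a torch-saved dict containing only the multimodal
+ # projector tensors, extracted from the LLaVA checkpoint beforehand
+ # (presumably by the companion llava-surgery.py script from this PR).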
+ model.vision_model.encoder.layers.pop(-1)
+ projector = torch.load(args.llava_projector)
+ for name, data in projector.items():
+ name = get_tensor_name(name)
+ if data.ndim == 2:
+ data = data.squeeze().numpy().astype(np.float16)
+ else:
+ data = data.squeeze().numpy().astype(np.float32)
+
+ fout.add_tensor(name, data)
+
+ print("Projector tensors added\n")
+
+state_dict = model.state_dict()
+for name, data in state_dict.items():
+ if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_llava_projector):
+ # we don't need this
+ print(f"skipping parameter: {name}")
+ continue
+
+ name = get_tensor_name(name)
+ data = data.squeeze().numpy()
+
+ n_dims = len(data.shape)
+
+ # ftype == 0 -> float32, ftype == 1 -> float16
+ ftype_cur = 0
+ if n_dims == 4:
+ print(f"tensor {name} is always saved in f16")
+ data = data.astype(np.float16)
+ ftype_cur = 1
+ elif ftype == 1:
+ if name[-7:] == ".weight" and n_dims == 2:
+ print(" Converting to float16")
+ data = data.astype(np.float16)
+ ftype_cur = 1
+ else:
+ print(" Converting to float32")
+ data = data.astype(np.float32)
+ ftype_cur = 0
+ else:
+ if data.dtype != np.float32:
+ print(" Converting to float32")
+ data = data.astype(np.float32)
+ ftype_cur = 0
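+ # Net effect: 4-D tensors (the patch-embedding conv kernel) are always f16;
+ # with f16 output, 2-D ".weight" matrices become f16 and everything else stays
+ # f32; with f32 output, everything is stored as f32.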
+
+ print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}")
+ fout.add_tensor(name, data)
+
+
+fout.write_header_to_file()
+fout.write_kv_data_to_file()
+fout.write_tensors_to_file()
+fout.close()
+
+print("Done. Output file: " + fname_out)