import argparse
import os
import json
import re

import torch
import numpy as np
from gguf import *
from transformers import SiglipVisionModel, SiglipVisionConfig

TEXT = "clip.text"
VISION = "clip.vision"

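# Expand a GGUF metadata key template from gguf's constants by filling the {arch}
# placeholder, e.g. k(KEY_EMBEDDING_LENGTH, VISION) -> "clip.vision.embedding_length".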
def k(raw_key: str, arch: str) -> str:
    return raw_key.format(arch=arch)


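# Decide which checkpoint entries are left out of the GGUF file: non-parameter
# buffers (position_ids), the SigLIP pooling head, and any tower disabled via
# --text-only / --vision-only.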
def should_skip_tensor(name: str, has_text: bool, has_vision: bool, has_llava: bool) -> bool:
    if name in (
        "logit_scale",
        "text_model.embeddings.position_ids",
        "vision_model.embeddings.position_ids",
    ):
        return True

    if name in (
        "vision_model.head.probe",
        "vision_model.head.attention.in_proj_weight",
        "vision_model.head.attention.in_proj_bias",
        "vision_model.head.attention.out_proj.weight",
        "vision_model.head.attention.out_proj.bias",
        "vision_model.head.layernorm.weight",
        "vision_model.head.layernorm.bias",
        "vision_model.head.mlp.fc1.weight",
        "vision_model.head.mlp.fc1.bias",
        "vision_model.head.mlp.fc2.weight",
        "vision_model.head.mlp.fc2.bias",
    ):
        return True

    if name.startswith("v") and not has_vision:
        return True

    if name.startswith("t") and not has_text:
        return True

    return False


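# Map Hugging Face tensor names to the short names expected by the GGUF CLIP loader,
# e.g. "vision_model.encoder.layers.0.self_attn.q_proj.weight" -> "v.blk.0.attn_q.weight".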
def get_tensor_name(name: str) -> str:
    if "projection" in name:
        return name
    if "mm_projector" in name:
        name = name.replace("model.mm_projector", "mm")
        name = re.sub(r'mm\.mlp\.mlp', 'mm.model.mlp', name, count=1)
        name = re.sub(r'mm\.peg\.peg', 'mm.model.peg', name, count=1)
        return name

    return name.replace("text_model", "t").replace("vision_model", "v").replace("encoder.layers", "blk").replace("embeddings.", "").replace("_proj", "").replace("self_attn.", "attn_").replace("layer_norm", "ln").replace("layernorm", "ln").replace("mlp.fc1", "ffn_down").replace("mlp.fc2", "ffn_up").replace("embedding", "embd").replace("final", "post").replace("layrnorm", "ln")


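# bytes_to_unicode below is not referenced in this script; it appears to be carried
# over from the CLIP/LLaVA converter this file is based on and is kept for parity.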
def bytes_to_unicode():
    """
    Returns a mapping between utf-8 bytes and corresponding unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
    This also avoids mapping to whitespace/control characters that the bpe code barfs on.
    """
    bs = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


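# Example invocation (script name illustrative; paths are placeholders):
#   python convert_image_encoder_to_gguf.py -m /path/to/glm-model \
#       --llava-projector /path/to/llava.projector -o /path/to/output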
ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model-dir", help="Path to model directory cloned from HF Hub", required=True)
ap.add_argument("--use-f32", action="store_true", default=False, help="Use f32 instead of f16")
ap.add_argument("--text-only", action="store_true", required=False,
                help="Save a text-only model. It can't be used to encode images")
ap.add_argument("--vision-only", action="store_true", required=False,
                help="Save a vision-only model. It can't be used to encode texts")
ap.add_argument("--clip-model-is-vision", action="store_true", required=False,
                help="The clip model is a pure vision model (ShareGPT4V vision extract for example)")
ap.add_argument("--clip-model-is-openclip", action="store_true", required=False,
                help="The clip model is from openclip (for ViT-SO400M type)")
ap.add_argument("--llava-projector", help="Path to llava.projector file. If specified, save an image encoder for LLaVA models.")
ap.add_argument("--projector-type", help="Type of projector. Possible values: mlp, ldp, ldpv2, adapter", choices=["mlp", "ldp", "ldpv2", "adapter"], default="adapter")
ap.add_argument("-o", "--output-dir", help="Directory to save GGUF files. Default is the original model directory", default=None)
# Example --image-mean 0.48145466 0.4578275 0.40821073 --image-std 0.26862954 0.26130258 0.27577711
# Example --image-mean 0.5 0.5 0.5 --image-std 0.5 0.5 0.5
default_image_mean = [0.5, 0.5, 0.5]
default_image_std = [0.5, 0.5, 0.5]
ap.add_argument('--image-mean', type=float, nargs='+', help='Mean of the images for normalization (overrides processor)', default=None)
ap.add_argument('--image-std', type=float, nargs='+', help='Standard deviation of the images for normalization (overrides processor)', default=None)

args = ap.parse_args()


if args.text_only and args.vision_only:
    print("--text-only and --vision-only arguments cannot be specified at the same time.")
    exit(1)

if args.use_f32:
    print("WARNING: Weights for the convolution op are always saved in f16, as the convolution op in GGML does not support 32-bit kernel weights yet.")

# output in the same directory as the model if output_dir is None
dir_model = args.model_dir

if args.clip_model_is_vision or not os.path.exists(dir_model + "/vocab.json") or args.clip_model_is_openclip:
    vocab = None
    tokens = None
else:
    with open(dir_model + "/vocab.json", "r", encoding="utf-8") as f:
        vocab = json.load(f)
        tokens = [key for key in vocab]

with open(dir_model + "/config.json", "r", encoding="utf-8") as f:
    config = json.load(f)
    if args.clip_model_is_vision:
        v_hparams = config
        t_hparams = None
    else:
        v_hparams = config["vision_config"]
        # only the vision tower is converted here, so text hparams are never read
        t_hparams = None

# possible data types
#   ftype == 0 -> float32
#   ftype == 1 -> float16
#
# map from ftype to string
ftype_str = ["f32", "f16"]

ftype = 1
if args.use_f32:
    ftype = 0

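# Rebuild the SigLIP vision tower from the HF config and load the weights stored in glm.clip.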
vision_config = SiglipVisionConfig(**v_hparams)
model = SiglipVisionModel(vision_config)
model.load_state_dict(torch.load(os.path.join(dir_model, "glm.clip")))

fname_middle = None
has_text_encoder = False
has_vision_encoder = True
has_glm_projector = True
if args.text_only:
    fname_middle = "text-"
    has_vision_encoder = False
elif args.llava_projector is not None:
    fname_middle = "mmproj-"
    has_text_encoder = False
    has_glm_projector = True
elif args.vision_only:
    fname_middle = "vision-"
    has_text_encoder = False
else:
    fname_middle = ""

output_dir = args.output_dir if args.output_dir is not None else dir_model
os.makedirs(output_dir, exist_ok=True)
output_prefix = os.path.basename(output_dir).replace("ggml_", "")  # currently unused
fname_out = os.path.join(output_dir, f"{fname_middle}model-{ftype_str[ftype]}.gguf")
fout = GGUFWriter(path=fname_out, arch="clip")

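# GGUF metadata: flags for which components are present, then the hyperparameters
# that are read back when the model is loaded.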
fout.add_bool("clip.has_text_encoder", has_text_encoder)
fout.add_bool("clip.has_vision_encoder", has_vision_encoder)
fout.add_bool("clip.has_glm_projector", has_glm_projector)
fout.add_file_type(ftype)
model_name = config["_name_or_path"] if "_name_or_path" in config else os.path.basename(dir_model)
fout.add_name(model_name)
if has_glm_projector:
    fout.add_description("image encoder for glm4v")
    fout.add_string("clip.projector_type", "adapter")
else:
    fout.add_description("two-tower CLIP model")

if has_text_encoder:
    assert t_hparams is not None
    assert tokens is not None
    # text_model hparams
    fout.add_uint32(k(KEY_CONTEXT_LENGTH, TEXT), t_hparams["max_position_embeddings"])
    fout.add_uint32(k(KEY_EMBEDDING_LENGTH, TEXT), t_hparams["hidden_size"])
    fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, TEXT), t_hparams["intermediate_size"])
    fout.add_uint32("clip.text.projection_dim", t_hparams.get("projection_dim", config["projection_dim"]))
    fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, TEXT), t_hparams["num_attention_heads"])
    fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, TEXT), t_hparams["layer_norm_eps"])
    fout.add_uint32(k(KEY_BLOCK_COUNT, TEXT), t_hparams["num_hidden_layers"])
    fout.add_token_list(tokens)

if has_vision_encoder:
    # vision_model hparams
    fout.add_uint32("clip.vision.image_size", v_hparams["image_size"])
    fout.add_uint32("clip.vision.patch_size", v_hparams["patch_size"])
    fout.add_uint32(k(KEY_EMBEDDING_LENGTH, VISION), v_hparams["hidden_size"])
    fout.add_uint32(k(KEY_FEED_FORWARD_LENGTH, VISION), v_hparams["intermediate_size"])
    fout.add_uint32("clip.vision.projection_dim", 0)
    fout.add_uint32(k(KEY_ATTENTION_HEAD_COUNT, VISION), v_hparams["num_attention_heads"])
    fout.add_float32(k(KEY_ATTENTION_LAYERNORM_EPS, VISION), 1e-6)  # SigLIP default layer_norm_eps
    fout.add_uint32(k(KEY_BLOCK_COUNT, VISION), v_hparams["num_hidden_layers"])

    image_mean = args.image_mean if args.image_mean is not None else default_image_mean
    image_std = args.image_std if args.image_std is not None else default_image_std
    fout.add_array("clip.vision.image_mean", image_mean)
    fout.add_array("clip.vision.image_std", image_std)

fout.add_bool("clip.use_gelu", True)


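# Write the adapter/projector tensors produced by the surgery step (--llava-projector).
# 2D and 4D projector tensors are stored as f16, everything else as f32.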
if has_glm_projector:
    # model.vision_model.encoder.layers.pop(-1)  # pyright: ignore[reportAttributeAccessIssue]
    projector = torch.load(args.llava_projector)
    for name, data in projector.items():
        name = get_tensor_name(name)
        # pw and dw conv ndim==4
        if data.ndim == 2 or data.ndim == 4:
            data = data.squeeze().numpy().astype(np.float16)
        else:
            data = data.squeeze().numpy().astype(np.float32)
        if name.startswith("vision."):
            name = name.replace("vision.", "")
        fout.add_tensor(name, data)
        print(f"Projector {name} - {data.dtype} - shape = {data.shape}")
        # print(f"Projector {name} tensors added\n")

state_dict = model.state_dict()  # pyright: ignore[reportAttributeAccessIssue]
for name, data in state_dict.items():
    if should_skip_tensor(name, has_text_encoder, has_vision_encoder, has_glm_projector):
        # we don't need this
        print(f"skipping parameter: {name}")
        continue

    name = get_tensor_name(name)
    data = data.squeeze().numpy()

    n_dims = len(data.shape)

    # ftype == 0 -> float32, ftype == 1 -> float16
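    # Per-tensor rule: 4D (conv) weights always go to f16; in f16 mode, 2D ".weight"
    # matrices are converted to f16 while biases/norms stay f32; with --use-f32,
    # everything except the conv weights is kept in f32.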
    ftype_cur = 0
    if n_dims == 4:
        print(f"tensor {name} is always saved in f16")
        data = data.astype(np.float16)
        ftype_cur = 1
    elif ftype == 1:
        if name.endswith(".weight") and n_dims == 2:
            # print("  Converting to float16")
            data = data.astype(np.float16)
            ftype_cur = 1
        else:
            # print("  Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0
    else:
        if data.dtype != np.float32:
            # print("  Converting to float32")
            data = data.astype(np.float32)
            ftype_cur = 0
    print(f"siglip {name} - {data.dtype} - shape = {data.shape}")
    # print(f"{name} - {ftype_str[ftype_cur]} - shape = {data.shape}")
    fout.add_tensor(name, data)


fout.write_header_to_file()
fout.write_kv_data_to_file()
fout.write_tensors_to_file()
fout.close()

print("Done. Output file: " + fname_out)