import argparse
import glob
import os
import torch


ap = argparse.ArgumentParser()
ap.add_argument("-m", "--model", help="Path to LLaVA v1.5 model")
args = ap.parse_args()
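# example invocation (the script filename here is illustrative):
#   python llava-surgery.py -m models/llava-v1.5-7b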

# find the model part that includes the multimodal projector weights
# (with a sharded checkpoint this is the lexicographically last shard)
path = sorted(glob.glob(f"{args.model}/pytorch_model*.bin"))[-1]
# load on CPU so the surgery does not require a GPU
checkpoint = torch.load(path, map_location=torch.device("cpu"))

# get a list of mm tensor names
mm_tensors = [k for k in checkpoint.keys() if k.startswith("model.mm_projector")]
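# for LLaVA v1.5 these are typically the projector's two linear layers,
# e.g. model.mm_projector.0.{weight,bias} and model.mm_projector.2.{weight,bias}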

# store these tensors in a new dictionary and torch.save them
projector = {name: checkpoint[name].float() for name in mm_tensors}
torch.save(projector, f"{args.model}/llava.projector")
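# llava.projector is picked up later when building the image-encoder GGUF;
# the exact conversion step depends on your tooling (see the notes printed below)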

# BakLLaVA models also contain CLIP tensors; split them out as well
clip_tensors = [k for k in checkpoint.keys() if k.startswith("model.vision_tower")]
if len(clip_tensors) > 0:
    # drop the duplicated vision_tower prefix from the tensor names
    clip = {name.replace("vision_tower.vision_tower.", ""): checkpoint[name].float() for name in clip_tensors}
    torch.save(clip, f"{args.model}/llava.clip")
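
    # llava.clip now holds the vision tower weights, which can be converted
    # into a standalone image encoder in a later step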


    # added tokens need to be removed to be able to convert Mistral-based models
    if os.path.exists(f"{args.model}/added_tokens.json"):
        with open(f"{args.model}/added_tokens.json", "w") as f:
            f.write("{}\n")
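
    # without this, the LLaMA-to-GGUF conversion can fail on the extra tokens
    # that Mistral-based (e.g. BakLLaVA) checkpoints ship in added_tokens.json;
    # overwriting the file with an empty JSON object is enough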


print("Done!")
print(f"Now you can convert {args.model} to a regular LLaMA GGUF file.")
print(f"Also, use {args.model}/llava.projector to prepare a llava-encoder.gguf file.")