#include "models.h"

ggml_cgraph * clip_graph_qwen3vl::build() {
    GGML_ASSERT(model.patch_bias != nullptr);
    GGML_ASSERT(model.position_embeddings != nullptr);
    GGML_ASSERT(model.class_embedding == nullptr);

    const int batch_size       = 1;
    const int n_pos            = n_patches;
    const int num_position_ids = n_pos * 4; // m-rope requires 4 dim per position

    norm_type norm_t = NORM_TYPE_NORMAL;

    int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};
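    // the head dimension is split evenly into four M-RoPE sections, one per
    // position component supplied via the "positions" input below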

    ggml_tensor * inp_raw = build_inp_raw();
    ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
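    // ggml_conv_2d yields one n_embd-dim feature per patch,
    // laid out as [n_patches_x, n_patches_y, n_embd, batch_size]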

    GGML_ASSERT(img.nx % (patch_size * 2) == 0);
    GGML_ASSERT(img.ny % (patch_size * 2) == 0);
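
    // note (assumption): the two patch-embedding convs appear to correspond to
    // the two temporal slots of the original model's 3D patch embedding; for a
    // single image both see the same frame, so their outputs are simply summed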

    // second conv dimension
    {
        auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
        inp = ggml_add(ctx0, inp, inp_1);

        inp = ggml_permute(ctx0, inp, 1, 2, 0, 3);  // [w, h, c, b] -> [c, w, h, b]
        inp = ggml_cont_4d(
            ctx0, inp,
            n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
        inp = ggml_reshape_4d(
            ctx0, inp,
            n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
        inp = ggml_permute(ctx0, inp, 0, 2, 1, 3);
        inp = ggml_cont_3d(
            ctx0, inp,
            n_embd, n_patches_x * n_patches_y, batch_size);
    }
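    // inp is now [n_embd, n_patches_x * n_patches_y, batch_size], with the
    // patch sequence reordered so that each 2x2 block of neighboring patches is
    // contiguous, ready for the 2x2 spatial merge in the projector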

    // add patch bias
    if (model.patch_bias != nullptr) {
        inp = ggml_add(ctx0, inp, model.patch_bias);
        cb(inp, "patch_bias", -1);
    }

    // calculate absolute position embedding and apply
    ggml_tensor * learned_pos_embd = resize_position_embeddings();
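    // rearrange the resized position embeddings with the same 2x2-merge
    // permutation as inp above, so both tensors use the same patch ordering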
    learned_pos_embd = ggml_cont_4d(
        ctx0, learned_pos_embd,
        n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
    learned_pos_embd = ggml_reshape_4d(
        ctx0, learned_pos_embd,
        n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
    learned_pos_embd = ggml_permute(ctx0, learned_pos_embd, 0, 2, 1, 3);
    learned_pos_embd = ggml_cont_3d(
        ctx0, learned_pos_embd,
        n_embd, n_patches_x * n_patches_y, batch_size);
    inp = ggml_add(ctx0, inp, learned_pos_embd);
    cb(inp, "inp_pos_emb", -1);

    ggml_tensor * inpL = inp;

    ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
    ggml_set_name(positions, "positions");
    ggml_set_input(positions);
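    // "positions" is marked as a graph input: it is filled at eval time with 4
    // position components per patch (one per M-RoPE section); the exact layout
    // is determined by the caller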

    // pre-layernorm
    if (model.pre_ln_w) {
        inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
    }

    // deepstack features (stack along the feature dimension), [n_embd * len(deepstack_layers), n_patches_x * n_patches_y, batch_size]
    ggml_tensor * deepstack_features = nullptr;
    const int merge_factor = hparams.n_merge > 0 ? hparams.n_merge * hparams.n_merge : 4; // default 2x2=4 for qwen3vl
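    // intermediate activations from layers tagged as deepstack are projected
    // and accumulated into deepstack_features inside the layer loop, then
    // appended to the final output after the multimodal projection below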

    // loop over layers
    for (int il = 0; il < n_layer; il++) {
        auto & layer = model.layers[il];

        ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

        // layernorm1
        cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
        cb(cur, "ln1", il);

        // self-attention
        {
            cur = ggml_mul_mat(ctx0, layer.qkv_w, cur);
            cur = ggml_add(ctx0, cur, layer.qkv_b);

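            // split the fused QKV projection into Q/K/V with strided views:
            // rows [0, n_embd) are Q, [n_embd, 2*n_embd) are K,
            // [2*n_embd, 3*n_embd) are V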
            ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ 0);

            ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, n_embd));

            ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, d_head, n_head, n_pos,
                    /* nb1    */ ggml_row_size(cur->type, d_head),
                    /* nb2    */ cur->nb[1],
                    /* offset */ ggml_row_size(cur->type, 2 * n_embd));

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            // apply M-RoPE
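            // note: passing d_head/2 as n_dims follows the qwen-vl style vision
            // rope; how the four mrope_sections consume the position components
            // is handled inside ggml_rope_multi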
            Qcur = ggml_rope_multi(
                ctx0, Qcur, positions, nullptr,
                d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
            Kcur = ggml_rope_multi(
                ctx0, Kcur, positions, nullptr,
                d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

            cb(Qcur, "Qcur_rope", il);
            cb(Kcur, "Kcur_rope", il);

            cur = build_attn(layer.o_w, layer.o_b,
                Qcur, Kcur, Vcur, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
        }

        // re-add the layer input, i.e. the residual
        cur = ggml_add(ctx0, cur, inpL);

        inpL = cur; // inpL = residual, cur = hidden_states

        cb(cur, "ffn_inp", il);

        // layernorm2
        cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
        cb(cur, "ffn_inp_normed", il);

        // ffn
        cur = build_ffn(cur,
            layer.ff_up_w, layer.ff_up_b,
            layer.ff_gate_w, layer.ff_gate_b,
            layer.ff_down_w, layer.ff_down_b,
            hparams.ffn_op, il);

        cb(cur, "ffn_out", il);

        // residual 2
        cur = ggml_add(ctx0, inpL, cur);
        cb(cur, "layer_out", il);

        if (layer.has_deepstack()) {
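            // deepstack merger: group each 2x2 patch window into one row of
            // n_embd * merge_factor features, normalize, then project through a
            // two-layer GELU MLP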
            ggml_tensor * feat = ggml_reshape_3d(ctx0, cur, n_embd * merge_factor, n_pos / merge_factor, batch_size);
            feat = build_norm(feat, layer.deepstack_norm_w, layer.deepstack_norm_b, norm_t, eps, il);
            feat = build_ffn(feat,
                layer.deepstack_fc1_w, layer.deepstack_fc1_b,
                nullptr, nullptr,
                layer.deepstack_fc2_w, layer.deepstack_fc2_b,
                ffn_op_type::FFN_GELU, il);

            if (!deepstack_features) {
                deepstack_features = feat;
            } else {
                // concat along the feature dimension
                deepstack_features = ggml_concat(ctx0, deepstack_features, feat, 0);
            }
        }

        inpL = cur;
    }

    // post-layernorm
    if (model.post_ln_w) {
        inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
    }

    // multimodal projection
    ggml_tensor * embeddings = inpL;
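    // fold each contiguous group of 4 patches (the 2x2 windows prepared by the
    // reordering above) into one row; note the factor is hardcoded to 4 here,
    // matching the default 2x2 merge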
    embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);

    embeddings = build_ffn(embeddings,
        model.mm_0_w, model.mm_0_b,
        nullptr, nullptr,
        model.mm_1_w, model.mm_1_b,
        ffn_op_type::FFN_GELU, -1);

    if (deepstack_features) {
        // concat along the feature dimension
        embeddings = ggml_concat(ctx0, embeddings, deepstack_features, 0);
    }
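    // note: with deepstack enabled, the output feature dimension grows by one
    // block per deepstack layer (see the deepstack_features comment above)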

    // build the graph
    ggml_build_forward_expand(gf, embeddings);

    return gf;
}