#include "models.h"

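// Graph builder for the Qwen2-VL / Qwen2.5-VL vision encoder:
// patch embedding via two convolutions, M-RoPE positions, optional window
// attention (Qwen2.5-VL), and a 2-layer MLP merger that compresses each
// 2x2 block of patches into a single output embedding.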
ggml_cgraph * clip_graph_qwen2vl::build() {
    GGML_ASSERT(model.patch_bias == nullptr);
    GGML_ASSERT(model.class_embedding == nullptr);

    const int batch_size       = 1;
    const bool use_window_attn = hparams.n_wa_pattern > 0;
    const int n_wa_pattern     = hparams.n_wa_pattern;
    const int n_pos            = n_patches;
    const int num_position_ids = n_pos * 4; // M-RoPE requires 4 dims per position

    norm_type norm_t = proj_type == PROJECTOR_TYPE_QWEN25VL
        ? NORM_TYPE_RMS // qwen 2.5 vl
        : NORM_TYPE_NORMAL; // qwen 2 vl

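    // rotary dim sections for M-RoPE, one entry per position component carried
    // in the "positions" tensor below (see num_position_ids above)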
    int mrope_sections[4] = {d_head/4, d_head/4, d_head/4, d_head/4};

    ggml_tensor * inp_raw = build_inp_raw();
    ggml_tensor * inp = ggml_conv_2d(ctx0, model.patch_embeddings_0, inp_raw, patch_size, patch_size, 0, 0, 1, 1);

    GGML_ASSERT(img.nx % (patch_size * 2) == 0);
    GGML_ASSERT(img.ny % (patch_size * 2) == 0);

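    // the two patch_embeddings convolutions correspond to the temporal patch size
    // of 2 (for a single image both see the same frame, so their outputs are summed);
    // the permute/reshape sequence below then reorders the patches so that every 4
    // consecutive tokens form a 2x2 spatial block, which the n_embd*4 merger at the
    // end of the graph relies on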
    // second conv dimension
    {
        auto inp_1 = ggml_conv_2d(ctx0, model.patch_embeddings_1, inp_raw, patch_size, patch_size, 0, 0, 1, 1);
        inp = ggml_add(ctx0, inp, inp_1);

        inp = ggml_permute(ctx0, inp, 1, 2, 0, 3);  // [w, h, c, b] -> [c, w, h, b]
        inp = ggml_cont_4d(
            ctx0, inp,
            n_embd * 2, n_patches_x / 2, n_patches_y, batch_size);
        inp = ggml_reshape_4d(
            ctx0, inp,
            n_embd * 2, n_patches_x / 2, 2, batch_size * (n_patches_y / 2));
        inp = ggml_permute(ctx0, inp, 0, 2, 1, 3);
        inp = ggml_cont_3d(
            ctx0, inp,
            n_embd, n_patches_x * n_patches_y, batch_size);
    }

    ggml_tensor * inpL           = inp;
    ggml_tensor * window_mask    = nullptr;
    ggml_tensor * window_idx     = nullptr;
    ggml_tensor * inv_window_idx = nullptr;

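    // M-RoPE position ids, 4 per token; marked as a graph input and filled in
    // by the runtime before the graph is computed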
    ggml_tensor * positions = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, num_position_ids);
    ggml_set_name(positions, "positions");
    ggml_set_input(positions);

    // pre-layernorm
    if (model.pre_ln_w) {
        inpL = build_norm(inpL, model.pre_ln_w, model.pre_ln_b, norm_t, eps, -1);
    }

    if (use_window_attn) {
        // handle window attention inputs
        inv_window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
        ggml_set_name(inv_window_idx, "inv_window_idx");
        ggml_set_input(inv_window_idx);
        // mask for window attention
        window_mask = ggml_new_tensor_2d(ctx0, GGML_TYPE_F32, n_pos, n_pos);
        ggml_set_name(window_mask, "window_mask");
        ggml_set_input(window_mask);

        // if flash attn is used, the mask needs to be cast to f16
        if (flash_attn_type == CLIP_FLASH_ATTN_TYPE_ENABLED) {
            window_mask = ggml_cast(ctx0, window_mask, GGML_TYPE_F16);
        }

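        // reorder the patch sequence so that tokens belonging to the same attention
        // window are contiguous: groups of 4 tokens (one 2x2 block each) are moved
        // as a unit via inv_window_idx; window_mask then limits the windowed layers
        // to attend only within their own window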
        // inpL shape: [n_embd, n_patches_x * n_patches_y, batch_size]
        GGML_ASSERT(batch_size == 1);
        inpL = ggml_reshape_2d(ctx0, inpL, n_embd * 4, n_patches_x * n_patches_y * batch_size / 4);
        inpL = ggml_get_rows(ctx0, inpL, inv_window_idx);
        inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_patches_x * n_patches_y, batch_size);
    }

    // loop over layers
    for (int il = 0; il < n_layer; il++) {
        const auto & layer = model.layers[il];
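        // with window attention enabled, only every n_wa_pattern-th layer attends
        // over all patches; the other layers get window_mask and are restricted to
        // their local window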
        const bool full_attn = use_window_attn ? (il + 1) % n_wa_pattern == 0 : true;

        ggml_tensor * cur = inpL; // inpL = residual, cur = hidden_states

        // layernorm1
        cur = build_norm(cur, layer.ln_1_w, layer.ln_1_b, norm_t, eps, il);
        cb(cur, "ln1", il);

        // self-attention
        {
            ggml_tensor * Qcur = ggml_add(ctx0,
                ggml_mul_mat(ctx0, layer.q_w, cur), layer.q_b);
            ggml_tensor * Kcur = ggml_add(ctx0,
                ggml_mul_mat(ctx0, layer.k_w, cur), layer.k_b);
            ggml_tensor * Vcur = ggml_add(ctx0,
                ggml_mul_mat(ctx0, layer.v_w, cur), layer.v_b);

            Qcur = ggml_reshape_3d(ctx0, Qcur, d_head, n_head, n_patches);
            Kcur = ggml_reshape_3d(ctx0, Kcur, d_head, n_head, n_patches);
            Vcur = ggml_reshape_3d(ctx0, Vcur, d_head, n_head, n_patches);

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            // apply M-RoPE
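            // positions carries 4 ids per token; mrope_sections controls how the
            // rotary dims are split across those components. trailing args:
            // n_ctx_orig = 32768, freq_base = 10000, and no-op rope scaling
            // (freq_scale = 1, ext_factor = 0)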
            Qcur = ggml_rope_multi(
                ctx0, Qcur, positions, nullptr,
                d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);
            Kcur = ggml_rope_multi(
                ctx0, Kcur, positions, nullptr,
                d_head/2, mrope_sections, GGML_ROPE_TYPE_VISION, 32768, 10000, 1, 0, 1, 32, 1);

            cb(Qcur, "Qcur_rope", il);
            cb(Kcur, "Kcur_rope", il);

            ggml_tensor * attn_mask = full_attn ? nullptr : window_mask;

            cur = build_attn(layer.o_w, layer.o_b,
                Qcur, Kcur, Vcur, attn_mask, kq_scale, il);
            cb(cur, "attn_out", il);
        }

        // add back the layer input (first residual connection)
        cur = ggml_add(ctx0, cur, inpL);

        inpL = cur; // inpL = residual, cur = hidden_states

        cb(cur, "ffn_inp", il);

        // layernorm2
        cur = build_norm(cur, layer.ln_2_w, layer.ln_2_b, norm_t, eps, il);
        cb(cur, "ffn_inp_normed", il);

        // ffn
        cur = build_ffn(cur,
            layer.ff_up_w, layer.ff_up_b,
            layer.ff_gate_w, layer.ff_gate_b,
            layer.ff_down_w, layer.ff_down_b,
            hparams.ffn_op, il);

        cb(cur, "ffn_out", il);

        // residual 2
        cur = ggml_add(ctx0, inpL, cur);
        cb(cur, "layer_out", il);

        inpL = cur;
    }

    // post-layernorm
    if (model.post_ln_w) {
        inpL = build_norm(inpL, model.post_ln_w, model.post_ln_b, norm_t, eps, n_layer);
    }

    // multimodal projection
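    // merger: every 4 consecutive patch embeddings (a 2x2 spatial block, given
    // the reordering done after the convolutions) are concatenated and projected
    // by a 2-layer GELU MLP (mm_0 -> mm_1) to hparams.projection_dim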
    ggml_tensor * embeddings = inpL;
    embeddings = ggml_reshape_3d(ctx0, embeddings, n_embd * 4, n_pos / 4, batch_size);
    embeddings = build_ffn(embeddings,
                        model.mm_0_w, model.mm_0_b,
                        nullptr, nullptr,
                        model.mm_1_w, model.mm_1_b,
                        FFN_GELU,
                        -1);

    if (use_window_attn) {
        window_idx = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_pos / 4);
        ggml_set_name(window_idx, "window_idx");
        ggml_set_input(window_idx);

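        // reorder the merged tokens back into the original patch order
        // (window_idx reverses the inv_window_idx shuffle applied before the layers)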
        // embeddings shape: [hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size]
        GGML_ASSERT(batch_size == 1);
        embeddings = ggml_reshape_2d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4);
        embeddings = ggml_get_rows(ctx0, embeddings, window_idx);
        embeddings = ggml_reshape_3d(ctx0, embeddings, hparams.projection_dim, n_patches_x * n_patches_y / 4, batch_size);
    }

    // build the graph
    ggml_build_forward_expand(gf, embeddings);

    return gf;
}