#include "models.h"
#include <cstring>
#include <cmath>

// note: this is similar to clip_graph::resize_position_embeddings; the major difference is that
// w/h are stored in ne[1] and ne[2] instead of being derived with sqrt. Could try storing the
// tensor in 2D with a w*h instead? The permute is also slightly different: (2, 1, 0, 3) instead of (2, 0, 1, 3).
ggml_tensor * clip_graph_kimik25::resize_position_embeddings_3d(uint32_t interpolation_mode) {
    ggml_tensor * pos_embd = model.position_embeddings;
    const int height       = img.ny / patch_size;
    const int width        = img.nx / patch_size;
    const uint32_t mode    = interpolation_mode;

    GGML_ASSERT(pos_embd);

    const int64_t stored_c = pos_embd->ne[0];  // C = 1152
    const int64_t orig_w = pos_embd->ne[1];    // W = 64
    const int64_t orig_h = pos_embd->ne[2];    // H = 64

    GGML_ASSERT(stored_c == n_embd);
    if (height == (int)orig_h && width == (int)orig_w) {
        // No interpolation needed, just flatten to [C, H*W]
        return ggml_cont_2d(ctx0, pos_embd, n_embd, width * height);
    }

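    // [C, W, H] -> [H, W, C] so the two spatial dims sit in ne[0]/ne[1] for ggml_interpolate,
    // then permute back to [C, W, H] and flatten to [C, H*W]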
    pos_embd = ggml_permute(ctx0, pos_embd, 2, 1, 0, 3);
    pos_embd = ggml_interpolate(ctx0, pos_embd, height, width, n_embd, 1, mode);
    pos_embd = ggml_permute(ctx0, pos_embd, 2, 1, 0, 3);
    pos_embd = ggml_cont_2d(ctx0, pos_embd, n_embd, width * height);
    return pos_embd;
}

ggml_cgraph * clip_graph_kimik25::build() {
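    // per-patch row/column indices for the 2D RoPE; these are graph inputs,
    // expected to be filled by the caller at eval time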
    ggml_tensor * pos_h = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
    ggml_set_name(pos_h, "pos_h");
    ggml_set_input(pos_h);

    ggml_tensor * pos_w = ggml_new_tensor_1d(ctx0, GGML_TYPE_I32, n_patches);
    ggml_set_name(pos_w, "pos_w");
    ggml_set_input(pos_w);

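    // learned absolute position embeddings, resized to the current patch grid: [n_embd, n_patches]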
    ggml_tensor * learned_pos_embd = resize_position_embeddings_3d(GGML_SCALE_MODE_BICUBIC);

    // Kimi-K2.5 natively uses an interleaved 2D RoPE pattern, but
    // Q/K are permuted during conversion so the split format can be used here.
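    // (in the split format, one axis position rotates the first half of each
    //  head's dims and the other axis rotates the second half)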
    auto add_pos = [&](ggml_tensor * cur, const clip_layer &) {
        cur = build_rope_2d(ctx0, cur, pos_w, pos_h, hparams.rope_theta, false);
        return cur;
    };

    ggml_tensor * inp = build_inp();

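    // inp and learned_pos_embd are both [n_embd, n_patches], so this is a plain element-wise add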
    // I don't know why, but doing this inside build_vit led to the ggml_add not occurring;
    // doing it manually here does work.
    inp = ggml_add(ctx0, inp, learned_pos_embd);

    ggml_tensor * cur = build_vit(
                            inp, n_patches,
                            NORM_TYPE_NORMAL,
                            hparams.ffn_op,
                            nullptr,
                            add_pos);

    cb(cur, "vit_out", -1);

    {
        // patch_merger
        const int scale_factor = model.hparams.n_merge;
        cur = build_patch_merge_permute(cur, scale_factor);

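        // after the merge, each row packs scale_factor^2 patches: [n_embd * scale_factor^2, n_merged_patches]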
        // projection norm: view as [n_embd, n_merged_patches * scale_factor^2] so the norm
        // runs over each original n_embd-sized channel vector
        int proj_inp_dim = cur->ne[0];
        int n_merged_patches = cur->ne[1];
        cur = ggml_view_2d(ctx0, cur,
            n_embd, n_merged_patches * scale_factor * scale_factor,
            ggml_row_size(cur->type, n_embd), 0);
        cur = ggml_norm(ctx0, cur, hparams.eps);
        cur = ggml_mul(ctx0, cur, model.mm_input_norm_w);
        cur = ggml_add(ctx0, cur, model.mm_input_norm_b);
        // restore the merged shape [proj_inp_dim, n_merged_patches]
        cur = ggml_view_2d(ctx0, cur,
            proj_inp_dim, n_merged_patches,
            ggml_row_size(cur->type, proj_inp_dim), 0);
        cb(cur, "proj_inp_normed", -1);

        // projection mlp
        cur = build_ffn(cur,
            model.mm_1_w, model.mm_1_b,
            nullptr, nullptr,
            model.mm_2_w, model.mm_2_b,
            FFN_GELU,
            -1);

        cb(cur, "proj_out", -1);
    }

    // build the graph
    ggml_build_forward_expand(gf, cur);

    return gf;
}