#include "models.h"

llm_build_glm4::llm_build_glm4(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

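    // multi-section RoPE (mrope) splits the rotary dims into sections (ggml_rope_multi
    // expects four of them); copy them out of hparams into a local array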
    int sections[4];
    std::copy(std::begin(hparams.rope_sections), std::begin(hparams.rope_sections) + 4, sections);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    bool use_mrope = hparams.use_mrope();
    if (ubatch.embd && !use_mrope) {
        // unfortunately, we need to forcefully stop here to avoid users complaining about wrong results
        GGML_ABORT("This GGUF does not support multimodal. Please reconvert it.");
    }

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

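    // attention input backed by the KV cache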
    auto * inp_attn = build_attn_inp_kv();

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // Pre-attention norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            ggml_tensor * Qcur = nullptr;
            ggml_tensor * Kcur = nullptr;
            ggml_tensor * Vcur = nullptr;

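            // some GGUFs store separate Q/K/V projections, others a single fused QKV tensor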
            if (model.layers[il].wqkv == nullptr) {
                Qcur = build_lora_mm(model.layers[il].wq, cur);
                if (model.layers[il].bq) {
                    Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                }
                Kcur = build_lora_mm(model.layers[il].wk, cur);
                if (model.layers[il].bk) {
                    Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                }
                Vcur = build_lora_mm(model.layers[il].wv, cur);
                if (model.layers[il].bv) {
                    Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                }
                Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
                Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
                Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
            } else {
                cur = build_lora_mm(model.layers[il].wqkv, cur);
                cb(cur, "wqkv", il);
                if (model.layers[il].bqkv) {
                    cur = ggml_add(ctx0, cur, model.layers[il].bqkv);
                    cb(cur, "bqkv", il);
                }
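                // slice the fused QKV result: Q starts at offset 0, K at n_embd, V at n_embd + n_embd_gqa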
                Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head,    n_tokens, n_embd_head * sizeof(float), cur->nb[1], 0 * sizeof(float) * (n_embd));
                Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head * sizeof(float), cur->nb[1], 1 * sizeof(float) * (n_embd));
                Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head * sizeof(float), cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa));
            }

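            // the multimodal variants rotate Q/K with per-section mrope, using the
            // sections copied above; text-only models take the standard RoPE path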
            if (use_mrope) {
                Qcur = ggml_rope_multi(ctx0, Qcur, inp_pos, nullptr,
                            n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow);

                Kcur = ggml_rope_multi(ctx0, Kcur, inp_pos, nullptr,
                            n_rot, sections, rope_type, n_ctx_orig, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow);
            } else {
                // Normal RoPE
                Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot,
                            rope_type, n_ctx_orig, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow);

                Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot,
                            rope_type, n_ctx_orig, freq_base, freq_scale,
                            ext_factor, attn_factor, beta_fast, beta_slow);
            }

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            cur = build_attn(inp_attn,
                    model.layers[il].wo, NULL,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
        }
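
        // on the last layer, keep only the rows needed for the requested outputs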
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        // Post-attention norm (applied before the residual is added back)
        cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "post_attn_norm", il);

        // Add the input (residual connection after post-attention norm)
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // FF
        {
            // Pre-MLP norm
            cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);

            // MLP
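            // (ffn_up stores the fused gate+up projection; LLM_FFN_SWIGLU splits it in
            // half and applies SiLU gating before ffn_down projects back to n_embd)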
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, NULL, NULL,
                    NULL, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL, LLM_FFN_SWIGLU, LLM_FFN_SEQ, il);
            cb(cur, "ffn_out", il);

            // Post-MLP norm
            cur = build_norm(cur, model.layers[il].ffn_post_norm, NULL, LLM_NORM_RMS, il);
            cb(cur, "post_mlp_norm", il);
        }

        // Add residual connection after post-MLP norm
        inpL = ggml_add(ctx0, cur, ffn_inp);
        cb(inpL, "l_out", il);
    }

    // Final norm
    cur = build_norm(inpL, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // Output projection
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

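    // add the final tensor (and, transitively, all of its dependencies) to the graph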
    ggml_build_forward_expand(gf, cur);
}