#include "models.h"

llm_build_deepseek2::llm_build_deepseek2(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const bool is_mla = hparams.is_mla();

    // note: these are the actual head sizes you get when treating as MHA or after "decompression" using wv_b for MLA
    const int64_t n_embd_head_k = hparams.n_embd_head_k_mla();
    const int64_t n_embd_head_v = hparams.n_embd_head_v_mla();

    const int64_t n_embd_head_qk_rope = hparams.n_rot;
    const int64_t n_embd_head_qk_nope = n_embd_head_k - n_embd_head_qk_rope;

    const uint32_t kv_lora_rank = hparams.n_lora_kv;

    // We have to pre-scale kq_scale and attn_factor to make the YaRN RoPE work correctly.
    // See https://github.com/ggml-org/llama.cpp/discussions/7416 for a detailed explanation.
    // And also: https://github.com/ggml-org/llama.cpp/pull/17945 [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]

    // first cancel the adjustment from llama_hparams::yarn_attn_factor_adjust to get the original attn_factor
    GGML_ASSERT(ext_factor >= 0.0f);
    const float attn_factor_org = attn_factor / (1.0f + 0.1f * logf(1.0f / freq_scale));

    // use the original attn_factor to pre-scale the kq_scale
    const float mscale   = attn_factor_org * (1.0f + 0.1f * hparams.rope_yarn_log_mul * logf(1.0f / freq_scale));
    const float kq_scale = 1.0f * mscale * mscale / sqrtf(float(n_embd_head_k));
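    // in short: YaRN scales both q and k by mscale before the dot product, so the correction
    // enters the attention score squared, i.e. kq_scale = mscale^2 / sqrt(n_embd_head_k);
    // see the links above for the details of why it is pre-folded here instead of inside rope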

    ggml_tensor * cur;
    ggml_tensor * inpL;

    // {n_embd, n_tokens}
    inpL = build_inp_embd(model.tok_embd);

    // (optional) temperature tuning - used by mistral-large
    ggml_tensor * inp_attn_scale = nullptr;
    if (hparams.f_attn_temp_scale != 0.0f) {
        inp_attn_scale = build_inp_attn_scale();
    }

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn_kv = !is_mla ? build_attn_inp_kv() : nullptr;
    auto * inp_attn_k  =  is_mla ? build_attn_inp_k()  : nullptr;
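    // note: with MLA the cache holds a single latent head per layer (K = [k_pe | kv_cmpr]
    // and V aliases the same latent), so only the K-side input is needed; without MLA,
    // full per-head K/V tensors are cached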

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self_attention
        {
            ggml_tensor * q = NULL;

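            // DeepSeek-V2-Lite ships a direct wq instead of the low-rank wq_a/wq_b pair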
            const bool is_lite = model.layers[il].wq != nullptr;

            if (!is_lite) {
                q = ggml_mul_mat(ctx0, model.layers[il].wq_a, cur);
                cb(q, "q", il);

                q = build_norm(q, model.layers[il].attn_q_a_norm, nullptr, LLM_NORM_RMS, il);
                cb(q, "q", il);

                q = ggml_mul_mat(ctx0, model.layers[il].wq_b, q);
                cb(q, "q", il);
            } else {
                q = ggml_mul_mat(ctx0, model.layers[il].wq, cur);
                cb(q, "q", il);
            }
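
            // each row of q packs n_head heads of size n_embd_head_k, each laid out as
            // [ nope | rope ]; the two views below alias q's data without copying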
            // split into {n_embd_head_qk_nope, n_head, n_tokens}
            ggml_tensor * q_nope =
                ggml_view_3d(ctx0, q, n_embd_head_qk_nope, n_head, n_tokens, ggml_row_size(q->type, n_embd_head_k),
                             ggml_row_size(q->type, n_embd_head_k) * n_head, 0);
            cb(q_nope, "q_nope", il);

            // and {n_embd_head_qk_rope, n_head, n_tokens}
            ggml_tensor * q_pe = ggml_view_3d(
                ctx0, q, n_embd_head_qk_rope, n_head, n_tokens, ggml_row_size(q->type, n_embd_head_k),
                ggml_row_size(q->type, n_embd_head_k) * n_head, ggml_row_size(q->type, n_embd_head_qk_nope));
            cb(q_pe, "q_pe", il);

            ggml_tensor * kv_cmpr_pe = ggml_mul_mat(ctx0, model.layers[il].wkv_a_mqa, cur);
            cb(kv_cmpr_pe, "kv_cmpr_pe", il);

            // split into {kv_lora_rank, n_tokens}
            ggml_tensor * kv_cmpr =
                ggml_view_2d(ctx0, kv_cmpr_pe, kv_lora_rank, n_tokens,
                             ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope), 0);
            cb(kv_cmpr, "kv_cmpr", il);

            // and {n_embd_head_qk_rope, 1, n_tokens}
            ggml_tensor * k_pe = ggml_view_3d(ctx0, kv_cmpr_pe, n_embd_head_qk_rope, 1, n_tokens,
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank + n_embd_head_qk_rope),
                                              ggml_row_size(kv_cmpr_pe->type, kv_lora_rank));
            cb(k_pe, "k_pe", il);
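            // note: each kv_cmpr_pe row is laid out as [ kv_lora_rank | n_embd_head_qk_rope ],
            // so both views above alias it without copying; k_pe keeps a single MQA-style head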

            q_pe = ggml_rope_ext(ctx0, q_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);
            cb(q_pe, "q_pe", il);

            k_pe = ggml_rope_ext(ctx0, k_pe, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);
            cb(k_pe, "k_pe", il);

            kv_cmpr = build_norm(kv_cmpr, model.layers[il].attn_kv_a_norm, nullptr, LLM_NORM_RMS, il);
            cb(kv_cmpr, "kv_cmpr", il);

            if (is_mla) {
                // {n_embd_head_qk_nope, n_tokens, n_head}
                q_nope = ggml_permute(ctx0, q_nope, 0, 2, 1, 3);
                cb(q_nope, "q_nope_perm", il);

                // {n_embd_head_qk_nope, kv_lora_rank, n_head} x {n_embd_head_qk_nope, n_tokens, n_head}
                ggml_tensor * q_nope_absorbed = ggml_mul_mat(ctx0, model.layers[il].wk_b, q_nope);
                cb(q_nope_absorbed, "q_nope_absorbed", il);
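                // "absorbing" wk_b into q projects it down to kv_lora_rank, so the attention
                // scores can be computed against the cached latent directly instead of
                // decompressing K for every cached token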

                // {kv_lora_rank, n_head, n_tokens}
                q_nope_absorbed = ggml_permute(ctx0, q_nope_absorbed, 0, 2, 1, 3);
                cb(q_nope_absorbed, "q_nope_absorbed_perm", il);

                // {n_embd_head_qk_rope + kv_lora_rank, n_head, n_tokens}
                // note: rope must go first for in-place context shifting in build_rope_shift()
                ggml_tensor * Qcur = ggml_concat(ctx0, q_pe, q_nope_absorbed, 0);
                cb(Qcur, "Qcur", il);

                kv_cmpr = ggml_reshape_3d(ctx0, kv_cmpr, kv_lora_rank, 1, n_tokens);
                cb(kv_cmpr, "kv_cmpr_reshape", il);

                // {n_embd_head_qk_rope + kv_lora_rank, 1, n_tokens}
                ggml_tensor * Kcur = ggml_concat(ctx0, k_pe, kv_cmpr, 0);
                cb(Kcur, "Kcur", il);

                // {kv_lora_rank, 1, n_tokens}
                ggml_tensor * Vcur = kv_cmpr;
                cb(Vcur, "Vcur", il);

                if (inp_attn_scale) {
                    // apply llama 4 temperature scaling
                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
                    cb(Qcur, "Qcur_attn_temp_scaled", il);
                }

                // note: MLA with the absorption optimization converts into MQA (ie: GQA with 1 group)
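                // passing wv_b lets build_attn "decompress" the latent values back to
                // n_embd_head_v per head after the attention weights have been applied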
                cur = build_attn(inp_attn_k,
                        model.layers[il].wo, NULL,
                        Qcur, Kcur, Vcur, nullptr, nullptr, model.layers[il].wv_b, kq_scale, il);
            } else {
                ggml_tensor * kv = ggml_mul_mat(ctx0, model.layers[il].wkv_b, kv_cmpr);
                cb(kv, "kv", il);

                // split into {n_embd_head_qk_nope, n_head, n_tokens}
                ggml_tensor * k_nope =
                    ggml_view_3d(ctx0, kv, n_embd_head_qk_nope, n_head, n_tokens,
                                 ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                                 ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head, 0);
                cb(k_nope, "k_nope_view", il);

                // and {n_embd_head_v, n_head, n_tokens}
                ggml_tensor * Vcur = ggml_view_3d(ctx0, kv, n_embd_head_v, n_head, n_tokens,
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v),
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope + n_embd_head_v) * n_head,
                                                  ggml_row_size(kv->type, n_embd_head_qk_nope));
                cb(Vcur, "Vcur_view", il);

                Vcur = ggml_cont(ctx0, Vcur);
                cb(Vcur, "Vcur_cont", il);

                ggml_tensor * Qcur = ggml_concat(ctx0, q_nope, q_pe, 0);
                cb(Qcur, "Qcur", il);

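                // k_pe holds a single head; ggml_repeat broadcasts it to q_pe's shape so it
                // can be concatenated with the per-head k_nope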
                ggml_tensor * Kcur = ggml_concat(ctx0, k_nope, ggml_repeat(ctx0, k_pe, q_pe), 0);
                cb(Kcur, "Kcur", il);

                if (inp_attn_scale) {
                    // apply llama 4 temperature scaling
                    Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
                    cb(Qcur, "Qcur_attn_temp_scaled", il);
                }

                // note: MLA without the absorption optimization converts into MHA (ie: GQA with full n_head groups)
                cur = build_attn(inp_attn_kv,
                            model.layers[il].wo, NULL,
                            Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            }
        }
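        // on the last layer, keep only the rows whose outputs are actually needed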
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        if ((uint32_t) il < hparams.n_layer_dense_lead) {
            cur = build_ffn(cur,
                model.layers[il].ffn_up, NULL, NULL,
                model.layers[il].ffn_gate, NULL, NULL,
                model.layers[il].ffn_down, NULL, NULL,
                NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch
            ggml_tensor * moe_out = build_moe_ffn(cur,
                model.layers[il].ffn_gate_inp,
                model.layers[il].ffn_up_exps,
                model.layers[il].ffn_gate_exps,
                model.layers[il].ffn_down_exps,
                model.layers[il].ffn_exp_probs_b,
                n_expert, n_expert_used,
                LLM_FFN_SILU, hparams.expert_weights_norm,
                true, hparams.expert_weights_scale,
                (llama_expert_gating_func_type) hparams.expert_gating_func,
                il);
            cb(moe_out, "ffn_moe_out", il);

            // FFN shared expert
            {
                ggml_tensor * ffn_shexp =
                    build_ffn(cur,
                        model.layers[il].ffn_up_shexp, NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);

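                // the shared expert runs on every token; its output is added on top of the
                // routed experts' output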
                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }
        cur = ggml_add(ctx0, cur, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }
    cur = inpL;

    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = ggml_mul_mat(ctx0, model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}