#include "models.h"

llm_build_llama_iswa::llm_build_llama_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    // temperature tuning
    ggml_tensor * inp_attn_scale = build_inp_attn_scale();

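    // KV cache input for interleaved SWA (iSWA): sliding-window and full-attention layers alternate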
    auto * inp_attn = build_attn_inp_kv_iswa();

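    // fall back to 1/sqrt(n_embd_head) when the model does not provide an explicit attention scale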
    const float kq_scale = hparams.f_attention_scale == 0.0f ? 1.0f/sqrtf(float(n_embd_head)) : hparams.f_attention_scale;

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);

        ggml_tensor * inpSA = inpL;

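        // layers where (il + 1) is a multiple of n_no_rope_layer_step skip RoPE (NoPE layers)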
        // This overlaps with SWA layers in current models, so get_rope_freq_base/scale may be superfluous
        const bool use_rope = hparams.n_no_rope_layer_step > 0 &&
                              (il + 1) % hparams.n_no_rope_layer_step != 0;

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // rope freq factors for llama3; may return nullptr for llama2 and other models
            ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }
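
            // split Q/K/V into per-head 3D views: [n_embd_head, n_head(_kv), n_tokens]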
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

            if (use_rope) {
                Qcur = ggml_rope_ext(
                        ctx0, Qcur, inp_pos, rope_factors,
                        n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );

                Kcur = ggml_rope_ext(
                        ctx0, Kcur, inp_pos, rope_factors,
                        n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                        ext_factor, attn_factor, beta_fast, beta_slow
                        );
            } else if (inp_attn_scale) {
                // NoPE layer: scale Q by the per-token attention temperature instead of applying RoPE
                Qcur = ggml_mul(ctx0, Qcur, inp_attn_scale);
            }

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            if (use_rope && hparams.use_kq_norm) {
                // Llama4TextL2Norm
                Qcur = ggml_rms_norm(ctx0, Qcur, hparams.f_norm_rms_eps);
                Kcur = ggml_rms_norm(ctx0, Kcur, hparams.f_norm_rms_eps);
                cb(Qcur, "Qcur_normed", il);
                cb(Kcur, "Kcur_normed", il);
            }
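
            // attention over the iSWA KV cache; wo/bo apply the output projection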
            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
        }
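
        // on the last layer, keep only the rows selected by inp_out_ids (tokens whose outputs are needed)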
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // feed-forward network (non-MoE)
        if (model.layers[il].ffn_gate_inp == nullptr) {
            cur = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(cur, "ffn_norm", il);

            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   model.layers[il].ffn_up_b,   NULL,
                    model.layers[il].ffn_gate, model.layers[il].ffn_gate_b, NULL,
                    model.layers[il].ffn_down, model.layers[il].ffn_down_b, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE feed-forward network
            ggml_tensor * ffn_inp_normed = build_norm(ffn_inp,
                    model.layers[il].ffn_norm, NULL,
                    LLM_NORM_RMS, il);
            cb(ffn_inp_normed, "ffn_norm", il);

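            // routed experts, selected by ffn_gate_inp with sigmoid gating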
            ggml_tensor * moe_out = build_moe_ffn(ffn_inp_normed,
                    model.layers[il].ffn_gate_inp,
                    model.layers[il].ffn_up_exps,
                    model.layers[il].ffn_gate_exps,
                    model.layers[il].ffn_down_exps,
                    nullptr,
                    n_expert, n_expert_used,
                    LLM_FFN_SILU, false,
                    false, 0.0,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SIGMOID,
                    il);

            // Shared experts
            ggml_tensor * shexp_out = build_ffn(ffn_inp_normed,
                model.layers[il].ffn_up_shexp,   NULL, NULL,
                model.layers[il].ffn_gate_shexp, NULL, NULL,
                model.layers[il].ffn_down_shexp, NULL, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(shexp_out, "ffn_moe_shexp", il);

            cur = ggml_add(ctx0, moe_out, shexp_out);
            cb(cur, "ffn_moe_out_merged", il);
        }

        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "ffn_out", il);

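        // apply the per-layer control vector, if one is set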
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}