#include "models.h"

llm_build_exaone_moe::llm_build_exaone_moe(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_k;

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_v);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

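    // interleaved sliding-window attention (iSWA) input: provides the KV cache
    // views for both the local (sliding-window) and global attention layers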
    auto * inp_attn_iswa = build_attn_inp_kv_iswa();

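    // indices of the tokens for which outputs (logits) are required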
    ggml_tensor * inp_out_ids = build_inp_out_ids();

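    // the trailing nextn_predict_layers are next-token-prediction (MTP) layers;
    // they are not part of the main decode graph, so only the layers before them are built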
    const int n_transformer_layers = n_layer - hparams.nextn_predict_layers;
    for (int il = 0; il < n_transformer_layers; ++il) {
        ggml_tensor * inpSA = inpL;

        // hybrid attention: RoPE is applied only to the local (sliding-window) layers,
        // while global layers use no positional encoding (NoPE)
        const bool is_local_layer = hparams.is_swa(il);

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);

            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

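            // QK-norm: per-head RMS normalization of Q and K, applied before RoPE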
            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);
            cb(Kcur, "Kcur_normed", il);

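            // global (non-SWA) layers skip RoPE entirely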
            if (is_local_layer) {
                Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base,
                                     freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);

                Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, rope_factors, n_rot, rope_type, n_ctx_orig, freq_base,
                                     freq_scale, ext_factor, attn_factor, beta_fast, beta_slow);
            }

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

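            // scaled dot-product attention with the standard 1/sqrt(n_embd_head) scale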
            cur = build_attn(inp_attn_iswa,
                model.layers[il].wo, NULL,
                Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
            cb(cur, "attn_out", il);
        }
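
        // on the last layer, keep only the rows for which outputs were requested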
        if (il == n_transformer_layers - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
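
        // residual connection around the attention block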
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // norm
        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        // feed-forward network
        if (model.layers[il].ffn_gate_inp == nullptr) {
            // dense branch: layers without an expert router use a regular gated (SwiGLU) FFN
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL,
                    LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            // MoE branch: route each token to n_expert_used of the n_expert experts
            ggml_tensor * moe_out = build_moe_ffn(cur,
                model.layers[il].ffn_gate_inp,
                model.layers[il].ffn_up_exps,
                model.layers[il].ffn_gate_exps,
                model.layers[il].ffn_down_exps,
                model.layers[il].ffn_exp_probs_b,
                n_expert, n_expert_used,
                LLM_FFN_SILU, hparams.expert_weights_norm,
                true, hparams.expert_weights_scale,
                (llama_expert_gating_func_type) hparams.expert_gating_func,
                il);
            cb(moe_out, "ffn_moe_out", il);

            // FFN shared expert
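            // the shared expert runs on every token; its output is added to the routed experts' output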
            {
                ggml_tensor * ffn_shexp = build_ffn(cur,
                        model.layers[il].ffn_up_shexp,   NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL,
                        LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }

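        // residual connection around the FFN block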
        cur = ggml_add(ctx0, cur, ffn_inp);

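        // apply the control vector for this layer, if one is loaded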
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    // final norm
    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}