#include "models.h"

llm_build_bailingmoe2::llm_build_bailingmoe2(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn = build_attn_inp_kv();

    ggml_tensor * inp_out_ids = build_inp_out_ids();

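    // the trailing hparams.nextn_predict_layers layers are NextN prediction layers and are not part of the main forward graph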
    const int n_transformer_layers = n_layer - hparams.nextn_predict_layers;
    for (int il = 0; il < n_transformer_layers; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self_attention
        {
            cur = build_lora_mm(model.layers[il].wqkv, cur);
            cb(cur, "wqkv", il);

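            // split the fused QKV projection into Q, K and V views (K/V use n_head_kv heads)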
            ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head, n_tokens, n_embd_head * sizeof(float),
                                              cur->nb[1], 0 * sizeof(float) * (n_embd));
            ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head * sizeof(float),
                                              cur->nb[1], 1 * sizeof(float) * (n_embd));
            ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head * sizeof(float),
                                              cur->nb[1], 1 * sizeof(float) * (n_embd + n_embd_gqa));

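            // per-head RMS norm on Q and K, followed by rotary position embeddings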
            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);

            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);

            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
            cb(Kcur, "Kcur_normed", il);

            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

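            // attention with output projection wo (and bias bo), scaled by 1/sqrt(n_embd_head)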
            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
        }

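        // on the last built layer, keep only the rows needed for the requested outputs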
        if (il == n_transformer_layers - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * sa_out = ggml_add(ctx0, cur, inpSA);
        cb(sa_out, "sa_out", il);

        // MoE branch
        cur = build_norm(sa_out, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

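        // the first hparams.n_layer_dense_lead layers use a dense FFN, the rest use MoE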
        if (static_cast<uint32_t>(il) < hparams.n_layer_dense_lead) {
            cur = build_ffn(cur,
                    model.layers[il].ffn_up, NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        } else {
            ggml_tensor * moe_out = build_moe_ffn(cur,
                model.layers[il].ffn_gate_inp,
                model.layers[il].ffn_up_exps,
                model.layers[il].ffn_gate_exps,
                model.layers[il].ffn_down_exps,
                model.layers[il].ffn_exp_probs_b,
                n_expert, n_expert_used,
                LLM_FFN_SILU, hparams.expert_weights_norm,
                true, hparams.expert_weights_scale,
                (llama_expert_gating_func_type) hparams.expert_gating_func,
                il);
            cb(moe_out, "ffn_moe_out", il);

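            // shared expert branch, applied unconditionally and added to the routed experts' output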
            {
                ggml_tensor * ffn_shexp =
                    build_ffn(cur,
                        model.layers[il].ffn_up_shexp, NULL, NULL,
                        model.layers[il].ffn_gate_shexp, NULL, NULL,
                        model.layers[il].ffn_down_shexp, NULL, NULL,
                        NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
                cb(ffn_shexp, "ffn_shexp", il);

                cur = ggml_add(ctx0, moe_out, ffn_shexp);
                cb(cur, "ffn_out", il);
            }
        }

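        // residual connection around the FFN/MoE branch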
        cur = ggml_add(ctx0, cur, sa_out);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}