#include "models.h"

llm_build_grovemoe::llm_build_grovemoe(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const int64_t n_embd_head    = hparams.n_embd_head_v;
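    // GroveMoE: each group of n_group_experts routed experts shares one small
    // adjugate ("chunk") expert, giving n_expert / n_group_experts chunk experts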
    const int64_t n_chunk_expert = n_expert / hparams.n_group_experts;

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

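    // attention input backed by the KV cache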
    auto * inp_attn = build_attn_inp_kv();

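    // inp_out_ids - contains the ids of the tokens for which outputs are needed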
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);

            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

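            // QK-norm: per-head RMS norm of Q and K before applying RoPE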
            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);

            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);

            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
            cb(Kcur, "Kcur_normed", il);

            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                 ext_factor, attn_factor, beta_fast, beta_slow);

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
        }

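        // on the last layer, keep only the rows selected by inp_out_ids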
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // MoE branch
        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

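        // router logits, computed once and shared by both expert passes below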
        ggml_tensor * probs = build_lora_mm(model.layers[il].ffn_gate_inp, cur);  // [n_expert, n_tokens]
        cb(probs, "ffn_moe_logits", il);

        ggml_tensor * moe_out =
            build_moe_ffn(cur,
                nullptr,
                model.layers[il].ffn_up_exps,
                model.layers[il].ffn_gate_exps,
                model.layers[il].ffn_down_exps,
                nullptr,
                n_expert, n_expert_used,
                LLM_FFN_SILU, true,
                false, 0.0,
                LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                il,
                probs);
        cb(moe_out, "ffn_moe_out", il);
        cur = moe_out;

        // TODO: Only do the expert selection and weights once
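        // adjugate ("chunk") experts: a second, smaller MoE pass that reuses the
        // router probabilities computed above, capped at n_chunk_expert experts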
        moe_out = build_moe_ffn(cur,
                    nullptr,
                    model.layers[il].ffn_up_chexps,
                    model.layers[il].ffn_gate_chexps,
                    model.layers[il].ffn_down_chexps,
                    nullptr,
                    n_chunk_expert, n_expert_used > n_chunk_expert ? n_chunk_expert : n_expert_used,
                    LLM_FFN_SILU, true,
                    false, 0.0,
                    LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                    il,
                    probs);
        cb(moe_out, "ffn_adj_moe_out", il);

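        // combine: main MoE output plus the adjugate output scaled by expert_group_scale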
        cur = ggml_add(ctx0, cur, ggml_scale(ctx0, moe_out, hparams.expert_group_scale));
        cb(cur, "ffn_final_moe_out", il);

        cur = ggml_add(ctx0, cur, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}