| author | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
|---|---|---|
| committer | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
| commit | b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch) | |
| tree | 211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/src/models/llada-moe.cpp | |
| download | llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz | |
Engage!
Diffstat (limited to 'llama.cpp/src/models/llada-moe.cpp')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | llama.cpp/src/models/llada-moe.cpp | 122 |
1 file changed, 122 insertions(+), 0 deletions(-)
```diff
diff --git a/llama.cpp/src/models/llada-moe.cpp b/llama.cpp/src/models/llada-moe.cpp
new file mode 100644
index 0000000..5f64686
--- /dev/null
+++ b/llama.cpp/src/models/llada-moe.cpp
@@ -0,0 +1,122 @@
+#include "models.h"
+
+llm_build_llada_moe::llm_build_llada_moe(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
+    const int64_t n_embd_head = hparams.n_embd_head_v;
+
+    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
+    GGML_ASSERT(n_embd_head == hparams.n_rot);
+
+    ggml_tensor * cur;
+    ggml_tensor * inpL;
+
+    inpL = build_inp_embd(model.tok_embd);
+
+    // inp_pos - contains the positions
+    ggml_tensor * inp_pos = build_inp_pos();
+
+    auto * inp_attn = build_attn_inp_no_cache();
+
+    ggml_tensor * inp_out_ids = build_inp_out_ids();
+
+    for (int il = 0; il < n_layer; ++il) {
+        ggml_tensor * inpSA = inpL;
+
+        // norm
+        cur = build_norm(inpL,
+                model.layers[il].attn_norm, NULL,
+                LLM_NORM_RMS, il);
+        cb(cur, "attn_norm", il);
+
+        // self_attention
+        {
+            // compute Q and K and RoPE them
+            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
+            cb(Qcur, "Qcur", il);
+
+            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
+            cb(Kcur, "Kcur", il);
+
+            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
+            cb(Vcur, "Vcur", il);
+
+            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
+            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
+            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);
+
+            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
+            cb(Qcur, "Qcur_normed", il);
+
+            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
+            cb(Kcur, "Kcur_normed", il);
+
+            Qcur = ggml_rope_ext(
+                    ctx0, Qcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                    );
+
+            Kcur = ggml_rope_ext(
+                    ctx0, Kcur, inp_pos, nullptr,
+                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
+                    ext_factor, attn_factor, beta_fast, beta_slow
+                    );
+
+            cb(Qcur, "Qcur", il);
+            cb(Kcur, "Kcur", il);
+            cb(Vcur, "Vcur", il);
+
+            cur = build_attn(inp_attn,
+                    model.layers[il].wo, NULL,
+                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
+        }
+        if (il == n_layer - 1 && inp_out_ids) {
+            cur   = ggml_get_rows(ctx0,   cur, inp_out_ids);
+            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
+        }
+        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
+        cb(ffn_inp, "ffn_inp", il);
+
+        // MoE branch
+        cur = build_norm(ffn_inp,
+                model.layers[il].ffn_norm, NULL,
+                LLM_NORM_RMS, il);
+        cb(cur, "ffn_norm", il);
+
+        cur = build_moe_ffn(cur,
+                model.layers[il].ffn_gate_inp,
+                model.layers[il].ffn_up_exps,
+                model.layers[il].ffn_gate_exps,
+                model.layers[il].ffn_down_exps,
+                nullptr,
+                n_expert, n_expert_used,
+                LLM_FFN_SILU, false,
+                false, 0.0,
+                LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
+                il);
+        cb(cur, "ffn_moe_out", il);
+
+        cur = ggml_add(ctx0, cur, ffn_inp);
+
+        cur = build_cvec(cur, il);
+        cb(cur, "l_out", il);
+
+        // input for next layer
+        inpL = cur;
+    }
+    cur = inpL;
+
+    cur = build_norm(cur,
+            model.output_norm, NULL,
+            LLM_NORM_RMS, -1);
+
+    cb(cur, "result_norm", -1);
+    res->t_embd = cur;
+
+    // lm_head
+    cur = build_lora_mm(model.output, cur);
+
+    cb(cur, "result_output", -1);
+    res->t_logits = cur;
+
+    ggml_build_forward_expand(gf, cur);
+}
```
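A few notes on what the graph above builds. Each layer uses the usual pre-norm residual layout: RMS norm, self-attention, residual add, RMS norm, MoE feed-forward, residual add. Every `build_norm(..., LLM_NORM_RMS, ...)` call, including the per-head `attn_q_norm`/`attn_k_norm` applied before RoPE, is an RMS normalization. A minimal standalone sketch of that operation (the name `rms_norm` and the `eps` value are illustrative; the commit delegates this to ggml):

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// RMSNorm sketch: y[i] = w[i] * x[i] / sqrt(mean(x^2) + eps).
// Illustrative only; build_norm() runs this on the ggml graph instead.
std::vector<float> rms_norm(const std::vector<float> & x,
                            const std::vector<float> & w,
                            float eps = 1e-6f) {
    float ss = 0.0f;
    for (float v : x) ss += v * v;                     // sum of squares
    const float scale = 1.0f / std::sqrt(ss / x.size() + eps);
    std::vector<float> y(x.size());
    for (size_t i = 0; i < x.size(); ++i) {
        y[i] = w[i] * x[i] * scale;                    // normalize, then weight
    }
    return y;
}
```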
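After the Q/K norms, `ggml_rope_ext` rotates the query and key heads by position. Setting aside the context-extension knobs it also takes (`freq_scale`, `ext_factor`, `attn_factor`, `beta_fast`, `beta_slow`) and the dimension-pairing order selected by `rope_type`, plain RoPE rotates consecutive dimension pairs by an angle that grows with position and shrinks with pair index. A sketch under those simplifications, with hypothetical names:

```cpp
#include <cmath>
#include <vector>

// Plain RoPE sketch for one head vector x of even dimension d:
// the pair (x[i], x[i+1]) is rotated by theta = pos * freq_base^(-i/d).
// ggml_rope_ext additionally applies freq_scale and the YaRN-style
// extension parameters, and may pair dimensions differently per rope_type.
void rope_inplace(std::vector<float> & x, int pos, float freq_base = 10000.0f) {
    const int d = (int) x.size();
    for (int i = 0; i + 1 < d; i += 2) {
        const float theta = pos * std::pow(freq_base, -(float) i / d);
        const float c = std::cos(theta);
        const float s = std::sin(theta);
        const float x0 = x[i];
        const float x1 = x[i + 1];
        x[i]     = x0 * c - x1 * s;
        x[i + 1] = x0 * s + x1 * c;
    }
}
```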
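The attention input comes from `build_attn_inp_no_cache()`, i.e. no KV cache is used; this fits LLaDA's diffusion-style decoding, which denoises whole sequences rather than generating strictly left to right, so attention here need not be causal (an inference from the call, not something the commit states). The `1.0f/sqrtf(float(n_embd_head))` argument to `build_attn` is the standard dot-product scale. A single-head sketch of unmasked attention under that reading, all names hypothetical:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <vector>

// Unmasked scaled dot-product attention for one head: every token attends
// to every token, scaled by 1/sqrt(d). q, k, v are [n_tokens][d].
std::vector<std::vector<float>> attend(const std::vector<std::vector<float>> & q,
                                       const std::vector<std::vector<float>> & k,
                                       const std::vector<std::vector<float>> & v) {
    const size_t n = q.size(), d = q[0].size();
    const float scale = 1.0f / std::sqrt((float) d);
    std::vector<std::vector<float>> out(n, std::vector<float>(d, 0.0f));
    for (size_t i = 0; i < n; ++i) {
        std::vector<float> w(n);
        float mx = -1e30f;
        for (size_t j = 0; j < n; ++j) {               // scores over ALL positions
            float s = 0.0f;
            for (size_t t = 0; t < d; ++t) s += q[i][t] * k[j][t];
            w[j] = s * scale;
            mx   = std::max(mx, w[j]);
        }
        float sum = 0.0f;                              // numerically stable softmax
        for (size_t j = 0; j < n; ++j) sum += w[j] = std::exp(w[j] - mx);
        for (size_t j = 0; j < n; ++j) {
            for (size_t t = 0; t < d; ++t) out[i][t] += (w[j] / sum) * v[j][t];
        }
    }
    return out;
}
```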
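Finally, the feed-forward is a mixture of experts: `build_moe_ffn` routes each token through `n_expert_used` of `n_expert` SiLU-gated expert FFNs (`ffn_gate_exps`/`ffn_up_exps`/`ffn_down_exps`), with `LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX` selecting experts via a softmax over the router logits from `ffn_gate_inp`. Assuming the first `false` in the call is the weight-renormalization flag, the selected experts keep their raw softmax weights. A sketch of just the gating step under those assumptions, names hypothetical:

```cpp
#include <algorithm>
#include <cmath>
#include <cstddef>
#include <numeric>
#include <utility>
#include <vector>

// Softmax-then-top-k gating sketch: softmax over all expert logits, keep
// the k most probable experts, and weight each expert's output by its raw
// softmax probability (no renormalization, per the assumption above).
std::vector<std::pair<int, float>> route(const std::vector<float> & logits, int k) {
    const float mx = *std::max_element(logits.begin(), logits.end());
    std::vector<float> p(logits.size());
    float sum = 0.0f;
    for (size_t i = 0; i < logits.size(); ++i) sum += p[i] = std::exp(logits[i] - mx);
    for (float & v : p) v /= sum;                      // softmax probabilities

    std::vector<int> idx(p.size());                    // rank experts by probability
    std::iota(idx.begin(), idx.end(), 0);
    std::partial_sort(idx.begin(), idx.begin() + k, idx.end(),
                      [&](int a, int b) { return p[a] > p[b]; });

    std::vector<std::pair<int, float>> out;            // (expert id, gate weight)
    for (int i = 0; i < k; ++i) out.emplace_back(idx[i], p[idx[i]]);
    return out;
}
```

Each selected expert then computes a SiLU-gated FFN on the token, and the gate-weighted expert outputs are summed before the residual add back onto `ffn_inp`.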
