#include "models.h"

llm_build_openai_moe_iswa::llm_build_openai_moe_iswa(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

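    // attention input for an interleaved SWA (iSWA) KV cache - some layers
    // attend over the full context while others use a sliding window
    // (for gpt-oss-style models the two kinds typically alternate)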
    auto * inp_attn = build_attn_inp_kv_iswa();

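    // ids of the output tokens - used on the last layer to skip rows whose
    // logits are not needed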
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
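        // RoPE parameters are resolved per layer, since sliding-window and
        // full-attention layers may be trained with different base/scale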
        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);

        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, nullptr,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }

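            // split into heads - the head size equals n_rot, so RoPE below
            // covers the full head dimension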
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_rot, n_head,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_rot, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_rot, n_head_kv, n_tokens);

            Qcur = ggml_rope_ext(
                    ctx0, Qcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            Kcur = ggml_rope_ext(
                    ctx0, Kcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

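            // attention with sinks: attn_sinks holds per-head learned logits that
            // enter the softmax normalization, letting a head attend to "nothing";
            // KQ is scaled by 1/sqrt(head size) as usual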
            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, model.layers[il].attn_sinks, nullptr, 1.0f/sqrtf(float(n_rot)), il);

            cb(cur, "attn_out", il);
        }

        if (il == n_layer - 1) {
            // skip computing output for unused tokens
            cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = ffn_inp;
        cur = build_norm(cur,
                model.layers[il].attn_post_norm, nullptr,
                LLM_NORM_RMS, il);
        cb(cur, "attn_post_norm", il);

        // MoE branch
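        // top-n_expert_used routing over n_expert experts, all projections with
        // per-expert biases; LLM_FFN_SWIGLU_OAI_MOE is the gpt-oss SwiGLU variant
        // (clamped, with a +1 on the linear branch) and the gating softmax is
        // taken over the selected experts' scores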
        cur = build_moe_ffn(cur,
                model.layers[il].ffn_gate_inp,  model.layers[il].ffn_gate_inp_b,
                model.layers[il].ffn_up_exps,   model.layers[il].ffn_up_exps_b,
                model.layers[il].ffn_gate_exps, model.layers[il].ffn_gate_exps_b,
                model.layers[il].ffn_down_exps, model.layers[il].ffn_down_exps_b,
                nullptr,
                n_expert, n_expert_used,
                LLM_FFN_SWIGLU_OAI_MOE, false,
                false, 0.0,
                LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX_WEIGHT,
                il);
        cb(cur, "ffn_moe_out", il);

        cur = ggml_add(ctx0, cur, ffn_inp);

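        // apply the control vector for this layer, if one is loaded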
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

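    // final norm - the normalized embeddings are exposed via res->t_embd below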
    cur = build_norm(cur,
            model.output_norm, nullptr,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}