#include "models.h"

llm_build_dbrx::llm_build_dbrx(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

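    // attention input wired to the KV cache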
    auto * inp_attn = build_attn_inp_kv();

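    // indices of the tokens for which outputs (logits/embeddings) are needed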
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            ggml_tensor * Qcur = nullptr;
            ggml_tensor * Kcur = nullptr;
            ggml_tensor * Vcur = nullptr;

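            // fused QKV projection: a single matmul yields Q, K and V concatenated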
            cur = build_lora_mm(model.layers[il].wqkv, cur);
            cb(cur, "wqkv", il);

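            // DBRX clamps the QKV activations to [-f_clamp_kqv, f_clamp_kqv]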
            cur = ggml_clamp(ctx0, cur, -hparams.f_clamp_kqv, hparams.f_clamp_kqv);
            cb(cur, "wqkv_clamped", il);

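            // split the fused result into Q/K/V views; K and V use n_head_kv heads (GQA)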
            Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head,    n_tokens, n_embd_head*sizeof(float), cur->nb[1], 0*sizeof(float)*(n_embd));
            Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd));
            Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*sizeof(float), cur->nb[1], 1*sizeof(float)*(n_embd + n_embd_gqa));

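            // apply rotary position embeddings to Q and K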
            Qcur = ggml_rope_ext(
                    ctx0, Qcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            Kcur = ggml_rope_ext(
                    ctx0, Kcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

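            // scaled dot-product attention, followed by the output projection wo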
            cur = build_attn(inp_attn,
                    model.layers[il].wo, NULL,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
        }

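        // on the last layer, keep only the rows selected by inp_out_ids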
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur,   inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // feed-forward network
        // MoE branch
        cur = build_norm(ffn_inp,
                model.layers[il].attn_out_norm, NULL,
                LLM_NORM, il);
        cb(cur, "attn_out_norm", il);

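        // route each token to n_expert_used of n_expert SiLU experts via softmax gating (expert weights normalized)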
        cur = build_moe_ffn(cur,
                model.layers[il].ffn_gate_inp,
                model.layers[il].ffn_up_exps,
                model.layers[il].ffn_gate_exps,
                model.layers[il].ffn_down_exps,
                nullptr,
                n_expert, n_expert_used,
                LLM_FFN_SILU, true,
                false, 0.0,
                LLAMA_EXPERT_GATING_FUNC_TYPE_SOFTMAX,
                il);
        cb(cur, "ffn_moe_out", il);

        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "ffn_out", il);

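        // apply the control vector (if any) to the layer output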
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

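    // expand the forward graph from the final logits tensor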
    ggml_build_forward_expand(gf, cur);
}