#include "models.h"

llm_build_modern_bert::llm_build_modern_bert(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const int64_t n_embd_gqa  = hparams.n_embd_v_gqa();

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);

    ggml_tensor * cur;
    ggml_tensor * inpL;
    ggml_tensor * inp_pos = build_inp_pos();

    // construct the input token embeddings
    // (only token embeddings are used here; positions are encoded via RoPE in the attention layers)
    inpL = build_inp_embd(model.tok_embd);
    cb(inpL, "inp_embd", -1);

    // embedding layer norm
    inpL = build_norm(inpL, model.tok_norm, nullptr, LLM_NORM, -1);
    cb(inpL, "inp_norm", -1);

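    // indices of the tokens for which output embeddings are requested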
    ggml_tensor * inp_out_ids = build_inp_out_ids();

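    // encoder-style (non-causal) attention input, no KV cache is used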
    auto * inp_attn = build_attn_inp_no_cache();

    for (int il = 0; il < n_layer; ++il) {
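        // ModernBERT alternates global and local (sliding-window) attention layers,
        // which can use different RoPE frequency settings, hence the per-layer lookup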
        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);

        cur = inpL;

        // attention layer norm
        if (model.layers[il].attn_norm) {
            cur = build_norm(inpL,
                    model.layers[il].attn_norm, NULL,
                    LLM_NORM, il);
            cb(cur, "attn_norm", il);
        }

        // self attention
        cur = build_lora_mm(model.layers[il].wqkv, cur);
        cb(cur, "wqkv", il);

        const size_t type_size = ggml_type_size(cur->type);

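        // split the fused QKV projection into Q, K and V views
        // (K and V use n_head_kv heads, so this also covers the GQA case)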
        ggml_tensor * Qcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head,    n_tokens, n_embd_head*type_size, cur->nb[1], 0*type_size*(n_embd));
        ggml_tensor * Kcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*type_size, cur->nb[1], 1*type_size*(n_embd));
        ggml_tensor * Vcur = ggml_view_3d(ctx0, cur, n_embd_head, n_head_kv, n_tokens, n_embd_head*type_size, cur->nb[1], 1*type_size*(n_embd + n_embd_gqa));

        // RoPE
        Qcur = ggml_rope_ext(
                ctx0, Qcur, inp_pos, nullptr,
                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                ext_factor, attn_factor, beta_fast, beta_slow
                );

        Kcur = ggml_rope_ext(
                ctx0, Kcur, inp_pos, nullptr,
                n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                ext_factor, attn_factor, beta_fast, beta_slow
                );

        cb(Qcur, "Qcur", il);
        cb(Kcur, "Kcur", il);
        cb(Vcur, "Vcur", il);

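        // scaled dot-product attention, followed by the output projection wo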
        cur = build_attn(inp_attn,
                model.layers[il].wo, nullptr,
                Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f/sqrtf(float(n_embd_head)), il);
        cb(cur, "kqv_out", il);

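        // on the last layer, keep only the rows for which outputs were requested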
        if (il == n_layer - 1 && inp_out_ids) {
            cur  = ggml_get_rows(ctx0, cur,  inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }

        // re-add the layer input
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);

        // FFN layer norm
        cur = build_norm(ffn_inp,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM, il);
        cb(cur, "ffn_norm", il);

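        // GeGLU feed-forward: ffn_up is expected to hold the fused gate+up projection,
        // which LLM_FFN_GEGLU splits in half internally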
        cur = build_ffn(cur,
                model.layers[il].ffn_up,   NULL, NULL,
                NULL,                      NULL, NULL,
                model.layers[il].ffn_down, NULL, NULL,
                NULL,
                LLM_FFN_GEGLU, LLM_FFN_SEQ, il);

        // residual connection: the attention output bypasses the FFN (intermediate) layer
        cur = ggml_add(ctx0, cur, ffn_inp);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM, -1);
    cb(cur, "final_norm_out", -1);

    if (hparams.pooling_type == LLAMA_POOLING_TYPE_CLS) {
        // CLS pooling: take the embedding of the first token
        cur = ggml_view_1d(ctx0, cur, hparams.n_embd, 0);
        cb(cur, "cls_pooled_embd", -1);
    }

    cb(cur, "res_embd", -1);
    res->t_embd = cur;
    ggml_build_forward_expand(gf, cur);
}