#include "models.h"

llm_build_gemma_embedding::llm_build_gemma_embedding(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_k;

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // important: do not normalize weights for raw embeddings input (i.e. encoded image embeddings)
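    // (ubatch.token is unset when the input consists of pre-computed embeddings rather than token ids)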
    inpL = ggml_scale(ctx0, inpL, ubatch.token ? sqrtf(n_embd) : 1.0f);
    cb(inpL, "inp_scaled", -1);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

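    // no KV cache - the embedding graph attends over the full batch in a single forward pass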
    auto * inp_attn = build_attn_inp_no_cache();

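    // indices of the rows to extract as output (typically all tokens for an embedding model)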
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
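        // per-layer RoPE parameters - Gemma-style models mix local (sliding-window) and
        // global attention layers, which use different frequency bases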
        const float freq_base_l  = model.get_rope_freq_base (cparams, il);
        const float freq_scale_l = model.get_rope_freq_scale(cparams, il);

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);

            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);

            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);

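            // split Q/K/V into per-head views: [n_embd_head, n_head(_kv), n_tokens]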
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head,    n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

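            // normalize the Q and K heads before applying RoPE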
            Qcur = build_norm(Qcur, model.layers[il].attn_q_norm, NULL, LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_normed", il);

            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                    ext_factor, attn_factor, beta_fast, beta_slow);

            Kcur = build_norm(Kcur, model.layers[il].attn_k_norm, NULL, LLM_NORM_RMS, il);
            cb(Kcur, "Kcur_normed", il);

            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr,
                    n_rot, rope_type, n_ctx_orig, freq_base_l, freq_scale_l,
                    ext_factor, attn_factor, beta_fast, beta_slow);

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            // ref: https://github.com/google/gemma_pytorch/blob/014acb7ac4563a5f77c76d7ff98f31b568c16508/gemma/model.py#L315
            Qcur = ggml_scale(ctx0, Qcur, hparams.f_attention_scale);

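            // Q was pre-scaled above, so a neutral kq_scale of 1.0f is passed to build_attn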
            cur = build_attn(inp_attn,
                    model.layers[il].wo, NULL,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f, il);
        }

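        // on the last layer, keep only the rows that are needed for the output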
        if (il == n_layer - 1 && inp_out_ids) {
            cur  = ggml_get_rows(ctx0, cur,  inp_out_ids);
            inpL = ggml_get_rows(ctx0, inpL, inp_out_ids);
        }

        cur = build_norm(cur, model.layers[il].attn_post_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_post_norm", il);

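        // residual connection around the attention block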
        ggml_tensor * sa_out = ggml_add(ctx0, cur, inpL);
        cb(sa_out, "sa_out", il);

        cur = build_norm(sa_out, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

        // feed-forward network
        {
            cur = build_ffn(cur,
                    model.layers[il].ffn_up,   NULL, NULL,
                    model.layers[il].ffn_gate, NULL, NULL,
                    model.layers[il].ffn_down, NULL, NULL,
                    NULL, LLM_FFN_GELU, LLM_FFN_PAR, il);
            cb(cur, "ffn_out", il);
        }

        cur = build_norm(cur, model.layers[il].ffn_post_norm, NULL, LLM_NORM_RMS, -1);
        cb(cur, "ffn_post_norm", -1);

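        // residual connection around the FFN block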
        cur = ggml_add(ctx0, cur, sa_out);

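        // apply the control vector, if one is loaded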
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

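    // final RMS norm over the output of the last layer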
    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;
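    // note: t_embd holds the per-token embeddings - any pooling (e.g. mean) is applied outside this builder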

    ggml_build_forward_expand(gf, cur);
}