#include "models.h"

llm_build_cogvlm::llm_build_cogvlm(const llama_model & model, const llm_graph_params & params) :
    llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;
    const float kq_scale = 1.0f / sqrtf(float(n_embd_head));

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * inpL;
    ggml_tensor * cur;

    inpL = build_inp_embd(model.tok_embd);

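    // token positions, consumed by the rotary position embeddings below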
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn = build_attn_inp_kv();

    // check ubatch to see if we have input tokens (text)
    // or an input embedding vector (image)
    const bool is_text = ubatch.token != nullptr;

    for (int il = 0; il < n_layer; ++il) {
        // get either the text or image weight tensors
        ggml_tensor * wqkv, * wo;
        ggml_tensor * ffn_gate, * ffn_down, * ffn_up;

        if (is_text) {
            wqkv     = model.layers[il].wqkv;
            wo       = model.layers[il].wo;
            ffn_gate = model.layers[il].ffn_gate;
            ffn_down = model.layers[il].ffn_down;
            ffn_up   = model.layers[il].ffn_up;
        } else {
            wqkv     = model.layers[il].visexp_attn_wqkv;
            wo       = model.layers[il].visexp_attn_wo;
            ffn_gate = model.layers[il].visexp_ffn_gate;
            ffn_down = model.layers[il].visexp_ffn_down;
            ffn_up   = model.layers[il].visexp_ffn_up;
        }

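        // normalize the residual stream before self-attention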
        ggml_tensor * inpSA = inpL;
        cur = build_norm(inpSA, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);

        // build self attention
        {
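            // fused QKV projection using the weights selected above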
            ggml_tensor * qkv = build_lora_mm(wqkv, cur);

            // split qkv into Q, K, V along the first dimension
            ggml_tensor * Qcur = ggml_view_3d(ctx0, qkv, n_embd_head, n_head,    n_tokens,
                                              n_embd_head * sizeof(float), qkv->nb[1], 0);
            ggml_tensor * Kcur = ggml_view_3d(ctx0, qkv, n_embd_head, n_head_kv, n_tokens,
                                              n_embd_head * sizeof(float), qkv->nb[1], n_embd * ggml_element_size(qkv));
            ggml_tensor * Vcur = ggml_view_3d(ctx0, qkv, n_embd_head, n_head_kv, n_tokens,
                                              n_embd_head * sizeof(float), qkv->nb[1], 2 * n_embd * ggml_element_size(qkv));

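            // apply rotary position embeddings to the query and key heads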
            Qcur = ggml_rope(ctx0, Qcur, inp_pos, n_embd_head, rope_type);
            Kcur = ggml_rope(ctx0, Kcur, inp_pos, n_embd_head, rope_type);

            cur = build_attn(inp_attn,
                    wo, nullptr,
                    Qcur, Kcur, Vcur,
                    nullptr, nullptr, nullptr,
                    kq_scale, il);
            cb(cur, "attn_out", il);
        }

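        // residual connection around the attention block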
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

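        // SwiGLU feed-forward network (SiLU gate with parallel gate/up projections)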
        cur = build_ffn(cur,
                ffn_up,   NULL, NULL,
                ffn_gate, NULL, NULL,
                ffn_down, NULL, NULL,
                NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);

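        // residual connection around the feed-forward block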
        cur = ggml_add(ctx0, cur, ffn_inp);
        cb(cur, "ffn_out", il);

        inpL = cur;
    }

    cur = inpL;

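    // final RMS norm over the last layer's output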
    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);
    cb(cur, "result_norm", -1);
    res->t_embd = cur;

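    // project to vocabulary logits with the output head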
    cur = build_lora_mm(model.output, cur);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}