#include "models.h"

llm_build_llada::llm_build_llada(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    // LLaDA is similar to LLaMA but uses non-causal attention for diffusion
    const int64_t n_embd_head = hparams.n_embd_head_v;

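    // all heads share a single size and RoPE rotates the full head dimension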
    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

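    // no KV cache: diffusion decoding re-evaluates the full sequence on each
    // denoising step, so every token attends to every other token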
    // non-causal attention for diffusion
    auto * inp_attn = build_attn_inp_no_cache();

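    // indices of the tokens whose outputs are actually needed; used to trim
    // the tail of the graph on the last layer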
    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL, model.layers[il].attn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);

        // self-attention
        {
            // compute separate Q, K, V projections without bias, matching LLaDALlamaBlock
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

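            // split the flat projections into per-head views of shape
            // [n_embd_head, n_head(_kv), n_tokens]; n_head_kv < n_head means
            // grouped-query attention with shared K/V heads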
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

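            // rotary position embeddings are applied to Q and K only; V carries
            // no positional information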
            Qcur = ggml_rope_ext(ctx0, Qcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                    ext_factor, attn_factor, beta_fast, beta_slow);

            Kcur = ggml_rope_ext(ctx0, Kcur, inp_pos, nullptr, n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                                    ext_factor, attn_factor, beta_fast, beta_slow);

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

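            // scaled dot-product attention over the full, unmasked sequence,
            // followed by the output projection wo; the usual 1/sqrt(d_head) scale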
            cur = build_attn(inp_attn,
                    model.layers[il].wo, NULL,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, 1.0f / sqrtf(float(n_embd_head)), il);
        }

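        // on the last layer, keep only the rows whose outputs were requested,
        // so the final norm and lm_head run on just those tokens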
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }

        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        // feed-forward network
        cur = build_norm(ffn_inp, model.layers[il].ffn_norm, NULL, LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);

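        // SwiGLU MLP: down(silu(gate(x)) * up(x)), as in LLaMA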
        cur = build_ffn(cur,
                model.layers[il].ffn_up, NULL, NULL,
                model.layers[il].ffn_gate, NULL, NULL,
                model.layers[il].ffn_down, NULL, NULL,
                NULL, LLM_FFN_SILU, LLM_FFN_PAR, il);
        cb(cur, "ffn_out", il);

        cur = ggml_add(ctx0, cur, ffn_inp);

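        // apply any loaded control vector to this layer's output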
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }

    cur = inpL;

    cur = build_norm(cur, model.output_norm, NULL, LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
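    // expose the final hidden states (before the output projection) as embeddings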
    res->t_embd = cur;

    // lm_head
    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

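    // expand the graph with all operations needed to compute the logits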
    ggml_build_forward_expand(gf, cur);
}