| author | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
|---|---|---|
| committer | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
| commit | b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch) | |
| tree | 211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/src/models/hunyuan-dense.cpp | |
| download | llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz | |
Engage!
Diffstat (limited to 'llama.cpp/src/models/hunyuan-dense.cpp')
| mode | file | lines |
|---|---|---|
| -rw-r--r-- | llama.cpp/src/models/hunyuan-dense.cpp | 132 |

1 file changed, 132 insertions, 0 deletions
diff --git a/llama.cpp/src/models/hunyuan-dense.cpp b/llama.cpp/src/models/hunyuan-dense.cpp
new file mode 100644
index 0000000..7d5dcc7
--- /dev/null
+++ b/llama.cpp/src/models/hunyuan-dense.cpp
@@ -0,0 +1,132 @@
#include "models.h"

llm_build_hunyuan_dense::llm_build_hunyuan_dense(const llama_model & model, const llm_graph_params & params) : llm_graph_context(params) {
    const int64_t n_embd_head = hparams.n_embd_head_v;

    GGML_ASSERT(n_embd_head == hparams.n_embd_head_k);
    GGML_ASSERT(n_embd_head == hparams.n_rot);

    ggml_tensor * cur;
    ggml_tensor * inpL;

    inpL = build_inp_embd(model.tok_embd);

    // inp_pos - contains the positions
    ggml_tensor * inp_pos = build_inp_pos();

    auto * inp_attn = build_attn_inp_kv();

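    // standard attention scaling factor: 1/sqrt(head_dim)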
    const float kq_scale = 1.0f / sqrtf(float(n_embd_head));

    ggml_tensor * inp_out_ids = build_inp_out_ids();

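    // each layer: pre-norm self-attention and a pre-norm SwiGLU FFN, both with residual connections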
    for (int il = 0; il < n_layer; ++il) {
        ggml_tensor * inpSA = inpL;

        // norm
        cur = build_norm(inpL,
                model.layers[il].attn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "attn_norm", il);
        // self-attention
        {
            // rope freq factors for llama3; may return nullptr for llama2 and other models
            ggml_tensor * rope_factors = model.get_rope_factors(cparams, il);

            // compute Q and K and RoPE them
            ggml_tensor * Qcur = build_lora_mm(model.layers[il].wq, cur);
            cb(Qcur, "Qcur", il);
            if (model.layers[il].bq) {
                Qcur = ggml_add(ctx0, Qcur, model.layers[il].bq);
                cb(Qcur, "Qcur", il);
            }
            ggml_tensor * Kcur = build_lora_mm(model.layers[il].wk, cur);
            cb(Kcur, "Kcur", il);
            if (model.layers[il].bk) {
                Kcur = ggml_add(ctx0, Kcur, model.layers[il].bk);
                cb(Kcur, "Kcur", il);
            }
            ggml_tensor * Vcur = build_lora_mm(model.layers[il].wv, cur);
            cb(Vcur, "Vcur", il);
            if (model.layers[il].bv) {
                Vcur = ggml_add(ctx0, Vcur, model.layers[il].bv);
                cb(Vcur, "Vcur", il);
            }
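            // split the flat Q/K/V projections into per-head views: (head_dim, n_heads, n_tokens)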
            Qcur = ggml_reshape_3d(ctx0, Qcur, n_embd_head, n_head, n_tokens);
            Kcur = ggml_reshape_3d(ctx0, Kcur, n_embd_head, n_head_kv, n_tokens);
            Vcur = ggml_reshape_3d(ctx0, Vcur, n_embd_head, n_head_kv, n_tokens);

            Qcur = ggml_rope_ext(
                    ctx0, Qcur, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

            cb(Qcur, "Qcur", il);
            cb(Kcur, "Kcur", il);
            cb(Vcur, "Vcur", il);

            Kcur = ggml_rope_ext(
                    ctx0, Kcur, inp_pos, rope_factors,
                    n_rot, rope_type, n_ctx_orig, freq_base, freq_scale,
                    ext_factor, attn_factor, beta_fast, beta_slow
                    );

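            // RMS-normalize the rotated keys and queries (QK-norm), applied after RoPE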
            Kcur = build_norm(Kcur,
                    model.layers[il].attn_k_norm, nullptr,
                    LLM_NORM_RMS, il);
            cb(Kcur, "Kcur_norm", il);

            Qcur = build_norm(Qcur,
                    model.layers[il].attn_q_norm, nullptr,
                    LLM_NORM_RMS, il);
            cb(Qcur, "Qcur_norm", il);

            cur = build_attn(inp_attn,
                    model.layers[il].wo, model.layers[il].bo,
                    Qcur, Kcur, Vcur, nullptr, nullptr, nullptr, kq_scale, il);
            cb(cur, "attn_out", il);
        }
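        // on the last layer, keep only the rows selected for output (skips computing unused logits)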
        if (il == n_layer - 1 && inp_out_ids) {
            cur   = ggml_get_rows(ctx0, cur, inp_out_ids);
            inpSA = ggml_get_rows(ctx0, inpSA, inp_out_ids);
        }
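        // residual connection around the attention block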
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpSA);
        cb(ffn_inp, "ffn_inp", il);

        cur = build_norm(ffn_inp,
                model.layers[il].ffn_norm, NULL,
                LLM_NORM_RMS, il);
        cb(cur, "ffn_norm", il);
        // feed-forward network (non-MoE)
        ggml_tensor * cur_mlp = build_ffn(cur,
                model.layers[il].ffn_up, NULL, NULL,
                model.layers[il].ffn_gate, NULL, NULL,
                model.layers[il].ffn_down, NULL, NULL,
                NULL,
                LLM_FFN_SILU, LLM_FFN_PAR, il);
        cb(cur_mlp, "ffn_out", il);

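        // residual connection around the FFN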
        cur = ggml_add(ctx0, cur_mlp, ffn_inp);

        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }
    cur = inpL;

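    // final RMS norm over the last layer's output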
    cur = build_norm(cur,
            model.output_norm, NULL,
            LLM_NORM_RMS, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;
    // lm_head
    cur = build_lora_mm(model.output, cur);
    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}
