diff options
| author | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
|---|---|---|
| committer | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-12 20:57:17 +0100 |
| commit | b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch) | |
| tree | 211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/src/models/rwkv7.cpp | |
| download | llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz | |
Engage!
Diffstat (limited to 'llama.cpp/src/models/rwkv7.cpp')
| -rw-r--r-- | llama.cpp/src/models/rwkv7.cpp | 90 |
1 files changed, 90 insertions, 0 deletions
diff --git a/llama.cpp/src/models/rwkv7.cpp b/llama.cpp/src/models/rwkv7.cpp new file mode 100644 index 0000000..5caf655 --- /dev/null +++ b/llama.cpp/src/models/rwkv7.cpp | |||
| @@ -0,0 +1,90 @@ | |||
| 1 | #include "models.h" | ||
| 2 | |||
// Builds the ggml compute graph for an RWKV7-architecture model.
//
// Each layer has two branches — "time mix" and "channel mix" — and both
// consume x_prev, the previous token's normed hidden state. That state is
// carried across ubatches through a per-layer recurrent "token shift"
// buffer that stores one row per branch (hence token_shift_count == 2).
llm_build_rwkv7::llm_build_rwkv7(const llama_model & model, const llm_graph_params & params) :
    llm_build_rwkv7_base(model, params) {
    // Two shift rows per sequence: row 0 feeds the time-mix branch,
    // row 1 (offset n_embd) feeds the channel-mix branch — see the views below.
    GGML_ASSERT(hparams.token_shift_count == 2);

    ggml_tensor * cur;
    ggml_tensor * inpL;
    // NOTE(review): passed into build_rwkv7_time_mix each layer starting as
    // nullptr — presumably populated by the first layer and reused by later
    // ones; confirm against build_rwkv7_time_mix's signature (likely a ref).
    ggml_tensor * v_first = nullptr;

    inpL = build_inp_embd(model.tok_embd);
    // RWKV normalizes the raw token embeddings before the first layer.
    inpL = build_norm(inpL, model.tok_norm, model.tok_norm_b, LLM_NORM, -1);

    // Recurrent-state input (backing storage for the per-layer token shifts).
    auto * rs_inp = build_rs_inp();

    const auto n_embd = hparams.n_embd;
    const auto n_seq_tokens = ubatch.n_seq_tokens;
    const auto n_seqs = ubatch.n_seqs;

    ggml_tensor * inp_out_ids = build_inp_out_ids();

    for (int il = 0; il < n_layer; ++il) {
        const llama_layer * layer = &model.layers[il];
        // Work in [n_embd, n_seq_tokens, n_seqs] so shift state lines up
        // per-sequence rather than per-flattened-token.
        inpL = ggml_reshape_3d(ctx0, inpL, n_embd, n_seq_tokens, n_seqs);

        ggml_tensor * token_shift = build_rwkv_token_shift_load(rs_inp, ubatch, il);

        // Split the loaded state into the two branch rows:
        // att_shift = row 0, ffn_shift = row 1 (byte offset n_embd elements).
        ggml_tensor * att_shift =
            ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1], token_shift->nb[2], 0);
        ggml_tensor * ffn_shift = ggml_view_3d(ctx0, token_shift, n_embd, 1, n_seqs, token_shift->nb[1],
                                               token_shift->nb[2], n_embd * ggml_element_size(token_shift));

        ggml_tensor * att_norm = build_norm(inpL, layer->attn_norm, layer->attn_norm_b, LLM_NORM, il);
        cb(att_norm, "attn_norm", il);

        // x_prev[i] = normed hidden state of token i-1: the carried-in shift
        // row for token 0, then att_norm tokens 0..n_seq_tokens-2.
        ggml_tensor * x_prev = ggml_concat(
            ctx0, att_shift,
            ggml_view_3d(ctx0, att_norm, n_embd, n_seq_tokens - 1, n_seqs, att_norm->nb[1], att_norm->nb[2], 0), 1);

        cur = build_rwkv7_time_mix(rs_inp, att_norm, x_prev, v_first, ubatch, il);

        // Residual connection around the time-mix branch.
        ggml_tensor * ffn_inp = ggml_add(ctx0, cur, inpL);
        cb(ffn_inp, "ffn_inp", il);

        // Note: the channel-mix norm weights live in attn_norm_2 for this arch.
        ggml_tensor * ffn_norm = build_norm(ffn_inp, layer->attn_norm_2, layer->attn_norm_2_b, LLM_NORM, il);
        cb(ffn_norm, "ffn_norm", il);

        // Same previous-token construction for the channel-mix branch.
        x_prev = ggml_concat(
            ctx0, ffn_shift,
            ggml_view_3d(ctx0, ffn_norm, n_embd, n_seq_tokens - 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2], 0), 1);

        // New shift state for the next ubatch: the LAST token's att_norm and
        // ffn_norm rows (offset (n_seq_tokens-1) rows into each tensor),
        // concatenated in the same [att, ffn] row order the views above read.
        token_shift = ggml_concat(ctx0,
                                  ggml_view_3d(ctx0, att_norm, n_embd, 1, n_seqs, att_norm->nb[1], att_norm->nb[2],
                                               (n_seq_tokens - 1) * n_embd * ggml_element_size(att_norm)),
                                  ggml_view_3d(ctx0, ffn_norm, n_embd, 1, n_seqs, ffn_norm->nb[1], ffn_norm->nb[2],
                                               (n_seq_tokens - 1) * n_embd * ggml_element_size(ffn_norm)),
                                  1);
        // Expand the store into the graph now, before the tensors below are
        // narrowed to output rows only.
        ggml_build_forward_expand(gf, build_rwkv_token_shift_store(token_shift, ubatch, il));

        // Back to flat [n_embd, n_tokens] for row gathering / channel mix.
        ffn_inp = ggml_reshape_2d(ctx0, ffn_inp, n_embd, n_tokens);
        ffn_norm = ggml_reshape_2d(ctx0, ffn_norm, n_embd, n_tokens);
        x_prev = ggml_reshape_2d(ctx0, x_prev, n_embd, n_tokens);

        // On the final layer, keep only the rows whose outputs are requested.
        if (il == n_layer - 1 && inp_out_ids) {
            ffn_inp = ggml_get_rows(ctx0, ffn_inp, inp_out_ids);
            ffn_norm = ggml_get_rows(ctx0, ffn_norm, inp_out_ids);
            x_prev = ggml_get_rows(ctx0, x_prev, inp_out_ids);
        }
        cur = build_rwkv7_channel_mix(layer, ffn_norm, x_prev, LLM_ARCH_RWKV7);
        // Residual connection around the channel-mix branch.
        cur = ggml_add(ctx0, cur, ffn_inp);

        // Apply control vector (if any) for this layer.
        cur = build_cvec(cur, il);
        cb(cur, "l_out", il);

        // input for next layer
        inpL = cur;
    }
    cur = inpL;
    // Final output norm, then the LM head (with LoRA applied if loaded).
    cur = build_norm(cur, model.output_norm, model.output_norm_b, LLM_NORM, -1);

    cb(cur, "result_norm", -1);
    res->t_embd = cur;

    cur = build_lora_mm(model.output, cur);

    cb(cur, "result_output", -1);
    res->t_logits = cur;

    ggml_build_forward_expand(gf, cur);
}
