#include "llama-memory-recurrent.h"

#include "llama-impl.h"
#include "llama-io.h"
#include "llama-batch.h"
#include "llama-model.h"

#include <algorithm>
#include <cassert>
#include <cstring>
#include <limits>
#include <map>
#include <stdexcept>

//
// llama_memory_recurrent
//
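// recurrent-state memory (Mamba/RWKV-style): each cell can hold the complete
// state of one sequence, tracked through the cell's "tail" index, instead of
// one KV cell per token; r_l and s_l store one flat tensor per layer with one
// row of n_embd_r()/n_embd_s() elements per cell
//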

llama_memory_recurrent::llama_memory_recurrent(
        const llama_model & model,
        ggml_type type_r,
        ggml_type type_s,
        bool offload,
        uint32_t mem_size,
        uint32_t n_seq_max,
        const layer_filter_cb & filter) : hparams(model.hparams), n_seq_max(n_seq_max) {
    const int32_t n_layer = hparams.n_layer;

    head = 0;
    size = mem_size;
    used = 0;

    cells.clear();
    cells.resize(mem_size);

    // define a comparator for the buft -> ctx map to ensure that the order is well-defined:
    struct ggml_backend_buft_comparator {
        bool operator()(const ggml_backend_buffer_type_t & lhs, const ggml_backend_buffer_type_t & rhs) const {
            return strcmp(ggml_backend_buft_name(lhs), ggml_backend_buft_name(rhs)) < 0;
        }
    };
    std::map<ggml_backend_buffer_type_t, ggml_context_ptr, ggml_backend_buft_comparator> ctx_map;

    // create a context for each buffer type
    auto ctx_for_buft = [&](ggml_backend_buffer_type_t buft) -> ggml_context * {
        auto it = ctx_map.find(buft);
        if (it == ctx_map.end()) {
            ggml_init_params params = {
                /*.mem_size   =*/ size_t(2u*n_layer*ggml_tensor_overhead()),
                /*.mem_buffer =*/ NULL,
                /*.no_alloc   =*/ true,
            };

            ggml_context * ctx = ggml_init(params);
            if (!ctx) {
                return nullptr;
            }

            ctx_map.emplace(buft, ctx);

            return ctx;
        }

        return it->second.get();
    };

    r_l.resize(n_layer);
    s_l.resize(n_layer);

    for (int i = 0; i < n_layer; i++) {
        if (filter && !filter(i)) {
            LLAMA_LOG_DEBUG("%s: layer %3d: skipped\n", __func__, i);
            continue;
        }

        const char * dev_name = "CPU";

        ggml_backend_buffer_type_t buft = ggml_backend_cpu_buffer_type();

        if (offload) {
            auto * dev = model.dev_layer(i);
            buft = ggml_backend_dev_buffer_type(dev);

            dev_name = ggml_backend_dev_name(dev);
        }

        LLAMA_LOG_DEBUG("%s: layer %3d: dev = %s\n", __func__, i, dev_name);

        ggml_context * ctx = ctx_for_buft(buft);
        if (!ctx) {
            throw std::runtime_error("failed to create ggml context for rs cache");
        }

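        // one flat tensor per layer and per state type: R packs mem_size rows of
        // n_embd_r() elements, S packs mem_size rows of n_embd_s() elements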
        ggml_tensor * r = ggml_new_tensor_1d(ctx, type_r, hparams.n_embd_r()*mem_size);
        ggml_tensor * s = ggml_new_tensor_1d(ctx, type_s, hparams.n_embd_s()*mem_size);
        ggml_format_name(r, "cache_r_l%d", i);
        ggml_format_name(s, "cache_s_l%d", i);
        r_l[i] = r;
        s_l[i] = s;
    }

    // allocate tensors and initialize the buffers to avoid NaNs in the padding
    for (auto & [buft, ctx] : ctx_map) {
        ggml_backend_buffer_t buf = ggml_backend_alloc_ctx_tensors_from_buft(ctx.get(), buft);
        if (!buf) {
            throw std::runtime_error("failed to allocate buffer for rs cache");
        }
        ggml_backend_buffer_clear(buf, 0);
        LLAMA_LOG_INFO("%s: %10s RS buffer size = %8.2f MiB\n", __func__, ggml_backend_buffer_name(buf), ggml_backend_buffer_get_size(buf)/1024.0/1024.0);
        ctxs_bufs.emplace_back(std::move(ctx), buf);
    }

    {
        const size_t memory_size_r = size_r_bytes();
        const size_t memory_size_s = size_s_bytes();

        LLAMA_LOG_INFO("%s: size = %7.2f MiB (%6u cells, %3d layers, %2u seqs), R (%s): %7.2f MiB, S (%s): %7.2f MiB\n", __func__,
                (float)(memory_size_r + memory_size_s) / (1024.0f * 1024.0f), mem_size, n_layer, n_seq_max,
                ggml_type_name(type_r), (float)memory_size_r / (1024.0f * 1024.0f),
                ggml_type_name(type_s), (float)memory_size_s / (1024.0f * 1024.0f));
    }
}

void llama_memory_recurrent::clear(bool data) {
    for (int32_t i = 0; i < (int32_t) size; ++i) {
        cells[i].pos = -1;
        cells[i].seq_id.clear();
        cells[i].src = -1;
        cells[i].tail = -1;
    }

    head = 0;
    used = 0;

    if (data) {
        for (auto & [_, buf] : ctxs_bufs) {
            ggml_backend_buffer_clear(buf.get(), 0);
        }
    }
}

bool llama_memory_recurrent::seq_rm(llama_seq_id seq_id, llama_pos p0, llama_pos p1) {
    //printf("[DEBUG] calling llama_memory_recurrent::seq_rm` with `seq_id=%d, p0=%d, p1=%d`\n", seq_id, p0, p1);
    uint32_t new_head = size;

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // models like Mamba or RWKV can't have a state partially erased at the end
    // of the sequence because their state isn't preserved for previous tokens
    if (seq_id >= (int64_t) size) {
        // could be fatal
        return false;
    }
    if (0 <= seq_id) {
        int32_t & tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            const auto & cell = cells[tail_id];
            // partial intersection is invalid if it includes the final pos
            if (0 < p0 && p0 <= cell.pos && p1 > cell.pos) {
                //printf("[DEBUG] inside `llama_memory_recurrent::seq_rm`: partial intersection is invalid, so returning false\n");
                return false;
            }
            // invalidate tails which will be cleared
            if (p0 <= cell.pos && cell.pos < p1) {
                tail_id = -1;
            }
        }
    } else {
        // when seq_id is negative, the range should include everything or nothing
        if (p0 != p1 && (p0 != 0 || p1 != std::numeric_limits<llama_pos>::max())) {
            //printf("[DEBUG] inside `llama_memory_recurrent::seq_rm`: `seq_id` is negative, so returning false\n");
            return false;
        }
    }

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].pos >= p0 && cells[i].pos < p1) {
            if (seq_id < 0) {
                cells[i].seq_id.clear();
            } else if (cells[i].has_seq_id(seq_id)) {
                cells[i].seq_id.erase(seq_id);
            } else {
                continue;
            }
            if (cells[i].is_empty()) {
                // keep count of the number of used cells
                if (cells[i].pos >= 0) {
                    used--;
                }
                cells[i].pos = -1;
                cells[i].src = -1;
                if (new_head == size) {
                    new_head = i;
                }
            }
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }

    return true;
}

void llama_memory_recurrent::seq_cp(llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) {
    if (seq_id_src == seq_id_dst) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

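    // for recurrent state, "copying" a sequence only re-points the destination's
    // tail at the source's tail cell; the destination's previous tail cell is
    // released first if no other sequence still uses it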
    if ((uint32_t) seq_id_dst < size && (uint32_t) seq_id_src < size) {
        auto & tail_src = cells[seq_id_src];
        auto & tail_dst = cells[seq_id_dst];
        if (tail_dst.tail >= 0) {
            // clear destination seq_id if it wasn't empty
            auto & cell_dst = cells[tail_dst.tail];

            cell_dst.seq_id.erase(seq_id_dst);
            tail_dst.tail = -1;
            if (cell_dst.seq_id.empty()) {
                cell_dst.pos = -1;
                cell_dst.src = -1;
                used -= 1;
            }
        }
        if (tail_src.tail >= 0) {
            auto & cell_src = cells[tail_src.tail];

            cell_src.seq_id.insert(seq_id_dst);
            tail_dst.tail = tail_src.tail;
        }
    }
}

void llama_memory_recurrent::seq_keep(llama_seq_id seq_id) {
    uint32_t new_head = size;

    for (uint32_t i = 0; i < size; ++i) {
        if ((llama_seq_id) i != seq_id) {
            cells[i].tail = -1;
        }

        if (!cells[i].has_seq_id(seq_id)) {
            if (cells[i].pos >= 0) {
                used--;
            }

            cells[i].pos = -1;
            cells[i].src = -1;
            cells[i].seq_id.clear();

            if (new_head == size) {
                new_head = i;
            }
        } else {
            cells[i].seq_id.clear();
            cells[i].seq_id.insert(seq_id);
        }
    }

    // If we freed up a slot, set head to it so searching can start there.
    if (new_head != size && new_head < head) {
        head = new_head;
    }
}

void llama_memory_recurrent::seq_add(llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) {
    if (shift == 0) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be shifted
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            auto & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos += shift;
            }
        }
    }
}

void llama_memory_recurrent::seq_div(llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) {
    if (d == 1) {
        return;
    }

    if (p0 < 0) {
        p0 = 0;
    }

    if (p1 < 0) {
        p1 = std::numeric_limits<llama_pos>::max();
    }

    // If there is no range then return early to avoid looping over the cache.
    if (p0 == p1) {
        return;
    }

    // for Mamba-like or RWKV models, only the pos needs to be changed
    if (0 <= seq_id && seq_id < (int64_t) size) {
        const int32_t tail_id = cells[seq_id].tail;
        if (tail_id >= 0) {
            auto & cell = cells[tail_id];
            if (cell.has_seq_id(seq_id) && p0 <= cell.pos && cell.pos < p1) {
                cell.pos /= d;
            }
        }
    }
}

llama_pos llama_memory_recurrent::seq_pos_min(llama_seq_id seq_id) const {
    llama_pos result = std::numeric_limits<llama_pos>::max();

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id)) {
            result = std::min(result, cells[i].pos);
        }
    }

    if (result == std::numeric_limits<llama_pos>::max()) {
        result = -1;
    }

    return result;
}

llama_pos llama_memory_recurrent::seq_pos_max(llama_seq_id seq_id) const {
    llama_pos result = -1;

    for (uint32_t i = 0; i < size; ++i) {
        if (cells[i].has_seq_id(seq_id)) {
            result = std::max(result, cells[i].pos);
        }
    }

    return result;
}

std::map<ggml_backend_buffer_type_t, size_t> llama_memory_recurrent::memory_breakdown() const {
    std::map<ggml_backend_buffer_type_t, size_t> ret;
    for (const auto & [_, buf] : ctxs_bufs) {
        ret[ggml_backend_buffer_get_type(buf.get())] += ggml_backend_buffer_get_size(buf.get());
    }
    return ret;
}

llama_memory_context_ptr llama_memory_recurrent::init_batch(llama_batch_allocr & balloc, uint32_t n_ubatch, bool embd_all) {
    do {
        balloc.split_reset();

        std::vector<llama_ubatch> ubatches;
        while (true) {
            llama_ubatch ubatch;

            if (embd_all) {
                // if all tokens are output, split by sequence
                ubatch = balloc.split_seq(n_ubatch);
            } else {
                // TODO: non-sequential equal split can be done if using unified KV cache
                // for simplicity, we always use sequential equal split for now
                ubatch = balloc.split_equal(n_ubatch, true);
            }

            if (ubatch.n_tokens == 0) {
                break;
            }

            ubatches.push_back(std::move(ubatch)); // NOLINT
        }

        if (balloc.get_n_used() < balloc.get_n_tokens()) {
            // failed to find a suitable split
            break;
        }

        if (!prepare(ubatches)) {
            break;
        }

        return std::make_unique<llama_memory_recurrent_context>(this, std::move(ubatches));
    } while (false);

    return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_FAILED_PREPARE);
}

llama_memory_context_ptr llama_memory_recurrent::init_full() {
    return std::make_unique<llama_memory_recurrent_context>(this);
}

llama_memory_context_ptr llama_memory_recurrent::init_update(llama_context * lctx, bool optimize) {
    GGML_UNUSED(lctx);
    GGML_UNUSED(optimize);

    return std::make_unique<llama_memory_recurrent_context>(LLAMA_MEMORY_STATUS_NO_UPDATE);
}

bool llama_memory_recurrent::prepare(const std::vector<llama_ubatch> & ubatches) {
    // simply remember the full state because it is very small for this type of cache
    // TODO: optimize
    auto org_cells = cells;
    auto org_used = used;
    auto org_head = head;

    bool success = true;

    for (const auto & ubatch : ubatches) {
        if (!find_slot(ubatch)) {
            success = false;
            break;
        }
    }

    // restore the original state
    cells = std::move(org_cells);
    used = org_used;
    head = org_head;

    return success;
}

bool llama_memory_recurrent::find_slot(const llama_ubatch & ubatch) {
    const uint32_t n_seq_tokens = ubatch.n_seq_tokens;
    const uint32_t n_seqs = ubatch.n_seqs;

    // if we have enough unused cells before the current head ->
    //   better to start searching from the beginning of the cache, hoping to fill it
    if (head > used + 2*n_seqs) {
        head = 0;
    }

    // For recurrent state architectures (like Mamba or RWKV),
    // each cache cell can store the state for a whole sequence.
    // A slot should always be contiguous.

    // can only process batches with an equal number of new tokens in each sequence
    GGML_ASSERT(ubatch.equal_seqs());

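    // [min, max] will end up bracketing the contiguous range of cells used by
    // the sequences of this ubatch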
    int32_t min = size - 1;
    int32_t max = 0;

    // everything should fit if all seq_ids are smaller than the max
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens; // first token of sequence set s
        const uint32_t n_seq_id = ubatch.n_seq_id[i];

        for (uint32_t j = 0; j < n_seq_id; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[i][j];

            if (seq_id < 0 || (uint32_t) seq_id >= size) {
                // too big seq_id
                // TODO: would it be possible to resize the cache instead?
                LLAMA_LOG_ERROR("%s: seq_id=%d >= n_seq_max=%u Try using a bigger --parallel value\n", __func__, seq_id, n_seq_max);
                return false;
            }
            if (j > 0) {
                auto & seq = cells[seq_id];
                if (seq.tail >= 0) {
                    auto & cell = cells[seq.tail];
                    // clear cells from seq_ids that become shared
                    // (should not normally happen, but let's handle it anyway)
                    cell.seq_id.erase(seq_id);
                    seq.tail = -1;
                    if (cell.seq_id.empty()) {
                        cell.pos = -1;
                        cell.src = -1;
                        used -= 1;
                    }
                }
            }
        }
    }

#ifndef NDEBUG
    {
        std::vector<int32_t> tails_verif;
        tails_verif.assign(size, -1);
        for (uint32_t i = 0; i < size; ++i) {
            auto & cell = cells[i];
            for (llama_seq_id seq_id : cell.seq_id) {
                if (tails_verif[seq_id] != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tails_verif[seq_id]);
                }
                tails_verif[seq_id] = i;
            }
        }
        for (uint32_t i = 0; i < size; ++i) {
            if (tails_verif[i] != cells[i].tail) {
                LLAMA_LOG_ERROR("%s: wrong tail for seq_id %d, (%d instead of %d)\n", __func__, i, cells[i].tail, tails_verif[i]);
            }
        }
    }
#endif

    // find next empty cell
    uint32_t next_empty_cell = head;

    for (uint32_t i = 0; i < size; ++i) {
        if (next_empty_cell >= size) { next_empty_cell -= size; }
        auto & cell = cells[next_empty_cell];
        if (cell.is_empty()) { break; }
        next_empty_cell += 1;
    }

    // find usable cell range
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens;
        const llama_seq_id seq_id = ubatch.seq_id[i][0];
        auto & seq_meta = cells[seq_id];
        bool has_cell = false;
        if (seq_meta.tail >= 0) {
            auto & cell = cells[seq_meta.tail];
            GGML_ASSERT(cell.has_seq_id(seq_id));
            // does this seq_id "own" the cell?
            if (cell.seq_id.size() == 1) { has_cell = true; }
        }
        if (!has_cell) {
            auto & empty_cell = cells[next_empty_cell];
            GGML_ASSERT(empty_cell.is_empty());
            // copy old tail into the empty cell
            if (seq_meta.tail >= 0) {
                auto & orig_cell = cells[seq_meta.tail];
                empty_cell.pos = orig_cell.pos;
                empty_cell.src = orig_cell.src;
                orig_cell.seq_id.erase(seq_id);
                empty_cell.seq_id.insert(seq_id); // will be overwritten
                GGML_ASSERT(!orig_cell.is_empty()); // has at least one remaining seq_id
            }
            seq_meta.tail = next_empty_cell;
            // find next empty cell
            if (s + 1 < n_seqs) {
                for (uint32_t j = 0; j < size; ++j) {
                    next_empty_cell += 1;
                    if (next_empty_cell >= size) { next_empty_cell -= size; }
                    auto & cell = cells[next_empty_cell];
                    if (cell.is_empty()) { break; }
                }
            }
        }
        if (min > seq_meta.tail) { min = seq_meta.tail; }
        if (max < seq_meta.tail) { max = seq_meta.tail; }
    }

    // gather and re-order
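    // move each sequence's tail cell into the contiguous window starting at min,
    // swapping cell metadata and re-pointing any tails that referenced the swapped cells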
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens;
        const int32_t dst_id = s + min;
        const int32_t src_id = cells[ubatch.seq_id[i][0]].tail;
        if (dst_id != src_id) {
            auto & dst_cell = cells[dst_id];
            auto & src_cell = cells[src_id];

            std::swap(dst_cell.pos, src_cell.pos);
            std::swap(dst_cell.src, src_cell.src);
            std::swap(dst_cell.seq_id, src_cell.seq_id);

            // swap tails
            for (uint32_t j = 0; j < size; ++j) {
                int32_t & tail = cells[j].tail;
                if (tail == src_id) {
                    tail = dst_id;
                } else if (tail == dst_id) {
                    tail = src_id;
                }
            }
        }
    }

    // update the pos of the used seqs
    for (uint32_t s = 0; s < n_seqs; ++s) {
        const uint32_t i = s*n_seq_tokens;
        const llama_pos last_pos = ubatch.pos[i + n_seq_tokens - 1];
        const int32_t cell_id = s + min;
        auto & cell = cells[cell_id];

        if (cell.pos >= 0 && last_pos != cell.pos + (llama_pos) n_seq_tokens) {
            // What should happen when the pos backtracks or skips a value?
            // Clearing the state mid-batch would require special-casing which isn't done.
            LLAMA_LOG_WARN("%s: non-consecutive token position %d after %d for sequence %d with %u new tokens\n",
                __func__, last_pos, cell.pos, ubatch.seq_id[i][0], n_seq_tokens);
        }
        cell.pos = last_pos;
        cell.seq_id.clear();
        for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) {
            const llama_seq_id seq_id = ubatch.seq_id[i][j];
            cell.seq_id.insert(seq_id);
            cells[seq_id].tail = cell_id;
        }
    }

    // Find first cell without src refs, to use as the zero-ed state
    {
        // TODO: bake-in src refcounts in the cell metadata
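        // cells whose src is negative have no previous state to continue from;
        // they are pointed at rs_z, the first cell in [min, max] that no other
        // cell reads from, so they can start from a zeroed state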
        std::vector<int32_t> refcounts(size, 0);
        for (size_t i = 0; i < size; ++i) {
            const int32_t src = cells[i].src;
            if (src >= 0) {
                refcounts[src] += 1;
            }
        }

        rs_z = -1;
        for (int i = min; i <= max; ++i) {
            if (refcounts[i] == 0) {
                rs_z = i;
                break;
            }
        }

        for (int i = min; i <= max; ++i) {
            if (cells[i].src < 0) {
                GGML_ASSERT(rs_z >= 0);
                cells[i].src0 = rs_z;
            } else {
                // Stage the source ids for all used cells to allow correct seq_* behavior
                // and still make these values available when setting the inputs
                cells[i].src0 = cells[i].src;
            }
            cells[i].src = i; // avoid moving or clearing twice
        }
    }

    // allow getting the range of used cells, from head to head + n
    head = min;
    n = max - min + 1;
    used = std::count_if(cells.begin(), cells.end(),
        [](const mem_cell & cell){ return !cell.is_empty(); });

    // sanity check
    return n >= n_seqs;
}

bool llama_memory_recurrent::get_can_shift() const {
    // shifting the pos is trivial for recurrent models
    return true;
}

size_t llama_memory_recurrent::total_size() const {
    size_t size = 0;
    for (const auto & [_, buf] : ctxs_bufs) {
        size += ggml_backend_buffer_get_size(buf.get());
    }

    return size;
}

size_t llama_memory_recurrent::size_r_bytes() const {
    size_t size_r_bytes = 0;

    for (const auto & r : r_l) {
        if (r != nullptr) {
            size_r_bytes += ggml_nbytes(r);
        }
    }

    return size_r_bytes;
}

size_t llama_memory_recurrent::size_s_bytes() const {
    size_t size_s_bytes = 0;

    for (const auto & s : s_l) {
        if (s != nullptr) {
            size_s_bytes += ggml_nbytes(s);
        }
    }

    return size_s_bytes;
}

void llama_memory_recurrent::state_write(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) const {
    GGML_UNUSED(flags);

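    // serialized layout: cell_count, then per-cell metadata (pos, n_seq_id and,
    // for whole-cache saves, the seq_ids), then the data section from
    // state_write_data() (s_trans flag, n_layer, and the R/S rows per layer)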
    std::vector<std::pair<uint32_t, uint32_t>> cell_ranges; // ranges, from inclusive, to exclusive
    uint32_t cell_count = 0;

    // Count the number of cells with the specified seq_id
    // Find all the ranges of cells with this seq id (or all, when -1)
    uint32_t cell_range_begin = size;
    for (uint32_t i = 0; i < size; ++i) {
        const auto & cell = cells[i];
        if ((seq_id == -1 && !cell.is_empty()) || cell.has_seq_id(seq_id)) {
            ++cell_count;
            if (cell_range_begin == size) {
                cell_range_begin = i;
            }
        } else {
            if (cell_range_begin != size) {
                cell_ranges.emplace_back(cell_range_begin, i);
                cell_range_begin = size;
            }
        }
    }
    if (cell_range_begin != size) {
        cell_ranges.emplace_back(cell_range_begin, size);
    }

    // DEBUG CHECK: Sum of cell counts in ranges should equal the total cell count
    uint32_t cell_count_check = 0;
    for (const auto & range : cell_ranges) {
        cell_count_check += range.second - range.first;
    }
    GGML_ASSERT(cell_count == cell_count_check);

    io.write(&cell_count, sizeof(cell_count));

    state_write_meta(io, cell_ranges, seq_id);
    state_write_data(io, cell_ranges);
}

void llama_memory_recurrent::state_read(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) {
    GGML_UNUSED(flags);

    uint32_t cell_count;
    io.read_to(&cell_count, sizeof(cell_count));

    bool res = true;

    res = res && state_read_meta(io, cell_count, seq_id);
    res = res && state_read_data(io, cell_count);

    if (!res) {
        if (seq_id == -1) {
            clear(true);
        } else {
            seq_rm(seq_id, -1, -1);
        }
        throw std::runtime_error("failed to restore kv cache");
    }
}

void llama_memory_recurrent::state_write_meta(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges, llama_seq_id seq_id) const {
    for (const auto & range : cell_ranges) {
        for (uint32_t i = range.first; i < range.second; ++i) {
            const auto & cell = cells[i];
            const llama_pos pos = cell.pos;
            const uint32_t n_seq_id = seq_id == -1 ? cell.seq_id.size() : 0;

            io.write(&pos, sizeof(pos));
            io.write(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id) {
                for (auto seq_id : cell.seq_id) {
                    io.write(&seq_id, sizeof(seq_id));
                }
            }
        }
    }
}

void llama_memory_recurrent::state_write_data(llama_io_write_i & io, const std::vector<std::pair<uint32_t, uint32_t>> & cell_ranges) const {
    const uint32_t s_trans = 0;
    const uint32_t n_layer = hparams.n_layer;

    io.write(&s_trans, sizeof(s_trans));
    io.write(&n_layer, sizeof(n_layer));
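    // NOTE: s_trans is always 0 here, so the S tensors below are written one row
    // per cell; the transposed branch is currently never taken when writing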

    // Iterate and write all the R tensors first, each row is a cell
    // Get whole range at a time
    for (uint32_t il = 0; il < n_layer; ++il) {
        // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null)
        if (r_l[il] == nullptr) continue;

        // Write R tensor type
        const int32_t r_type_i = (int32_t)r_l[il]->type;
        io.write(&r_type_i, sizeof(r_type_i));

        // Write row size of R tensor
        const uint64_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
        io.write(&r_size_row, sizeof(r_size_row));

        // Write each range of cells of r_size_row length
        for (const auto & range : cell_ranges) {
            const size_t range_size = range.second - range.first;
            const size_t buf_size = range_size * r_size_row;
            io.write_tensor(r_l[il], range.first * r_size_row, buf_size);
        }
    }

    if (!s_trans) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null)
            if (s_l[il] == nullptr) continue;

            // Write S tensor type
            const int32_t s_type_i = (int32_t)s_l[il]->type;
            io.write(&s_type_i, sizeof(s_type_i));

            // Write row size of S tensor
            const uint64_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
            io.write(&s_size_row, sizeof(s_size_row));

            // Write each range of S tensor rows
            for (const auto & range : cell_ranges) {
                const size_t range_size = range.second - range.first;
                const size_t buf_size = range_size * s_size_row;
                io.write_tensor(s_l[il], range.first * s_size_row, buf_size);
            }
        }
    } else {
        // When S tensor is transposed, we also need the element size and get the element ranges from each row
        const uint32_t mem_size = size;
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers (read_data will handle this by checking "r_l" and "s_l" for null)
            if (s_l[il] == nullptr) continue;

            const uint32_t n_embd_s = hparams.n_embd_s();

            // Write S tensor type
            const int32_t s_type_i = (int32_t)s_l[il]->type;
            io.write(&s_type_i, sizeof(s_type_i));

            // Write element size
            const uint32_t s_size_el = ggml_type_size(s_l[il]->type);
            io.write(&s_size_el, sizeof(s_size_el));

            // Write state embedding size
            io.write(&n_embd_s, sizeof(n_embd_s));

            // For each row, we get the element values of each cell
            for (uint32_t j = 0; j < n_embd_s; ++j) {
                // Write each range of cells of s_size_el length
                for (const auto & range : cell_ranges) {
                    const size_t range_size = range.second - range.first;
                    const size_t src_offset = (range.first + j * mem_size) * s_size_el;
                    const size_t buf_size = range_size * s_size_el;
                    io.write_tensor(s_l[il], src_offset, buf_size);
                }
            }
        }
    }
}

bool llama_memory_recurrent::state_read_meta(llama_io_read_i & io, uint32_t cell_count, llama_seq_id dest_seq_id) {
    if (dest_seq_id != -1) {
        // single sequence
        seq_rm(dest_seq_id, -1, -1);

        if (cell_count == 0) {
            return true;
        }

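        // build a temporary ubatch carrying only the restored positions so that
        // find_slot() can allocate a contiguous block of cells for the sequence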
        llama_batch_allocr balloc(hparams.n_pos_per_embd());

        llama_ubatch ubatch = balloc.ubatch_reserve(cell_count, 1);

        for (uint32_t i = 0; i < cell_count; ++i) {
            llama_pos pos;
            uint32_t n_seq_id;

            io.read_to(&pos, sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            if (n_seq_id != 0) {
                LLAMA_LOG_ERROR("%s: invalid seq_id-agnostic kv cell\n", __func__);
                return false;
            }

            ubatch.pos[i] = pos;
        }
        ubatch.n_seq_id[0] = 1;
        ubatch.seq_id[0] = &dest_seq_id;

        if (!find_slot(ubatch)) {
            LLAMA_LOG_ERROR("%s: failed to find available cells in kv cache\n", __func__);
            return false;
        }

        // DEBUG CHECK: kv.head should be our first cell, kv.head + cell_count - 1 should be our last cell (verify seq_id and pos values)
        // Assume that this is one contiguous block of cells
        GGML_ASSERT(head + cell_count <= size);
        GGML_ASSERT(cells[head].pos == ubatch.pos[0]);
        GGML_ASSERT(cells[head + cell_count - 1].pos == ubatch.pos[cell_count - 1]);
        GGML_ASSERT(cells[head].has_seq_id(dest_seq_id));
        GGML_ASSERT(cells[head + cell_count - 1].has_seq_id(dest_seq_id));
    } else {
        // whole KV cache restore

        if (cell_count > size) {
            LLAMA_LOG_ERROR("%s: not enough cells in kv cache\n", __func__);
            return false;
        }

        clear(true);

        for (uint32_t i = 0; i < cell_count; ++i) {
            auto & cell = cells[i];

            llama_pos pos;
            uint32_t n_seq_id;

            io.read_to(&pos, sizeof(pos));
            io.read_to(&n_seq_id, sizeof(n_seq_id));

            cell.pos = pos;

            for (uint32_t j = 0; j < n_seq_id; ++j) {
                llama_seq_id seq_id;
                io.read_to(&seq_id, sizeof(seq_id));

                // TODO: llama_memory_recurrent should have a notion of max sequences
                //if (seq_id < 0 || (uint32_t) seq_id >= llama_n_seq_max(ctx)) {
                if (seq_id < 0) {
                    //LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, %u)\n", __func__, seq_id, llama_n_seq_max(ctx));
                    LLAMA_LOG_ERROR("%s: invalid seq_id, %d is out of range [0, inf)\n", __func__, seq_id);
                    return false;
                }

                cell.seq_id.insert(seq_id);

                int32_t & tail = cells[seq_id].tail;
                if (tail != -1) {
                    LLAMA_LOG_ERROR("%s: duplicate tail for seq_id %d in cell %d and %d\n", __func__, seq_id, i, tail);
                    return false;
                }
                tail = i;
            }
        }

        head = 0;
        used = cell_count;
    }

    for (uint32_t i = 0; i < cell_count; ++i) {
        uint32_t cell_id = head + i;
        // make sure the recurrent states will keep their restored state
        cells[cell_id].src = cell_id;
    }

    return true;
}

bool llama_memory_recurrent::state_read_data(llama_io_read_i & io, uint32_t cell_count) {
    uint32_t s_trans;
    uint32_t n_layer;
    io.read_to(&s_trans, sizeof(s_trans));
    io.read_to(&n_layer, sizeof(n_layer));

    if (n_layer != hparams.n_layer) {
        LLAMA_LOG_ERROR("%s: mismatched layer count (%u instead of %u)\n", __func__, n_layer, hparams.n_layer);
        return false;
    }
    if (cell_count > size) {
        LLAMA_LOG_ERROR("%s: not enough cells in kv cache to restore state (%u > %u)\n", __func__, cell_count, size);
        return false;
    }
    if (false != (bool) s_trans) {
        LLAMA_LOG_ERROR("%s: incompatible s transposition\n", __func__);
        return false;
    }

    // For each layer, read the R states for each cell, one row is one cell, read as one contiguous block
    for (uint32_t il = 0; il < n_layer; ++il) {
        // skip null layers
        if (r_l[il] == nullptr) continue;

        // Read type of R tensor
        int32_t r_type_i_ref;
        io.read_to(&r_type_i_ref, sizeof(r_type_i_ref));
        const int32_t r_type_i = (int32_t) r_l[il]->type;
        if (r_type_i != r_type_i_ref) {
            LLAMA_LOG_ERROR("%s: mismatched r type (%d != %d, layer %d)\n", __func__, r_type_i, r_type_i_ref, il);
            return false;
        }

        // Read row size of R tensor
        uint64_t r_size_row_ref;
        io.read_to(&r_size_row_ref, sizeof(r_size_row_ref));
        const size_t r_size_row = ggml_row_size(r_l[il]->type, hparams.n_embd_r());
        if (r_size_row != r_size_row_ref) {
            LLAMA_LOG_ERROR("%s: mismatched r row size (%zu != %zu, layer %d)\n", __func__, r_size_row, (size_t) r_size_row_ref, il);
            return false;
        }

        if (cell_count) {
            // Read and set the R rows for the whole cell range
            ggml_backend_tensor_set(r_l[il], io.read(cell_count * r_size_row), head * r_size_row, cell_count * r_size_row);
        }
    }

    if (!s_trans) {
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers
            if (s_l[il] == nullptr) continue;

            // Read type of S tensor
            int32_t s_type_i_ref;
            io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
            const int32_t s_type_i = (int32_t)s_l[il]->type;

            if (s_type_i != s_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
                return false;
            }

            // Read row size of S tensor
            uint64_t s_size_row_ref;
            io.read_to(&s_size_row_ref, sizeof(s_size_row_ref));
            const size_t s_size_row = ggml_row_size(s_l[il]->type, hparams.n_embd_s());
            if (s_size_row != s_size_row_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s row size (%zu != %zu, layer %d)\n", __func__, s_size_row, (size_t) s_size_row_ref, il);
                return false;
            }

            if (cell_count) {
                // Read and set the S rows for the whole cell range
                ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_row), head * s_size_row, cell_count * s_size_row);
            }
        }
    } else {
        // For each layer, read the S values for each cell (transposed)
        for (uint32_t il = 0; il < n_layer; ++il) {
            // skip null layers
            if (s_l[il] == nullptr) continue;

            const uint32_t n_embd_s = hparams.n_embd_s();

            // Read type of S tensor
            int32_t s_type_i_ref;
            io.read_to(&s_type_i_ref, sizeof(s_type_i_ref));
            const int32_t s_type_i = (int32_t)s_l[il]->type;
            if (s_type_i != s_type_i_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s type (%d != %d, layer %d)\n", __func__, s_type_i, s_type_i_ref, il);
                return false;
            }

            // Read element size of S tensor
            uint32_t s_size_el_ref;
            io.read_to(&s_size_el_ref, sizeof(s_size_el_ref));
            const size_t s_size_el = ggml_type_size(s_l[il]->type);
            if (s_size_el != s_size_el_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s element size (%zu != %zu, layer %d)\n", __func__, s_size_el, (size_t) s_size_el_ref, il);
                return false;
            }

            // Read state embedding size
            uint32_t n_embd_s_ref;
            io.read_to(&n_embd_s_ref, sizeof(n_embd_s_ref));
            if (n_embd_s != n_embd_s_ref) {
                LLAMA_LOG_ERROR("%s: mismatched s embedding size (%u != %u, layer %d)\n", __func__, n_embd_s, n_embd_s_ref, il);
                return false;
            }

            if (cell_count) {
                // For each row in the transposed matrix, read the values for the whole cell range
                for (uint32_t j = 0; j < n_embd_s; ++j) {
                    const size_t dst_offset = (head + j * size) * s_size_el;
                    ggml_backend_tensor_set(s_l[il], io.read(cell_count * s_size_el), dst_offset, cell_count * s_size_el);
                }
            }
        }
    }

    return true;
}

//
// llama_memory_recurrent_context
//

llama_memory_recurrent_context::llama_memory_recurrent_context(llama_memory_status status) : status(status) {}

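// context over the full cache (used by init_full()): is_full makes the getters
// below report the whole cache (head = 0, n_rs = size) instead of the current window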
llama_memory_recurrent_context::llama_memory_recurrent_context(
        llama_memory_recurrent * mem) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), is_full(true) {
}

llama_memory_recurrent_context::llama_memory_recurrent_context(
        llama_memory_recurrent * mem,
        std::vector<llama_ubatch> ubatches) : status(LLAMA_MEMORY_STATUS_SUCCESS), mem(mem), ubatches(std::move(ubatches)) {}

llama_memory_recurrent_context::~llama_memory_recurrent_context() = default;

bool llama_memory_recurrent_context::next() {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    if (++i_next >= ubatches.size()) {
        return false;
    }

    return true;
}

bool llama_memory_recurrent_context::apply() {
    assert(!llama_memory_status_is_fail(status));

    // no ubatches -> this is an update
    if (ubatches.empty()) {
        // recurrent cache never performs updates
        assert(status == LLAMA_MEMORY_STATUS_NO_UPDATE);

        return true;
    }

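    // reserve the cells for the current ubatch; prepare() already verified that
    // all ubatches of this context can be placed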
    mem->find_slot(ubatches[i_next]);

    return true;
}

llama_memory_status llama_memory_recurrent_context::get_status() const {
    return status;
}

const llama_ubatch & llama_memory_recurrent_context::get_ubatch() const {
    assert(status == LLAMA_MEMORY_STATUS_SUCCESS);

    return ubatches[i_next];
}

uint32_t llama_memory_recurrent_context::get_n_rs() const {
    return is_full ? mem->size : mem->n;
}

uint32_t llama_memory_recurrent_context::get_head() const {
    return is_full ? 0 : mem->head;
}

int32_t llama_memory_recurrent_context::get_rs_z() const {
    return is_full ? 0 : mem->rs_z;
}

uint32_t llama_memory_recurrent_context::get_size() const {
    return mem->size;
}

ggml_tensor * llama_memory_recurrent_context::get_r_l(int32_t il) const {
    return mem->r_l[il];
}

ggml_tensor * llama_memory_recurrent_context::get_s_l(int32_t il) const {
    return mem->s_l[il];
}

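// the (staged) source cell for the i-th cell of the current window [head, head + n);
// made available when setting the inputs so the previous states can be gathered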
int32_t llama_memory_recurrent_context::s_copy(int i) const {
    return mem->cells[i + mem->head].src0;
}