#pragma once

#include "llama-batch.h"
#include "llama-graph.h"
#include "llama-kv-cells.h"
#include "llama-memory.h"

#include <cassert>
#include <map>
#include <unordered_map>
#include <vector>

struct llama_cparams;
struct llama_hparams;
struct llama_model;
struct llama_context;

//
// llama_kv_cache
//

class llama_kv_cache : public llama_memory_i {
public:
    struct stream_copy_info {
        bool empty() const {
            assert(ssrc.size() == sdst.size());
            return ssrc.empty();
        }

        std::vector<uint32_t> ssrc;
        std::vector<uint32_t> sdst;
    };
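
    // example (illustrative only): pending copies of stream 0 into stream 2 and
    // of stream 1 into stream 3 would be recorded as:
    //   ssrc = { 0, 1 };
    //   sdst = { 2, 3 };
    // i.e. stream ssrc[i] is copied into stream sdst[i] on the next update()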

    // for each ubatch, create a slot_info that contains information about where the ubatch should be inserted in the
    // KV cells. for example, cell indices for each token, such that: token[i] -> goes to cells[idxs[i]]
    struct slot_info {
        // data for ggml_set_rows
        using idx_vec_t = std::vector<uint32_t>;

        // number of streams: ns = s1 - s0 + 1
        uint32_t s0;
        uint32_t s1;

        std::vector<llama_seq_id> strm; // [ns]
        std::vector<idx_vec_t>    idxs; // [ns]

        uint32_t head() const {
            GGML_ASSERT(idxs.size() == 1);
            GGML_ASSERT(!idxs[0].empty());

            return idxs[0][0];
        }

        void resize(size_t n) {
            strm.resize(n);
            idxs.resize(n);
        }

        size_t size() const {
            GGML_ASSERT(idxs.size() == strm.size());
            GGML_ASSERT(!idxs.empty());

            return idxs[0].size();
        }

        size_t n_stream() const {
            return strm.size();
        }

        bool empty() const {
            return idxs.empty();
        }

        void clear() {
            idxs.clear();
        }

        // check if indices are contiguous starting from head()
        bool is_contiguous() const {
            if (idxs.empty() || idxs[0].empty()) {
                return true;
            }
            if (idxs.size() > 1) {
                return false;
            }
            const uint32_t h = idxs[0][0];
            for (size_t i = 0; i < idxs[0].size(); ++i) {
                if (idxs[0][i] != h + i) {
                    return false;
                }
            }
            return true;
        }
    };
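
    // example (illustrative only): a single-stream ubatch of 3 tokens placed in
    // cells 5, 6 and 7 of stream 0 would be described by:
    //   slot_info sinfo;
    //   sinfo.s0   = 0;
    //   sinfo.s1   = 0;
    //   sinfo.strm = { 0 };
    //   sinfo.idxs = { { 5, 6, 7 } };
    // token[i] of the ubatch goes to cell idxs[0][i], so sinfo.head() == 5,
    // sinfo.size() == 3 and sinfo.is_contiguous() == true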

    using slot_info_vec_t = std::vector<slot_info>;

    llama_kv_cache(
            const llama_model &  model,
                    ggml_type    type_k,
                    ggml_type    type_v,
                         bool    v_trans,
                         bool    offload,
                         bool    unified,
                     uint32_t    kv_size,
                     uint32_t    n_seq_max,
                     uint32_t    n_pad,
                     uint32_t    n_swa,
               llama_swa_type    swa_type,
        const layer_filter_cb &  filter,
         const layer_reuse_cb &  reuse);
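
    // construction sketch (illustrative only - the argument values are assumptions,
    // not taken from an actual caller):
    //   llama_kv_cache kv(model,
    //           GGML_TYPE_F16, GGML_TYPE_F16,
    //           /*v_trans =*/ true, /*offload   =*/ true, /*unified =*/ true,
    //           /*kv_size =*/ 4096, /*n_seq_max =*/ 4,    /*n_pad   =*/ 32,
    //           /*n_swa   =*/ 0,    LLAMA_SWA_TYPE_NONE,
    //           /*filter  =*/ nullptr, /*reuse =*/ nullptr);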

    ~llama_kv_cache() = default;

    //
    // llama_memory_i
    //

    llama_memory_context_ptr init_batch(
            llama_batch_allocr & balloc,
            uint32_t n_ubatch,
            bool embd_all) override;

    llama_memory_context_ptr init_full() override;

    llama_memory_context_ptr init_update(llama_context * lctx, bool optimize) override;

    bool get_can_shift() const override;

    void clear(bool data) override;

    bool seq_rm  (llama_seq_id seq_id, llama_pos p0, llama_pos p1) override;
    void seq_cp  (llama_seq_id seq_id_src, llama_seq_id seq_id_dst, llama_pos p0, llama_pos p1) override;
    void seq_keep(llama_seq_id seq_id) override;
    void seq_add (llama_seq_id seq_id, llama_pos p0, llama_pos p1, llama_pos shift) override;
    void seq_div (llama_seq_id seq_id, llama_pos p0, llama_pos p1, int d) override;

    llama_pos seq_pos_min(llama_seq_id seq_id) const override;
    llama_pos seq_pos_max(llama_seq_id seq_id) const override;

    std::map<ggml_backend_buffer_type_t, size_t> memory_breakdown() const override;

    // state write/load

    void state_write(llama_io_write_i & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) const override;
    void state_read (llama_io_read_i  & io, llama_seq_id seq_id = -1, llama_state_seq_flags flags = 0) override;

    //
    // llama_kv_cache specific API
    //

    uint32_t get_size() const;
    uint32_t get_n_stream() const;

    bool get_has_shift() const;

    //
    // graph_build API
    //

    uint32_t get_n_kv(const slot_info & sinfo) const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il, uint32_t n_kv, const slot_info & sinfo) const;

    // store k_cur and v_cur in the cache based on the provided head location
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il, const slot_info & sinfo) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il, const slot_info & sinfo) const;
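
    // graph-build sketch (illustrative only - the local variable names are
    // assumptions):
    //   const uint32_t n_kv = kv.get_n_kv(sinfo);
    //   ggml_tensor * k_idxs = kv.build_input_k_idxs(ctx, ubatch);
    //   ggml_tensor * k_view = kv.get_k(ctx, il, n_kv, sinfo);          // read path
    //   ggml_tensor * k_new  = kv.cpy_k(ctx, k_cur, k_idxs, il, sinfo); // write path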

    //
    // preparation API
    //

    // find places for the provided ubatches in the cache, returns the slot infos
    // returns an empty vector on failure
    slot_info_vec_t prepare(const std::vector<llama_ubatch> & ubatches);

    bool update(llama_context * lctx, bool do_shift, const stream_copy_info & sc_info);

    // find a slot of kv cells that can hold the ubatch
    // if cont == true, then the slot must be contiguous
    // returns an empty slot_info on failure
    slot_info find_slot(const llama_ubatch & ubatch, bool cont) const;

    // emplace the ubatch context into slot: [sinfo.idxs[0...ubatch.n_tokens - 1]]
    void apply_ubatch(const slot_info & sinfo, const llama_ubatch & ubatch);
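
    // batch-processing sketch (illustrative only): the expected flow when placing
    // ubatches into the cache is roughly:
    //   slot_info_vec_t sinfos = kv.prepare(ubatches);
    //   if (sinfos.empty()) {
    //       // failure - the ubatches could not fit into the cache
    //   }
    //   for (size_t i = 0; i < ubatches.size(); ++i) {
    //       kv.apply_ubatch(sinfos[i], ubatches[i]);
    //   }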

    //
    // input API
    //

    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch, const slot_info & sinfo) const;

    void set_input_k_shift(ggml_tensor * dst) const;

    void set_input_kq_mask  (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;
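
    // input-population sketch (illustrative only - the tensor names are
    // assumptions): once the graph is built, the index tensors created by
    // build_input_*_idxs() are filled per ubatch:
    //   kv.set_input_k_idxs(k_idxs, &ubatch, sinfo);
    //   kv.set_input_v_idxs(v_idxs, &ubatch, sinfo);
    //   kv.set_input_kq_mask(kq_mask, &ubatch, /*causal_attn =*/ true);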

private:
    const llama_model & model;
    const llama_hparams & hparams;

    struct kv_layer {
        // layer index in the model
        // note: can be different from the layer index in the KV cache
        uint32_t il;

        ggml_tensor * k;
        ggml_tensor * v;

        std::vector<ggml_tensor *> k_stream;
        std::vector<ggml_tensor *> v_stream;
    };

    bool v_trans = true; // the value tensor is transposed

    const uint32_t n_seq_max = 1;
    const uint32_t n_stream  = 1;

    // required padding
    const uint32_t n_pad = 1;

    // SWA
    const uint32_t n_swa = 0;

    // env: LLAMA_KV_CACHE_DEBUG
    int debug = 0;

    // this is the SWA type of the cache - not to be confused with the model SWA type
    const llama_swa_type swa_type = LLAMA_SWA_TYPE_NONE;

    // ggml contexts for the KV cache along with the allocated backend buffers:
    std::vector<std::pair<ggml_context_ptr, ggml_backend_buffer_ptr>> ctxs_bufs;

    // the current index from where we start searching for a free slot in the ring buffer of KV cells (see find_slot())
    // note: this is not part of the KV state and is only used to speed up the find_slot() method
    std::vector<uint32_t> v_heads;

    std::vector<llama_kv_cells> v_cells;

    // maps a sequence id to a stream id
    std::vector<uint32_t> seq_to_stream;

    // pending stream copies that will be applied during the next update
    stream_copy_info sc_info;

    std::vector<kv_layer> layers;

    // model layer id -> KV cache layer id
    std::unordered_map<int32_t, int32_t> map_layer_ids;

    size_t total_size() const;

    size_t size_k_bytes() const;
    size_t size_v_bytes() const;

    ggml_tensor * build_rope_shift(
            const llama_cparams & cparams,
            ggml_context * ctx,
            ggml_tensor * cur,
            ggml_tensor * shift,
            ggml_tensor * factors,
            float freq_base,
            float freq_scale) const;

    ggml_cgraph * build_graph_shift(
            llm_graph_result * res,
            llama_context * lctx) const;

    struct cell_ranges_t {
        uint32_t strm;

        std::vector<std::pair<uint32_t, uint32_t>> data; // ranges, from inclusive, to exclusive
    };

    void state_write_meta(llama_io_write_i & io, const cell_ranges_t & cr, llama_seq_id seq_id = -1) const;
    void state_write_data(llama_io_write_i & io, const cell_ranges_t & cr) const;

    bool state_read_meta(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, slot_info & sinfo, llama_seq_id dest_seq_id = -1);
    bool state_read_data(llama_io_read_i & io, uint32_t strm, uint32_t cell_count, const slot_info & sinfo);
};

class llama_kv_cache_context : public llama_memory_context_i {
public:
    // some shorthands
    using slot_info_vec_t  = llama_kv_cache::slot_info_vec_t;
    using stream_copy_info = llama_kv_cache::stream_copy_info;

    // used for errors
    llama_kv_cache_context(llama_memory_status status);

    // used to create a full-cache context
    llama_kv_cache_context(
            llama_kv_cache * kv);

    // used to create an update context
    llama_kv_cache_context(
            llama_kv_cache * kv,
            llama_context * lctx,
            bool do_shift,
            stream_copy_info sc_info);

    // used to create a batch processing context from a batch
    llama_kv_cache_context(
            llama_kv_cache * kv,
            slot_info_vec_t sinfos,
            std::vector<llama_ubatch> ubatches);

    virtual ~llama_kv_cache_context();

    //
    // llama_memory_context_i
    //

    bool next() override;
    bool apply() override;

    llama_memory_status get_status() const override;
    const llama_ubatch & get_ubatch() const override;

    //
    // llama_kv_cache_context specific API
    //

    uint32_t get_n_kv() const;

    // get views of the current state of the cache
    ggml_tensor * get_k(ggml_context * ctx, int32_t il) const;
    ggml_tensor * get_v(ggml_context * ctx, int32_t il) const;

    // store k_cur and v_cur in the cache based on the provided head location
    // note: the heads in k_cur and v_cur should be laid out contiguously in memory
    //   - k_cur  [n_embd_head_k, n_head_k, n_tokens]
    //   - k_idxs [n_tokens]
    //   - v_cur  [n_embd_head_v, n_head_v, n_tokens]
    //   - v_idxs [n_tokens] or [n_tokens*n_embd_v_gqa] depending on whether the V cache is transposed
    ggml_tensor * cpy_k(ggml_context * ctx, ggml_tensor * k_cur, ggml_tensor * k_idxs, int32_t il) const;
    ggml_tensor * cpy_v(ggml_context * ctx, ggml_tensor * v_cur, ggml_tensor * v_idxs, int32_t il) const;
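
    // write-path sketch (illustrative only - mctx and the local names are
    // assumptions):
    //   ggml_tensor * k_idxs = mctx->build_input_k_idxs(ctx, ubatch); // [n_tokens]
    //   ggml_tensor * k_new  = mctx->cpy_k(ctx, k_cur, k_idxs, il);
    // when the V cache is transposed, v_idxs carries n_tokens*n_embd_v_gqa
    // entries - one destination per (token, channel) pair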

    // create destination indices for each head of the current batch for where it would be written in the KV cache
    // the indices address the global KV cache (not per stream) - this is not relevant for the user of this API, but
    // helps understand the implementation logic of cpy_k and cpy_v
    ggml_tensor * build_input_k_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;
    ggml_tensor * build_input_v_idxs(ggml_context * ctx, const llama_ubatch & ubatch) const;

    void set_input_k_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;
    void set_input_v_idxs(ggml_tensor * dst, const llama_ubatch * ubatch) const;

    void set_input_k_shift  (ggml_tensor * dst) const;
    void set_input_kq_mask  (ggml_tensor * dst, const llama_ubatch * ubatch, bool causal_attn) const;
    void set_input_pos_bucket(ggml_tensor * dst, const llama_ubatch * ubatch) const;

private:
    llama_memory_status status;

    llama_kv_cache * kv;
    llama_context * lctx;

    //
    // update context
    //

    bool do_shift = false;

    stream_copy_info sc_info;

    //
    // batch processing context
    //

    // the index of the current ubatch to process
    size_t i_cur = 0;

    slot_info_vec_t sinfos;

    std::vector<llama_ubatch> ubatches;

    //
    // data needed for building the compute graph for the current ubatch:
    //

    // a heuristic to avoid attending to the full cache while it is not yet utilized
    // as the cache fills up, the benefit from this heuristic disappears
    int32_t n_kv;
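
    // example (illustrative only - the exact clamping depends on the cache
    // implementation): with a 4096-cell cache of which only the first ~100 cells
    // are in use, n_kv can be reduced to a small padded value (e.g. 128), so the
    // KQ mask and the attention only cover that prefix instead of all 4096 cells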
};