#pragma once

#include "llama.h"
#include "llama-cparams.h"
#include "llama-graph.h"
#include "llama-adapter.h"
#include "llama-impl.h"

#include "ggml-cpp.h"
#include "ggml-opt.h"

#include <map>
#include <vector>

struct llama_model;
class llama_batch_allocr;

class llama_io_read_i;
class llama_io_write_i;

// "memory" as in abstract memory for the context
struct llama_memory_i;
struct llama_memory_context_i;

// "memory" as in physical memory for a buffer type, in bytes
struct llama_memory_breakdown_data {
    size_t model   = 0; // memory allocated for the model
    size_t context = 0; // memory allocated for the context
    size_t compute = 0; // memory allocated for temporary compute buffers

    size_t total() const {
        return model + context + compute;
    }
};
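
// Illustrative sketch (not part of this header's contract): aggregating the
// per-buffer-type breakdown returned by llama_context::memory_breakdown()
// declared below into a single total, in bytes.
//
//     size_t total = 0;
//     for (const auto & [buft, mb] : ctx.memory_breakdown()) {
//         total += mb.total(); // model + context + compute for this buffer type
//     }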

struct llama_context {
    // init scheduler and compute buffers, reserve worst-case graphs
    llama_context(
            const llama_model & model,
            llama_context_params params);

    ~llama_context();

    // reserve a new backend scheduler (if needed)
    // for example, when:
    //   - changing loras
    //   - changing samplers
    //   - changing attention type
    //   - etc.
    void sched_reserve();
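
    // Usage sketch (an assumption about caller responsibility; the "(if
    // needed)" above suggests the call is guarded internally): after changing
    // something that alters the worst-case graph, re-reserve before the next
    // decode:
    //
    //     ctx.set_adapter_lora(adapter, 0.5f);
    //     ctx.sched_reserve(); // expected to be a no-op if nothing is pending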

    void synchronize();

    const llama_model   & get_model()   const;
    const llama_cparams & get_cparams() const;

    ggml_backend_sched_t get_sched() const;

    uint32_t n_ctx()     const;
    uint32_t n_ctx_seq() const;
    uint32_t n_batch()   const;
    uint32_t n_ubatch()  const;
    uint32_t n_seq_max() const;

    uint32_t n_threads()       const;
    uint32_t n_threads_batch() const;

    llama_memory_t get_memory() const;

    // return true if the memory was updated
    bool memory_update(bool optimize);

    enum llama_pooling_type pooling_type() const;

    float * get_logits();
    float * get_logits_ith(int32_t i);

    float * get_embeddings();
    float * get_embeddings_ith(int32_t i);
    float * get_embeddings_seq(llama_seq_id seq_id);

    llama_token * get_sampled_tokens() const;
    llama_token   get_sampled_token_ith(int32_t idx);

    float * get_sampled_logits_ith(int32_t idx);
    size_t  get_sampled_logits_count(int32_t idx);

    float * get_sampled_probs_ith(int32_t idx);
    size_t  get_sampled_probs_count(int32_t idx);

    const llama_token * get_sampled_candidates_ith(int32_t idx);
    size_t get_sampled_candidates_count(int32_t idx);
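
    // Illustrative read-back sketch (assumes a prior successful decode() and,
    // for the sampled token, a sampler registered via set_sampler() below;
    // `idx` is an output index into the batch):
    //
    //     float *     logits = ctx.get_logits_ith(idx);
    //     llama_token tok    = ctx.get_sampled_token_ith(idx);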

    void attach_threadpool(
            ggml_threadpool_t threadpool,
            ggml_threadpool_t threadpool_batch);

    void detach_threadpool();

    void set_n_threads(int32_t n_threads, int32_t n_threads_batch);

    void set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data);

    void set_embeddings (bool value);
    void set_causal_attn(bool value);
    void set_warmup(bool value);

    void set_adapter_lora(
            llama_adapter_lora * adapter,
            float scale);

    bool rm_adapter_lora(
            llama_adapter_lora * adapter);

    void clear_adapter_lora();

    bool apply_adapter_cvec(
            const float * data,
            size_t len,
            int32_t n_embd,
            int32_t il_start,
            int32_t il_end);

    // process a single ubatch with a specific graph type
    // if memory_context is provided, it will be applied first to the context's memory
    // ret contains the status of the graph computation
    // returns nullptr only if ret != GGML_STATUS_SUCCESS
    llm_graph_result * process_ubatch(
            const llama_ubatch & ubatch,
            llm_graph_type gtype,
            llama_memory_context_i * mctx,
            ggml_status & ret);
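
    // Call pattern implied by the contract above (a sketch; the real call
    // sites are encode()/decode() below):
    //
    //     ggml_status status;
    //     llm_graph_result * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx, status);
    //     if (res == nullptr) {
    //         // status != GGML_STATUS_SUCCESS - the graph failed to build or compute
    //     }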

    int encode(const llama_batch & batch_inp);
    int decode(const llama_batch & batch_inp);
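
    // Illustrative decode call (sketch; assumes the usual public-API return
    // convention: 0 = success, positive = recoverable, negative = error):
    //
    //     llama_batch batch = llama_batch_get_one(tokens.data(), (int32_t) tokens.size());
    //     if (ctx.decode(batch) != 0) {
    //         // shrink the batch or free memory slots and retry - or abort
    //     }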

    //
    // state save/load
    //

    size_t state_get_size();
    size_t state_get_data(      uint8_t * dst, size_t size);
    size_t state_set_data(const uint8_t * src, size_t size);

    size_t state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_get_data(llama_seq_id seq_id,       uint8_t * dst, size_t size, llama_state_seq_flags flags);
    size_t state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags);

    bool state_load_file(
            const char * filepath,
            llama_token * tokens_out,
            size_t n_token_capacity,
            size_t * n_token_count_out);

    bool state_save_file(
            const char * filepath,
            const llama_token * tokens,
            size_t n_token_count);

    size_t state_seq_load_file(
            llama_seq_id seq_id,
            const char * filepath,
            llama_token * tokens_out,
            size_t n_token_capacity,
            size_t * n_token_count_out);

    size_t state_seq_save_file(
            llama_seq_id seq_id,
            const char * filepath,
            const llama_token * tokens,
            size_t n_token_count);
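
    // Round-trip sketch (hypothetical file name; mirrors the public
    // llama_state_save_file()/llama_state_load_file() wrappers):
    //
    //     ctx.state_save_file("session.bin", prompt.data(), prompt.size());
    //     ...
    //     std::vector<llama_token> tokens(ctx.n_ctx());
    //     size_t n_loaded = 0;
    //     ctx.state_load_file("session.bin", tokens.data(), tokens.size(), &n_loaded);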

    //
    // perf
    //

    llama_perf_context_data perf_get_data() const;
    void perf_reset();

    std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> memory_breakdown() const;

    //
    // training
    //

    void opt_init(struct llama_model * model, struct llama_opt_params lopt_params);

    // TODO: more flexible combinations of logical/physical batch size and context size
    void opt_epoch(
            ggml_opt_dataset_t dataset,
            ggml_opt_result_t result_train,
            ggml_opt_result_t result_eval,
            int64_t idata_split,
            ggml_opt_epoch_callback callback_train,
            ggml_opt_epoch_callback callback_eval);

    void opt_epoch_iter(
            ggml_opt_dataset_t dataset,
            ggml_opt_result_t result,
            const std::vector<llama_token> & tokens,
            const std::vector<llama_token> & labels_sparse,
            llama_batch & batch,
            ggml_opt_epoch_callback callback,
            bool train,
            int64_t idata_in_loop,
            int64_t ndata_in_loop,
            int64_t t_loop_start);
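
    // Epoch sketch (mirrors the public llama_opt_epoch() wrapper; the
    // progress-bar callback is an existing ggml-opt helper and `idata_split`
    // separates training data from evaluation data):
    //
    //     ggml_opt_result_t result_train = ggml_opt_result_init();
    //     ggml_opt_result_t result_eval  = ggml_opt_result_init();
    //     ctx.opt_epoch(dataset, result_train, result_eval, idata_split,
    //                   ggml_opt_epoch_callback_progress_bar,
    //                   ggml_opt_epoch_callback_progress_bar);
    //     ggml_opt_result_free(result_train);
    //     ggml_opt_result_free(result_eval);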

private:
    //
    // output
    //

    // Make sure enough space is available for outputs.
    // Returns max number of outputs for which space was reserved.
    uint32_t output_reserve(int32_t n_outputs);

    void output_reorder();

    // map the output row index `i` to batch index
    int64_t output_resolve_row(int32_t i) const;

    //
    // graph
    //

public:
    uint32_t graph_max_nodes(uint32_t n_tokens) const;

    // can reuse the llm_graph_result instance of the context (for example to update a memory module)
    llm_graph_result * get_gf_res_reserve() const;

    // returns the result of ggml_backend_sched_graph_compute_async execution
    ggml_status graph_compute(ggml_cgraph * gf, bool batched);

    // reserve a graph with a dummy ubatch of the specified size
    ggml_cgraph * graph_reserve(
            uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only = false, size_t * sizes = nullptr);

    bool set_sampler(llama_seq_id seq_id, llama_sampler * sampler);

private:
    llm_graph_params graph_params(
            llm_graph_result * res,
            const llama_ubatch & ubatch,
            const llama_memory_context_i * mctx,
            llm_graph_type gtype) const;

    llm_graph_cb graph_get_cb() const;

    // TODO: read/write lora adapters and cvec
    size_t state_write_data(llama_io_write_i & io);
    size_t state_read_data (llama_io_read_i & io);

    size_t state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags);
    size_t state_seq_read_data (llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags);

    //
    // members
    //

    const llama_model & model;

    llama_cparams cparams;
    llama_adapter_cvec cvec;
    llama_adapter_loras loras;

    llama_cross cross; // TODO: tmp for handling cross-attention - need something better probably

    std::unique_ptr<llama_memory_i> memory;

    // decode output (2-dimensional array: [n_outputs][n_vocab])
    struct buffer_view<float> logits = {nullptr, 0};

    // embeddings output (2-dimensional array: [n_outputs][n_embd])
    // populated only when pooling_type == LLAMA_POOLING_TYPE_NONE
    struct buffer_view<float> embd = {nullptr, 0};
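
    // Indexing sketch, following the row-major layouts above (assumes
    // buffer_view exposes a raw `data` pointer; `row` is an output row index
    // as resolved by output_resolve_row()):
    //
    //     float * row_logits = logits.data + (size_t) row * n_vocab;
    //     float * row_embd   = embd.data   + (size_t) row * n_embd;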

    struct sampling_info {
        std::map<llama_seq_id, llama_sampler *> samplers;

        struct buffer_view<float>       logits     = {nullptr, 0};
        struct buffer_view<llama_token> sampled    = {nullptr, 0};
        struct buffer_view<float>       probs      = {nullptr, 0};
        struct buffer_view<llama_token> candidates = {nullptr, 0};

        std::vector<uint32_t> logits_count;
        std::vector<uint32_t> probs_count;
        std::vector<uint32_t> candidates_count;

        std::vector<llama_token> token_ids_full_vocab;
    };

    sampling_info sampling;

    // sequence embeddings output (map of [n_embd] vectors)
    // populated only when pooling_type != LLAMA_POOLING_TYPE_NONE
    std::map<llama_seq_id, std::vector<float>> embd_seq;

    // reuse the batch_allocr to avoid unnecessary memory allocations
    std::unique_ptr<llama_batch_allocr> balloc;

    uint32_t n_outputs = 0; // number of actually-used outputs in the current ubatch or last logical batch

    std::vector<int32_t> output_ids; // map batch token positions to ids of the logits and embd buffers

    struct swap_info {
        uint32_t i0;
        uint32_t i1;
    };

    std::vector<swap_info> output_swaps;
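
    // Conceptual view of output_reorder() (a simplified sketch, not the exact
    // implementation): each recorded swap exchanges two output rows so that
    // the rows end up in batch order:
    //
    //     for (const auto & s : output_swaps) {
    //         std::swap_ranges(logits.data +  s.i0      * n_vocab,
    //                          logits.data + (s.i0 + 1) * n_vocab,
    //                          logits.data +  s.i1      * n_vocab);
    //     }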

    ggml_backend_sched_ptr sched;

    bool sched_need_reserve = true;

    ggml_backend_t backend_cpu = nullptr;
    std::vector<ggml_backend_ptr> backends;

    // training
    ggml_opt_context_t opt_ctx = nullptr;

    ggml_threadpool_t threadpool       = nullptr;
    ggml_threadpool_t threadpool_batch = nullptr;

    ggml_abort_callback abort_callback = nullptr;
    void * abort_callback_data = nullptr;

    std::vector<std::pair<ggml_backend_t, ggml_backend_set_n_threads_t>> set_n_threads_fns;

    // pointers and buffer types used for the compute buffer of each backend
    std::vector<ggml_backend_t> backend_ptrs;
    std::vector<ggml_backend_buffer_type_t> backend_buft;
    std::vector<size_t> backend_buf_exp_size; // expected buffer sizes

    llm_graph_result_ptr gf_res_prev;
    llm_graph_result_ptr gf_res_reserve;

    // host buffer for the model output (logits and embeddings)
    ggml_backend_buffer_ptr buf_output;

    bool has_evaluated_once = false;

    // env: LLAMA_GRAPH_REUSE_DISABLE
    bool graph_reuse_disable = false;

    // perf
    mutable int64_t t_start_us  = 0;
    mutable int64_t t_load_us   = 0;
    mutable int64_t t_p_eval_us = 0;
    mutable int64_t t_eval_us   = 0;

    mutable int64_t t_compute_start_us = 0;
    mutable int64_t n_queued_tokens    = 0;

    mutable int32_t n_p_eval = 0; // number of tokens in eval calls for the prompt (with batch size > 1)
    mutable int32_t n_eval   = 0; // number of eval calls

    mutable int32_t n_reused = 0; // number of times the previous graph was reused
};