Diffstat (limited to 'llama.cpp/src/llama-context.cpp')
 -rw-r--r--  llama.cpp/src/llama-context.cpp  3691
1 file changed, 3691 insertions, 0 deletions
diff --git a/llama.cpp/src/llama-context.cpp b/llama.cpp/src/llama-context.cpp
new file mode 100644
index 0000000..6b43ca1
--- /dev/null
+++ b/llama.cpp/src/llama-context.cpp
@@ -0,0 +1,3691 @@
#include "llama-context.h"

#include "llama-arch.h"
#include "llama-impl.h"
#include "llama-batch.h"
#include "llama-io.h"
#include "llama-memory.h"
#include "llama-mmap.h"
#include "llama-model.h"

#include <cinttypes>
#include <cmath>
#include <cstring>
#include <limits>
#include <stdexcept>

//
// llama_context
//

llama_context::llama_context(
        const llama_model & model,
        llama_context_params params) :
    model(model),
    balloc(std::make_unique<llama_batch_allocr>(model.hparams.n_pos_per_embd())) {
    // TODO: warn when creating a llama_context with an awkward ctx size that is not a power of 2;
    //       may need to be backend-dependent
    LLAMA_LOG_INFO("%s: constructing llama_context\n", __func__);

    t_start_us = model.t_start_us;
    t_load_us  = model.t_load_us;

    const auto & hparams = model.hparams;

    cparams.n_seq_max = std::max(1u, params.n_seq_max);
    if (cparams.n_seq_max > LLAMA_MAX_SEQ) {
        throw std::runtime_error("n_seq_max must be <= " + std::to_string(LLAMA_MAX_SEQ));
    }

    cparams.n_threads        = params.n_threads;
    cparams.n_threads_batch  = params.n_threads_batch;
    cparams.yarn_ext_factor  = params.yarn_ext_factor  >= 0.0f ? params.yarn_ext_factor  : hparams.yarn_ext_factor;
    cparams.yarn_attn_factor = params.yarn_attn_factor >= 0.0f ? params.yarn_attn_factor : hparams.yarn_attn_factor;
    cparams.yarn_beta_fast   = params.yarn_beta_fast   >= 0.0f ? params.yarn_beta_fast   : hparams.yarn_beta_fast;
    cparams.yarn_beta_slow   = params.yarn_beta_slow   >= 0.0f ? params.yarn_beta_slow   : hparams.yarn_beta_slow;
    cparams.embeddings       = params.embeddings;
    cparams.offload_kqv      = params.offload_kqv;
    cparams.no_perf          = params.no_perf;
    cparams.pooling_type     = params.pooling_type;
    cparams.warmup           = false;

    cparams.n_ctx           = params.n_ctx           == 0    ? hparams.n_ctx_train           : params.n_ctx;
    cparams.rope_freq_base  = params.rope_freq_base  == 0.0f ? hparams.rope_freq_base_train  : params.rope_freq_base;
    cparams.rope_freq_scale = params.rope_freq_scale == 0.0f ? hparams.rope_freq_scale_train : params.rope_freq_scale;

    cparams.n_ctx_orig_yarn = params.yarn_orig_ctx    != 0 ? params.yarn_orig_ctx    :
                              hparams.n_ctx_orig_yarn != 0 ? hparams.n_ctx_orig_yarn :
                                                             hparams.n_ctx_train;

    cparams.cb_eval           = params.cb_eval;
    cparams.cb_eval_user_data = params.cb_eval_user_data;

    // Initialize backend samplers here so they are part of the sampling graph
    // before the reserve passes run later in this function. This avoids a later
    // re-reserve when graph nodes change.
    if (params.samplers != nullptr && params.n_samplers > 0) {
        for (size_t i = 0; i < params.n_samplers; ++i) {
            const auto & config = params.samplers[i];

            if (llama_sampler_chain_get(config.sampler, -1) == nullptr) {
                throw std::runtime_error("the backend samplers must be of type llama_sampler_chain");
            }

            if (set_sampler(config.seq_id, config.sampler)) {
                const int n_samplers = llama_sampler_chain_n(config.sampler);

                LLAMA_LOG_INFO("%s: setting backend sampler for seq_id %d (n = %d)\n", __func__, config.seq_id, n_samplers);
            }
        }
    }
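
    // Usage sketch (illustrative; the llama.h name of the per-sequence sampler
    // config struct is not shown in this file, so `llama_sampler_seq_config`
    // below is an assumption). A caller builds a sampler chain and attaches it
    // to a sequence via the context params:
    //
    //     llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());
    //     llama_sampler_chain_add(chain, llama_sampler_init_top_k(40));
    //     llama_sampler_chain_add(chain, llama_sampler_init_dist(LLAMA_DEFAULT_SEED));
    //
    //     llama_sampler_seq_config cfg = { /*seq_id =*/ 0, /*sampler =*/ chain }; // hypothetical type name
    //     llama_context_params cp = llama_context_default_params();
    //     cp.samplers   = &cfg;
    //     cp.n_samplers = 1;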

    auto rope_scaling_type = params.rope_scaling_type;
    if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED) {
        rope_scaling_type = hparams.rope_scaling_type_train;
    }

    if (rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_NONE) {
        cparams.rope_freq_scale = 1.0f; // never scale if scaling type is none
    }

    if (cparams.yarn_ext_factor < 0.0f) { // negative indicates 'not set'
        cparams.yarn_ext_factor = rope_scaling_type == LLAMA_ROPE_SCALING_TYPE_YARN ? 1.0f : 0.0f;
    }

    if (cparams.yarn_ext_factor != 0) {
        static auto get_mscale = [](float scale, float mscale) {
            return scale <= 1.0f ? 1.0f : (0.1f * mscale * logf(scale) + 1.0f);
        };

        const float factor = 1.0f / cparams.rope_freq_scale;

        // ref: https://github.com/huggingface/transformers/blob/6d00f6b0a5679c36510f203e4226e36f517c3032/src/transformers/modeling_rope_utils.py#L336-L348
        if (hparams.rope_yarn_log_mul != 0.0f) {
            // note: here we assume `mscale == 1.0f`
            // TODO: start reading the actual value of mscale and handle the case where it is not 1.0f
            float mscale = 1.0f;
            const float mscale_all_dims = hparams.rope_yarn_log_mul;

            // [TAG_DEEPSEEK2_YARN_LOG_MUL_FIX]
            // special-case DEEPSEEK v2:
            // https://huggingface.co/deepseek-ai/DeepSeek-V2-Lite-Chat/blob/main/config.json#L42-L43
            if (model.arch == LLM_ARCH_DEEPSEEK2 && mscale_all_dims != 1.0f) {
                mscale = mscale_all_dims;
            }

            cparams.yarn_attn_factor = get_mscale(factor, mscale) / get_mscale(factor, mscale_all_dims);

            LLAMA_LOG_WARN("%s: setting new yarn_attn_factor = %.4f (mscale == %.1f, mscale_all_dim = %.1f)\n",
                    __func__, cparams.yarn_attn_factor, mscale, mscale_all_dims);
        } else {
            cparams.yarn_attn_factor = get_mscale(factor, 1.0f);
        }

        // when YARN is applied with yarn_ext_factor != 0.0f, we need to cancel this factor:
        // https://github.com/ggml-org/llama.cpp/blob/a81a569577cc38b32558958b048228150be63eae/ggml/src/ggml-cpu/ops.cpp#L5541-L5544
        //
        // ref: https://github.com/ggml-org/llama.cpp/discussions/7416
        //      https://github.com/ggml-org/llama.cpp/pull/17945
        cparams.yarn_attn_factor *= 1.0f / (1.0f + 0.1f * logf(factor));
    }

    cparams.yarn_attn_factor *= hparams.rope_attn_factor;
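
    // Worked example of the YaRN attention-factor math above (illustrative numbers):
    // with rope_freq_scale = 0.25 the extension factor is factor = 1/0.25 = 4.
    //   - plain path (rope_yarn_log_mul == 0):
    //       yarn_attn_factor = get_mscale(4, 1) = 0.1*ln(4) + 1 ~= 1.1386
    //       after the cancellation step: 1.1386 * 1/(1 + 0.1*ln(4)) = 1.0
    //   - log-mul path with mscale = 1.0, mscale_all_dims = 0.5:
    //       get_mscale(4, 1.0) ~= 1.1386, get_mscale(4, 0.5) ~= 1.0693
    //       yarn_attn_factor ~= 1.1386/1.0693 ~= 1.0648, then * 1/1.1386 ~= 0.9352
    // The result is finally scaled by hparams.rope_attn_factor.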

    if (cparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
        if (hparams.pooling_type == LLAMA_POOLING_TYPE_UNSPECIFIED) {
            cparams.pooling_type = LLAMA_POOLING_TYPE_NONE;
        } else {
            cparams.pooling_type = hparams.pooling_type;
        }
    }

    if (params.attention_type == LLAMA_ATTENTION_TYPE_UNSPECIFIED) {
        cparams.causal_attn = hparams.causal_attn;
    } else {
        cparams.causal_attn = params.attention_type == LLAMA_ATTENTION_TYPE_CAUSAL;
    }

    cparams.flash_attn = params.flash_attn_type != LLAMA_FLASH_ATTN_TYPE_DISABLED;
    cparams.auto_fa    = params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO;

    // with causal attention, the batch size is limited by the context size
    cparams.n_batch = cparams.causal_attn ? std::min(cparams.n_ctx, params.n_batch) : params.n_batch;

    cparams.n_ubatch = std::min(cparams.n_batch, params.n_ubatch == 0 ? params.n_batch : params.n_ubatch);

    cparams.op_offload = params.op_offload;
    cparams.kv_unified = params.kv_unified;

    // initialized later
    cparams.pipeline_parallel = false;

    {
        const char * LLAMA_GRAPH_REUSE_DISABLE = getenv("LLAMA_GRAPH_REUSE_DISABLE");
        graph_reuse_disable = LLAMA_GRAPH_REUSE_DISABLE ? (atoi(LLAMA_GRAPH_REUSE_DISABLE) != 0) : graph_reuse_disable;

        if (graph_reuse_disable) {
            LLAMA_LOG_WARN("%s: graph reuse disabled\n", __func__);
        }
    }

    // ref: https://github.com/ggml-org/llama.cpp/pull/17046#discussion_r2503085732
    cparams.n_ctx = GGML_PAD(cparams.n_ctx, 256);

    if (cparams.kv_unified) {
        cparams.n_ctx_seq = cparams.n_ctx;
    } else {
        cparams.n_ctx_seq = cparams.n_ctx / cparams.n_seq_max;
        cparams.n_ctx_seq = GGML_PAD(cparams.n_ctx_seq, 256);

        if (cparams.n_ctx_seq == 0) {
            throw std::runtime_error("n_ctx_seq == 0");
        }

        if (cparams.n_ctx != cparams.n_ctx_seq * cparams.n_seq_max) {
            cparams.n_ctx = cparams.n_ctx_seq * cparams.n_seq_max;
            LLAMA_LOG_WARN("%s: n_ctx is not divisible by n_seq_max - rounding down to %u\n", __func__, cparams.n_ctx);
        }
    }
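
    // Worked example of the context split above (illustrative numbers): with
    // n_ctx = 8192 and n_seq_max = 3, the per-sequence split is 8192/3 = 2730,
    // which GGML_PAD brings to 2816; since 2816*3 = 8448 != 8192, n_ctx is
    // replaced by 8448 and the warning above is printed. With n_ctx = 4096 and
    // n_seq_max = 4 the split is an exact 1024 per sequence, and with
    // kv_unified = true all sequences share the full n_ctx window instead.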

    LLAMA_LOG_INFO("%s: n_seq_max   = %u\n",   __func__, cparams.n_seq_max);
    LLAMA_LOG_INFO("%s: n_ctx       = %u\n",   __func__, cparams.n_ctx);
    LLAMA_LOG_INFO("%s: n_ctx_seq   = %u\n",   __func__, cparams.n_ctx_seq);
    LLAMA_LOG_INFO("%s: n_batch     = %u\n",   __func__, cparams.n_batch);
    LLAMA_LOG_INFO("%s: n_ubatch    = %u\n",   __func__, cparams.n_ubatch);
    LLAMA_LOG_INFO("%s: causal_attn = %d\n",   __func__, cparams.causal_attn);
    LLAMA_LOG_INFO("%s: flash_attn  = %s\n",   __func__, llama_flash_attn_type_name(params.flash_attn_type));
    LLAMA_LOG_INFO("%s: kv_unified  = %s\n",   __func__, cparams.kv_unified ? "true" : "false");
    LLAMA_LOG_INFO("%s: freq_base   = %.1f\n", __func__, cparams.rope_freq_base);
    LLAMA_LOG_INFO("%s: freq_scale  = %g\n",   __func__, cparams.rope_freq_scale);

    if (cparams.n_ctx_seq < hparams.n_ctx_train) {
        LLAMA_LOG_WARN("%s: n_ctx_seq (%u) < n_ctx_train (%u) -- the full capacity of the model will not be utilized\n",
                __func__, cparams.n_ctx_seq, hparams.n_ctx_train);
    }

    if (cparams.n_ctx_seq > hparams.n_ctx_train) {
        LLAMA_LOG_WARN("%s: n_ctx_seq (%u) > n_ctx_train (%u) -- possible training context overflow\n",
                __func__, cparams.n_ctx_seq, hparams.n_ctx_train);
    }

    if (!hparams.vocab_only) {
        // GPU backends
        for (auto * dev : model.devices) {
            ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
            if (backend == nullptr) {
                throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
            }
            backends.emplace_back(backend);
        }

        // add ACCEL backends (such as BLAS)
        for (size_t i = 0; i < ggml_backend_dev_count(); ++i) {
            ggml_backend_dev_t dev = ggml_backend_dev_get(i);
            if (ggml_backend_dev_type(dev) == GGML_BACKEND_DEVICE_TYPE_ACCEL) {
                ggml_backend_t backend = ggml_backend_dev_init(dev, nullptr);
                if (backend == nullptr) {
                    throw std::runtime_error(format("failed to initialize %s backend", ggml_backend_dev_name(dev)));
                }
                backends.emplace_back(backend);
            }
        }

        // add CPU backend
        backend_cpu = ggml_backend_init_by_type(GGML_BACKEND_DEVICE_TYPE_CPU, nullptr);
        if (backend_cpu == nullptr) {
            throw std::runtime_error("failed to initialize CPU backend");
        }
        backends.emplace_back(backend_cpu);

        // create a list of the set_n_threads functions in the backends
        for (auto & backend : backends) {
            ggml_backend_dev_t dev = ggml_backend_get_device(backend.get());
            ggml_backend_reg_t reg = dev ? ggml_backend_dev_backend_reg(dev) : nullptr;
            if (reg) {
                auto ggml_backend_set_n_threads_fn = (ggml_backend_set_n_threads_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_n_threads");
                if (ggml_backend_set_n_threads_fn) {
                    set_n_threads_fns.emplace_back(backend.get(), ggml_backend_set_n_threads_fn);
                }
            }
        }

        llama_set_abort_callback(this, params.abort_callback, params.abort_callback_data);

        // graph outputs buffer
        {
            if (output_reserve(params.n_seq_max) < params.n_seq_max) {
                throw std::runtime_error("failed to reserve initial output buffer");
            }

            LLAMA_LOG_INFO("%s: %10s output buffer size = %8.2f MiB\n", __func__,
                    ggml_backend_buffer_name    (buf_output.get()),
                    ggml_backend_buffer_get_size(buf_output.get()) / 1024.0 / 1024.0);
        }
    }

    // init the memory module
    if (!hparams.vocab_only) {
        llama_memory_params params_mem = {
            /*.type_k   =*/ params.type_k,
            /*.type_v   =*/ params.type_v,
            /*.swa_full =*/ params.swa_full,
        };

        memory.reset(model.create_memory(params_mem, cparams));
    }

    // init backends
    if (!hparams.vocab_only) {
        LLAMA_LOG_DEBUG("%s: enumerating backends\n", __func__);

        backend_buft.clear();
        backend_ptrs.clear();
        backend_buf_exp_size.clear();

        for (auto & backend : backends) {
            auto * buft = ggml_backend_get_default_buffer_type(backend.get());
            auto backend_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));

            if (backend_type == GGML_BACKEND_DEVICE_TYPE_CPU && !model.devices.empty()) {
                // use the host buffer of the first device CPU for faster transfer of the intermediate state
                auto * dev = model.devices[0];
                auto * host_buft = ggml_backend_dev_host_buffer_type(dev);
                if (host_buft) {
                    buft = host_buft;
                }
            }

            backend_buft.push_back(buft);
            backend_ptrs.push_back(backend.get());
            backend_buf_exp_size.push_back(0);
        }

        LLAMA_LOG_DEBUG("%s: backend_ptrs.size() = %zu\n", __func__, backend_ptrs.size());

        // TODO: move these checks to ggml_backend_sched
        // enabling pipeline parallelism in the scheduler increases memory usage, so it is only done when necessary
        bool pipeline_parallel =
            model.n_devices() > 1 &&
            model.n_gpu_layers() > model.hparams.n_layer &&
            model.split_mode() == LLAMA_SPLIT_MODE_LAYER &&
            cparams.offload_kqv &&
            !model.has_tensor_overrides();

        // pipeline parallelism requires support for async compute and events in all devices
        if (pipeline_parallel) {
            for (auto & backend : backends) {
                auto dev_type = ggml_backend_dev_type(ggml_backend_get_device(backend.get()));
                if (dev_type == GGML_BACKEND_DEVICE_TYPE_CPU) {
                    // ignore CPU backend
                    // TODO: should we ignore ACCEL types too?
                    continue;
                }
                auto * dev = ggml_backend_get_device(backend.get());
                ggml_backend_dev_props props;
                ggml_backend_dev_get_props(dev, &props);
                if (!props.caps.async || !props.caps.events) {
                    // device does not support async compute or events
                    pipeline_parallel = false;
                    break;
                }
            }
        }

        cparams.pipeline_parallel = pipeline_parallel;

        if (cparams.pipeline_parallel) {
            LLAMA_LOG_INFO("%s: pipeline parallelism enabled\n", __func__);
        }

        sched_reserve();

        if (!cparams.flash_attn) {
            if (ggml_is_quantized(params.type_v)) {
                throw std::runtime_error("quantized V cache was requested, but this requires Flash Attention");
            }
        }
    }

    // Initialize the full vocabulary token ids for backend samplers.
    {
        const int n_vocab = model.vocab.n_tokens();

        sampling.token_ids_full_vocab.resize(n_vocab);
        for (int i = 0; i < n_vocab; ++i) {
            sampling.token_ids_full_vocab[i] = i;
        }
    }
}

llama_context::~llama_context() {
    if (!model.hparams.no_alloc) {
        for (size_t i = 0; i < backend_ptrs.size(); ++i) {
            ggml_backend_t backend = backend_ptrs[i];
            ggml_backend_buffer_type_t buft = backend_buft[i];

            const size_t size_exp = backend_buf_exp_size[i];
            const size_t size_act = ggml_backend_sched_get_buffer_size(sched.get(), backend);
            if (size_exp == size_act) {
                LLAMA_LOG_DEBUG("%s: %10s compute buffer size is %8.4f MiB, matches expectation of %8.4f MiB\n",
                        __func__, ggml_backend_buft_name(buft), size_act / (1024.0*1024.0), size_exp / (1024.0*1024.0));
            } else {
                LLAMA_LOG_WARN("%s: %10s compute buffer size of %8.4f MiB does not match expectation of %8.4f MiB\n",
                        __func__, ggml_backend_buft_name(buft), size_act / (1024.0*1024.0), size_exp / (1024.0*1024.0));
            }
        }
    }
    ggml_opt_free(opt_ctx);
}

void llama_context::sched_reserve() {
    if (!sched_need_reserve) {
        return;
    }

    sched_need_reserve = false;

    LLAMA_LOG_INFO("%s: reserving ...\n", __func__);

    synchronize();

    const int64_t t_start_us = ggml_time_us();

    const uint32_t n_seqs   = cparams.n_seq_max;
    const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);

    const size_t max_nodes = this->graph_max_nodes(n_tokens);

    LLAMA_LOG_DEBUG("%s: max_nodes = %zu\n", __func__, max_nodes);

    gf_res_prev.reset(new llm_graph_result(max_nodes));
    gf_res_reserve.reset(new llm_graph_result(max_nodes));

    sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, cparams.pipeline_parallel, cparams.op_offload));

    llama_memory_context_ptr mctx;
    if (memory) {
        LLAMA_LOG_DEBUG("%s: reserving full memory module\n", __func__);
        mctx = memory->init_full();
        if (!mctx) {
            throw std::runtime_error("failed to initialize memory module");
        }
    }

    // avoid reserving graphs with zero outputs - assume one output per sequence
    const int n_outputs = n_seqs;

    LLAMA_LOG_DEBUG("%s: worst-case: n_tokens = %d, n_seqs = %d, n_outputs = %d\n", __func__, n_tokens, n_seqs, n_outputs);

    // resolve automatic Flash Attention use
    if (cparams.auto_fa) {
        auto * gf = graph_reserve(1, n_seqs, n_outputs, mctx.get(), true);
        if (!gf) {
            throw std::runtime_error("failed to split graph for Flash Attention check");
        }

        const size_t prefix_len = strlen(LLAMA_TENSOR_NAME_FATTN) + 1;
        bool fa_device_mismatch = false;
        for (int i = 0; i < ggml_graph_n_nodes(gf); i++) {
            ggml_tensor * n = ggml_graph_node(gf, i);
            if (n->op != GGML_OP_FLASH_ATTN_EXT) {
                continue;
            }
            ggml_backend_dev_t device_fa = ggml_backend_get_device(
                ggml_backend_sched_get_tensor_backend(sched.get(), n));

            // TODO: instead of the tensor names, use a map to keep track of which (FA) tensors belong to which layer
            GGML_ASSERT(strncmp(n->name, LLAMA_TENSOR_NAME_FATTN "-", prefix_len) == 0);
            const int il = std::stoi(n->name + prefix_len);
            ggml_backend_dev_t device_kv = model.dev_layer(il);
            if (device_fa != device_kv) {
                LLAMA_LOG_WARN("%s: layer %d is assigned to device %s but the Flash Attention tensor "
                        "is assigned to device %s (usually due to missing support)\n",
                        __func__, il, ggml_backend_dev_name(device_kv), ggml_backend_dev_name(device_fa));
                // FIXME: fa_device_mismatch logic is wrong for --no-kv-offload, but this is broken anyways
                fa_device_mismatch = true;
                break;
            }
        }
        if (fa_device_mismatch) {
            cparams.flash_attn = false;
            LLAMA_LOG_WARN("%s: Flash Attention was auto, set to disabled\n", __func__);
        } else {
            cparams.flash_attn = true;
            LLAMA_LOG_INFO("%s: Flash Attention was auto, set to enabled\n", __func__);
        }

        cparams.auto_fa = false;
    }

    // reserve worst-case graph
    int n_splits_pp = -1;
    int n_nodes_pp  = -1;

    int n_splits_tg = -1;
    int n_nodes_tg  = -1;

    // reserve pp (prompt processing) graph first so that buffers are only allocated once
    {
        auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get(),
                model.hparams.no_alloc, model.hparams.no_alloc ? backend_buf_exp_size.data() : nullptr);
        if (!gf) {
            if (cparams.pipeline_parallel) {
                LLAMA_LOG_WARN("%s: compute buffer allocation failed, retrying without pipeline parallelism\n", __func__);
                cparams.pipeline_parallel = false;
                sched.reset(ggml_backend_sched_new(backend_ptrs.data(), backend_buft.data(), backend_ptrs.size(), max_nodes, false, cparams.op_offload));
                gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
            }
            if (!gf) {
                throw std::runtime_error("failed to allocate compute pp buffers");
            }
        }

        n_splits_pp = ggml_backend_sched_get_n_splits(sched.get());
        n_nodes_pp  = ggml_graph_n_nodes(gf);
    }

    // reserve with tg (token generation) graph to get the number of splits and nodes
    {
        auto * gf = graph_reserve(n_seqs, n_seqs, n_seqs, mctx.get(), model.hparams.no_alloc);
        if (!gf) {
            throw std::runtime_error("failed to allocate compute tg buffers");
        }

        n_splits_tg = ggml_backend_sched_get_n_splits(sched.get());
        n_nodes_tg  = ggml_graph_n_nodes(gf);
    }

    // reserve again with pp graph to avoid ggml-alloc reallocations during inference
    {
        // TODO: not sure if the following graph would be the worst case for multi-stream KV caches:
        //
        //   auto * gf = graph_reserve(n_tokens, 1, n_tokens, mctx.get());
        //
        auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get(), model.hparams.no_alloc);
        if (!gf) {
            throw std::runtime_error("failed to allocate compute pp buffers");
        }
    }

    for (size_t i = 0; i < backend_ptrs.size(); ++i) {
        ggml_backend_t backend = backend_ptrs[i];
        ggml_backend_buffer_type_t buft = backend_buft[i];
        if (!model.hparams.no_alloc) {
            backend_buf_exp_size[i] = ggml_backend_sched_get_buffer_size(sched.get(), backend);
        }
        if (backend_buf_exp_size[i] > 1) {
            LLAMA_LOG_INFO("%s: %10s compute buffer size = %8.2f MiB\n", __func__,
                    ggml_backend_buft_name(buft),
                    backend_buf_exp_size[i] / 1024.0 / 1024.0);
        }
    }

    if (n_nodes_pp == n_nodes_tg) {
        LLAMA_LOG_INFO("%s: graph nodes  = %d\n", __func__, n_nodes_pp);
    } else {
        LLAMA_LOG_INFO("%s: graph nodes  = %d (with bs=%d), %d (with bs=1)\n", __func__, n_nodes_pp, n_tokens, n_nodes_tg);
    }

    if (n_splits_pp == n_splits_tg) {
        LLAMA_LOG_INFO("%s: graph splits = %d\n", __func__, n_splits_pp);
    } else {
        LLAMA_LOG_INFO("%s: graph splits = %d (with bs=%d), %d (with bs=1)\n", __func__, n_splits_pp, n_tokens, n_splits_tg);
    }

    const int64_t t_end_us = ggml_time_us();

    LLAMA_LOG_INFO("%s: reserve took %.2f ms, sched copies = %d\n",
            __func__, (t_end_us - t_start_us)/1000.0, ggml_backend_sched_get_n_copies(sched.get()));
}
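
// Reserve shapes used above, illustrated with example numbers (n_ubatch = 512,
// n_seq_max = 4): the pp pass reserves a graph for a 512-token ubatch spread
// over 4 sequences, the tg pass for a 4-token ubatch (one token per sequence),
// and the final pp pass re-establishes the worst-case allocation so that
// ggml-alloc does not have to reallocate when inference alternates between
// prompt processing and token generation.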

void llama_context::synchronize() {
    if (!sched) {
        return;
    }

    ggml_backend_sched_synchronize(sched.get());

    // FIXME: if multiple single tokens are evaluated without a synchronization,
    //        the stats will be added to the prompt evaluation stats
    //        this should only happen when using batch size 1 to evaluate a batch

    // add the evaluation to the stats
    if (n_queued_tokens == 1) {
        if (!cparams.no_perf) {
            t_eval_us += ggml_time_us() - t_compute_start_us;
        }
        n_eval++;
    } else if (n_queued_tokens > 1) {
        if (!cparams.no_perf) {
            t_p_eval_us += ggml_time_us() - t_compute_start_us;
        }
        n_p_eval += n_queued_tokens;
    }

    // get a more accurate load time, upon first eval
    if (n_queued_tokens > 0 && !has_evaluated_once) {
        t_load_us = ggml_time_us() - t_start_us;
        has_evaluated_once = true;
    }

    n_queued_tokens = 0;
    t_compute_start_us = 0;
}

const llama_model & llama_context::get_model() const {
    return model;
}

const llama_cparams & llama_context::get_cparams() const {
    return cparams;
}

ggml_backend_sched_t llama_context::get_sched() const {
    return sched.get();
}

uint32_t llama_context::n_ctx() const {
    return cparams.n_ctx;
}

uint32_t llama_context::n_ctx_seq() const {
    return cparams.n_ctx_seq;
}

uint32_t llama_context::n_batch() const {
    return cparams.n_batch;
}

uint32_t llama_context::n_ubatch() const {
    return cparams.n_ubatch;
}

uint32_t llama_context::n_seq_max() const {
    return cparams.n_seq_max;
}

uint32_t llama_context::n_threads() const {
    return cparams.n_threads;
}

uint32_t llama_context::n_threads_batch() const {
    return cparams.n_threads_batch;
}

llama_memory_t llama_context::get_memory() const {
    return memory.get();
}

bool llama_context::memory_update(bool optimize) {
    if (!memory) {
        return false;
    }

    {
        const auto mctx = memory->init_update(this, optimize);
        switch (mctx->get_status()) {
            case LLAMA_MEMORY_STATUS_SUCCESS:
                {
                    // noop
                } break;
            case LLAMA_MEMORY_STATUS_NO_UPDATE:
                {
                    // no updates need to be performed
                    return false;
                }
            case LLAMA_MEMORY_STATUS_FAILED_PREPARE:
            case LLAMA_MEMORY_STATUS_FAILED_COMPUTE:
                {
                    LLAMA_LOG_ERROR("%s: failed to prepare memory update\n", __func__);
                    return false;
                }
        }

        // reset the previous graph result to make sure that it won't be reused
        // TODO: change the mctx->apply() to return information if a graph reserve is needed
        //       reset the graph result only if the memory module did reset the scheduler
        gf_res_prev->reset();

        if (!mctx->apply()) {
            LLAMA_LOG_ERROR("%s: failed to apply memory update\n", __func__);
        }
    }

    // if the memory module did any computation, we have to reserve a new worst-case graph
    {
        const auto mctx = memory->init_full();
        if (!mctx) {
            throw std::runtime_error("failed to initialize memory context");
        }

        const uint32_t n_seqs   = cparams.n_seq_max;
        const uint32_t n_tokens = std::min(cparams.n_ctx, cparams.n_ubatch);

        auto * gf = graph_reserve(n_tokens, n_seqs, n_tokens, mctx.get());
        if (!gf) {
            LLAMA_LOG_ERROR("%s: failed to reserve graph after the memory update\n", __func__);
        }
    }

    return true;
}

enum llama_pooling_type llama_context::pooling_type() const {
    return cparams.pooling_type;
}

float * llama_context::get_logits() {
    output_reorder();

    return logits.data;
}

int64_t llama_context::output_resolve_row(int32_t i) const {
    int64_t j = -1;

    // support negative indices (last output row)
    if (i < 0) {
        j = n_outputs + i;
        if (j < 0) {
            throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
        }
    } else if ((size_t) i >= output_ids.size()) {
        throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
    } else {
        // use output_ids to translate the batch token index into a row number
        // that holds this token's data.
        j = output_ids[i];
    }

    if (j < 0) {
        // the batch token was not configured to output anything
        throw std::runtime_error(format("batch.logits[%d] != true", i));
    }

    if (j >= n_outputs) {
        throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
    }

    return j;
}
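
// Worked example for output_resolve_row() (illustrative): after decoding a
// 4-token batch where only tokens 1 and 3 requested output (batch.logits =
// {0, 1, 0, 1}), output_ids is {-1, 0, -1, 1} and n_outputs is 2. Then:
//   output_resolve_row(3)  == 1    (second output row)
//   output_resolve_row(-1) == 1    (negative index counts from the last row)
//   output_resolve_row(0)  throws  (token 0 produced no output)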

float * llama_context::get_logits_ith(int32_t i) {
    int64_t j = -1;

    output_reorder();

    try {
        if (logits.data == nullptr) {
            throw std::runtime_error("no logits");
        }

        // TODO: use output_resolve_row()
        if (i < 0) {
            j = n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
            }
        } else if ((size_t) i >= output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
        } else {
            j = output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= n_outputs) {
            // this should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
        }

        return logits.data + j*model.vocab.n_tokens();
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid logits id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ABORT("fatal error");
#else
        return nullptr;
#endif
    }
}
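
// Usage sketch (illustrative; the public llama.h wrapper forwards to the
// method above): reading the logits of the last output token after a decode:
//
//     if (llama_decode(ctx, batch) == 0) {
//         const float * logits = llama_get_logits_ith(ctx, -1);
//         const int n_vocab = llama_vocab_n_tokens(llama_model_get_vocab(model));
//         // logits[0..n_vocab) holds the unnormalized scores for the next token
//     }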

float * llama_context::get_embeddings() {
    output_reorder();

    return embd.data;
}

llama_token * llama_context::get_sampled_tokens() const {
    return sampling.sampled.data;
}

float * llama_context::get_embeddings_ith(int32_t i) {
    int64_t j = -1;

    output_reorder();

    try {
        if (embd.data == nullptr) {
            throw std::runtime_error("no embeddings");
        }

        // TODO: use output_resolve_row()
        if (i < 0) {
            j = n_outputs + i;
            if (j < 0) {
                throw std::runtime_error(format("negative index out of range [0, %d)", n_outputs));
            }
        } else if ((size_t) i >= output_ids.size()) {
            throw std::runtime_error(format("out of range [0, %zu)", output_ids.size()));
        } else {
            j = output_ids[i];
        }

        if (j < 0) {
            throw std::runtime_error(format("batch.logits[%d] != true", i));
        }
        if (j >= n_outputs) {
            // this should not happen
            throw std::runtime_error(format("corrupt output buffer (j=%" PRId64 ", n_outputs=%d)", j, n_outputs));
        }

        const uint32_t n_embd_out = model.hparams.n_embd_out();
        return embd.data + j*n_embd_out;
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid embeddings id %d, reason: %s\n", __func__, i, err.what());
#ifndef NDEBUG
        GGML_ABORT("fatal error");
#else
        return nullptr;
#endif
    }
}

float * llama_context::get_embeddings_seq(llama_seq_id seq_id) {
    auto it = embd_seq.find(seq_id);
    if (it == embd_seq.end()) {
        return nullptr;
    }

    return it->second.data();
}
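
// Usage sketch (illustrative; public llama.h wrappers): retrieving a pooled
// sequence embedding from an embedding model with mean pooling enabled:
//
//     llama_context_params cp = llama_context_default_params();
//     cp.embeddings   = true;
//     cp.pooling_type = LLAMA_POOLING_TYPE_MEAN;
//     // ... create the context, then encode/decode a batch for seq_id 0 ...
//     const float * e = llama_get_embeddings_seq(ctx, 0); // nullptr if seq 0 produced none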

llama_token llama_context::get_sampled_token_ith(int32_t idx) {
    output_reorder();

    if (!sampling.sampled.has_data()) {
        return LLAMA_TOKEN_NULL;
    }

    try {
        const int64_t row = output_resolve_row(idx);
        GGML_ASSERT(row < (int64_t) sampling.sampled.size);
        return sampling.sampled.data[row];
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid backend sampled token id %d, reason: %s\n", __func__, idx, err.what());
        return LLAMA_TOKEN_NULL;
    }
}

float * llama_context::get_sampled_probs_ith(int32_t idx) {
    output_reorder();

    if (!sampling.probs.has_data()) {
        return nullptr;
    }

    try {
        const int64_t row = output_resolve_row(idx);
        if ((size_t) row >= sampling.probs_count.size() || sampling.probs_count[row] == 0) {
            return nullptr;
        }
        return sampling.probs.data + row*model.vocab.n_tokens();
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid backend sampled probs id %d, reason: %s\n", __func__, idx, err.what());
        return nullptr;
    }
}

float * llama_context::get_sampled_logits_ith(int32_t idx) {
    output_reorder();

    if (!sampling.logits.has_data()) {
        return nullptr;
    }

    try {
        const int64_t row = output_resolve_row(idx);
        if ((size_t) row >= sampling.logits_count.size() || sampling.logits_count[row] == 0) {
            return nullptr;
        }
        return sampling.logits.data + row*model.vocab.n_tokens();
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid backend sampled logits id %d, reason: %s\n", __func__, idx, err.what());
        return nullptr;
    }
}

const llama_token * llama_context::get_sampled_candidates_ith(int32_t idx) {
    output_reorder();

    try {
        const int64_t row = output_resolve_row(idx);
        if (sampling.candidates.has_data() &&
            (size_t) row < sampling.candidates_count.size() &&
            sampling.candidates_count[row] > 0) {
            return sampling.candidates.data + row*model.vocab.n_tokens();
        }
    } catch (const std::exception & err) {
        // fall back to the full vocab list
    }

    return sampling.token_ids_full_vocab.data();
}

size_t llama_context::get_sampled_candidates_count(int32_t idx) {
    output_reorder();

    if (!sampling.candidates.has_data()) {
        return 0;
    }

    try {
        const int64_t row = output_resolve_row(idx);
        if ((size_t) row >= sampling.candidates_count.size()) {
            return 0;
        }
        return sampling.candidates_count[row];
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid backend sampled candidates count id %d, reason: %s\n", __func__, idx, err.what());
        return 0;
    }
}

size_t llama_context::get_sampled_logits_count(int32_t idx) {
    output_reorder();

    if (!sampling.logits.has_data()) {
        return model.vocab.n_tokens();
    }

    try {
        const int64_t row = output_resolve_row(idx);
        if ((size_t) row >= sampling.logits_count.size()) {
            return 0;
        }
        return sampling.logits_count[row];
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid backend sampled logits count id %d, reason: %s\n", __func__, idx, err.what());
        return 0;
    }
}

size_t llama_context::get_sampled_probs_count(int32_t idx) {
    output_reorder();

    if (!sampling.probs.has_data()) {
        return 0;
    }

    try {
        const int64_t row = output_resolve_row(idx);
        if ((size_t) row >= sampling.probs_count.size()) {
            return 0;
        }
        return sampling.probs_count[row];
    } catch (const std::exception & err) {
        LLAMA_LOG_ERROR("%s: invalid backend sampled probs count id %d, reason: %s\n", __func__, idx, err.what());
        return 0;
    }
}

void llama_context::attach_threadpool(
        ggml_threadpool_t threadpool,
        ggml_threadpool_t threadpool_batch) {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    this->threadpool       = threadpool;
    this->threadpool_batch = threadpool_batch ? threadpool_batch : threadpool;
}

void llama_context::detach_threadpool() {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    this->threadpool       = nullptr;
    this->threadpool_batch = nullptr;
}

void llama_context::set_n_threads(int32_t n_threads, int32_t n_threads_batch) {
    LLAMA_LOG_DEBUG("%s: n_threads = %d, n_threads_batch = %d\n", __func__, n_threads, n_threads_batch);

    cparams.n_threads       = n_threads;
    cparams.n_threads_batch = n_threads_batch;
}

void llama_context::set_abort_callback(bool (*abort_callback)(void * data), void * abort_callback_data) {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    this->abort_callback      = abort_callback;
    this->abort_callback_data = abort_callback_data;

    for (auto & backend : backends) {
        auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend.get()));
        auto * set_abort_callback_fn = (ggml_backend_set_abort_callback_t) ggml_backend_reg_get_proc_address(reg, "ggml_backend_set_abort_callback");
        if (set_abort_callback_fn) {
            set_abort_callback_fn(backend.get(), this->abort_callback, this->abort_callback_data);
        }
    }
}

void llama_context::set_embeddings(bool value) {
    LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);

    cparams.embeddings = value;

    // TODO: not sure yet if we want to reserve here
    //sched_need_reserve = true;
}

void llama_context::set_causal_attn(bool value) {
    LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);

    if (cparams.causal_attn == value) {
        return;
    }

    cparams.causal_attn = value;

    sched_need_reserve = true;
}

void llama_context::set_warmup(bool value) {
    LLAMA_LOG_DEBUG("%s: value = %d\n", __func__, value);

    if (cparams.warmup == value) {
        return;
    }

    cparams.warmup = value;

    // warmups usually use small batches, so there is no need to reserve
    //sched_need_reserve = true;
}

bool llama_context::set_sampler(llama_seq_id seq_id, llama_sampler * sampler) {
    if (!sampler && sampling.samplers.count(seq_id) == 0) {
        return true;
    }

    LLAMA_LOG_DEBUG("%s: seq_id = %d, sampler = %p\n", __func__, (int) seq_id, (void *) sampler);

    const bool can_offload =
        sampler &&
        sampler->iface->backend_init &&
        sampler->iface->backend_apply &&
        llama_sampler_chain_n(sampler) > 0;

    if (sampler && can_offload) {
        auto * buft = ggml_backend_dev_buffer_type(model.dev_output());

        sampler->iface->backend_init(sampler, buft);

        sampling.samplers[seq_id] = sampler;

        sched_need_reserve = true;

        return true;
    }

    if (sampler && !can_offload) {
        LLAMA_LOG_WARN("%s: sampler '%s' for seq_id = %d cannot be offloaded to the backend\n", __func__, llama_sampler_name(sampler), seq_id);

        if (sampling.samplers.count(seq_id) > 0) {
            sched_need_reserve = true;
        }

        sampling.samplers.erase(seq_id);

        return false;
    }

    sampling.samplers.erase(seq_id);

    sched_need_reserve = true;

    return true;
}
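
// Note (summarizing the checks above): a sampler is installed on the backend
// only if it is a non-empty llama_sampler_chain whose interface implements
// both backend_init and backend_apply; anything else is rejected with a
// warning, and any previously installed backend sampler for the sequence is
// removed so that sampling falls back to the CPU path. Illustrative sketch,
// assuming a public wrapper that forwards here (the exact llama.h entry point
// is not shown in this file):
//
//     llama_sampler * chain = llama_sampler_chain_init(llama_sampler_chain_default_params());
//     llama_sampler_chain_add(chain, llama_sampler_init_greedy());
//     // hypothetical wrapper name:
//     // llama_set_sampler(ctx, /*seq_id =*/ 0, chain); // returns false if not offloadable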

void llama_context::set_adapter_lora(
        llama_adapter_lora * adapter,
        float scale) {
    LLAMA_LOG_DEBUG("%s: adapter = %p, scale = %f\n", __func__, (void *) adapter, scale);

    if (auto it = loras.find(adapter); it != loras.end()) {
        if (it->second == scale) {
            return;
        }
    }

    loras[adapter] = scale;

    sched_need_reserve = true;
}

bool llama_context::rm_adapter_lora(
        llama_adapter_lora * adapter) {
    LLAMA_LOG_DEBUG("%s: adapter = %p\n", __func__, (void *) adapter);

    auto it = loras.find(adapter);
    if (it != loras.end()) {
        loras.erase(it);

        sched_need_reserve = true;

        return true;
    }

    return false;
}

void llama_context::clear_adapter_lora() {
    LLAMA_LOG_DEBUG("%s: call\n", __func__);

    if (loras.empty()) {
        return;
    }

    loras.clear();

    sched_need_reserve = true;
}
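
// Usage sketch (illustrative; public llama.h adapter API): loading a LoRA
// adapter and attaching it to this context at half strength:
//
//     llama_adapter_lora * adapter = llama_adapter_lora_init(model, "lora.gguf");
//     if (adapter) {
//         llama_set_adapter_lora(ctx, adapter, 0.5f); // schedules a graph re-reserve
//         // ... later: llama_rm_adapter_lora(ctx, adapter) or llama_clear_adapter_lora(ctx)
//     }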

bool llama_context::apply_adapter_cvec(
        const float * data,
        size_t len,
        int32_t n_embd,
        int32_t il_start,
        int32_t il_end) {
    LLAMA_LOG_DEBUG("%s: il_start = %d, il_end = %d\n", __func__, il_start, il_end);

    // TODO: should we reserve?

    return cvec.apply(model, data, len, n_embd, il_start, il_end);
}

llm_graph_result * llama_context::process_ubatch(const llama_ubatch & ubatch, llm_graph_type gtype, llama_memory_context_i * mctx, ggml_status & ret) {
    if (mctx && !mctx->apply()) {
        LLAMA_LOG_ERROR("%s: failed to apply memory context\n", __func__);
        ret = GGML_STATUS_FAILED;
        return nullptr;
    }

    auto * res = gf_res_prev.get();
    auto * gf  = res->get_gf();

    // the new graph parameters
    // in order to correctly reuse a graph, its full topology has to be uniquely determined by these parameters
    const auto gparams = graph_params(res, ubatch, mctx, gtype);

    if (!graph_reuse_disable && res->can_reuse(gparams)) {
        //LLAMA_LOG_DEBUG("%s: reusing previous graph\n", __func__);

        n_reused++;
    } else {
        res->reset();

        ggml_backend_sched_reset(sched.get());
        ggml_backend_sched_set_eval_callback(sched.get(), cparams.cb_eval, cparams.cb_eval_user_data);

        //const auto t_start_us = ggml_time_us();

        gf = model.build_graph(gparams);

        //LLAMA_LOG_INFO("graph build time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);

        if (!gf) {
            LLAMA_LOG_ERROR("%s: failed to initialize graph\n", __func__);
            ret = GGML_STATUS_FAILED;
            return nullptr;
        }

        if (!ggml_backend_sched_alloc_graph(sched.get(), gf)) {
            LLAMA_LOG_ERROR("%s: failed to allocate graph\n", __func__);
            ret = GGML_STATUS_ALLOC_FAILED;
            return nullptr;
        }
    }

    // set the input data for the input tensors
    {
        //const auto t_start_us = ggml_time_us();

        res->set_inputs(&ubatch);

        //LLAMA_LOG_INFO("graph set inputs time: %.3f ms\n", (ggml_time_us() - t_start_us)/1000.0);
    }

    const auto status = graph_compute(res->get_gf(), ubatch.n_tokens > 1);
    if (status != GGML_STATUS_SUCCESS) {
        LLAMA_LOG_ERROR("%s: failed to compute graph, compute status: %d\n", __func__, status);
        ret = status;
        return nullptr;
    }

    ret = GGML_STATUS_SUCCESS;

    return res;
}
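
// Flow summary for process_ubatch(): when the new graph parameters match the
// previous ones, the cached graph in gf_res_prev is reused and only its inputs
// are refreshed, skipping both graph building and scheduler allocation. This
// is the common case during token generation, where consecutive single-token
// ubatches produce topologically identical graphs; setting the
// LLAMA_GRAPH_REUSE_DISABLE=1 environment variable (see the constructor)
// forces a rebuild on every call.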

int llama_context::encode(const llama_batch & batch_inp) {
    GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT

    if (batch_inp.n_tokens == 0) {
        LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__);
        return -1;
    }

    const auto & hparams = model.hparams;

    const int64_t n_embd  = hparams.n_embd_inp();
    const int64_t n_vocab = model.vocab.n_tokens();

    // note: during encode, we always pass the full sequence starting from pos = 0
    if (!balloc->init(batch_inp, model.vocab, nullptr, n_embd, cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) {
        LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__);
        return -1;
    }

    const uint32_t n_tokens = balloc->get_n_tokens();

    // [TAG_NO_CACHE_PAD]
    // TODO: add new split mode where we pad the input sequences so that ubatch.equal_seqs == true
    const llama_ubatch ubatch = balloc->split_simple(n_tokens);

    // micro-batching is not possible for non-causal encoding, so we process the batch in a single shot
    GGML_ASSERT(cparams.n_ubatch >= n_tokens && "encoder requires n_ubatch >= n_tokens");

    if (t_compute_start_us == 0) {
        t_compute_start_us = ggml_time_us();
    }

    // TODO: this clear of the buffer can easily be forgotten - need something better
    embd_seq.clear();

    sched_reserve();

    n_queued_tokens += n_tokens;

    // reserve output buffer
    if (output_reserve(n_tokens) < n_tokens) {
        LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_tokens);
        return -2;
    }

    for (uint32_t i = 0; i < n_tokens; ++i) {
        output_ids[i] = i;
    }

    n_outputs = n_tokens;

    const auto causal_attn_org = cparams.causal_attn;

    // always use non-causal attention for encoder graphs
    // TODO: this is a tmp solution until we have a proper way to support enc-dec models
    //       ref: https://github.com/ggml-org/llama.cpp/pull/12181#issuecomment-2730451223
    cparams.causal_attn = false;

    ggml_status status;
    const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_ENCODER, nullptr, status);

    cparams.causal_attn = causal_attn_org;

    if (!res) {
        switch (status) {
            case GGML_STATUS_ABORTED:      return  2;
            case GGML_STATUS_ALLOC_FAILED: return -2;
            case GGML_STATUS_FAILED:       return -3;
            case GGML_STATUS_SUCCESS:      GGML_ABORT("should not happen");
        }
    }

    auto * t_logits = res->get_logits();
    auto * t_embd   = res->get_embd_pooled() ? res->get_embd_pooled() : res->get_embd();

    // extract logits
    if (logits.data && t_logits) {
        ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits);
        GGML_ASSERT(backend_res != nullptr);
        GGML_ASSERT(logits.data != nullptr);

        ggml_backend_tensor_get_async(backend_res, t_logits, logits.data, 0, n_tokens*n_vocab*sizeof(float));
    }

    // extract embeddings
    if (embd.data && t_embd) {
        ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd);
        GGML_ASSERT(backend_embd != nullptr);

        switch (cparams.pooling_type) {
            case LLAMA_POOLING_TYPE_NONE:
                {
                    // extract token embeddings
                    GGML_ASSERT(embd.data != nullptr);
                    const uint32_t n_embd_out = hparams.n_embd_out();

                    GGML_ASSERT(n_tokens*n_embd_out <= (int64_t) embd.size);
                    ggml_backend_tensor_get_async(backend_embd, t_embd, embd.data, 0, n_tokens*n_embd_out*sizeof(float));
                } break;
            case LLAMA_POOLING_TYPE_MEAN:
            case LLAMA_POOLING_TYPE_CLS:
            case LLAMA_POOLING_TYPE_LAST:
                {
                    // extract sequence embeddings
                    auto & embd_seq_out = embd_seq;

                    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                        const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                        embd_seq_out[seq_id].resize(n_embd);
                        ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float));
                    }
                } break;
            case LLAMA_POOLING_TYPE_RANK:
                {
                    // extract the rerank score - n_cls_out floats per sequence
                    auto & embd_seq_out = embd_seq;

                    const uint32_t n_cls_out = hparams.n_cls_out;

                    for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) {
                        const llama_seq_id seq_id  = ubatch.seq_id_unq[s];
                        const int32_t      seq_idx = ubatch.seq_idx[seq_id];

                        embd_seq_out[seq_id].resize(n_cls_out);
                        ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float));
                    }
                } break;
            case LLAMA_POOLING_TYPE_UNSPECIFIED:
                {
                    GGML_ABORT("unknown pooling type");
                }
        }
    }

    // TODO: hacky solution
    if (model.arch == LLM_ARCH_T5 && t_embd) {
        //cross.t_embd = t_embd;

        synchronize();

        cross.n_embd = t_embd->ne[0];
        cross.n_enc  = t_embd->ne[1];
        cross.v_embd.resize(cross.n_embd*cross.n_enc);
        memcpy(cross.v_embd.data(), embd.data, ggml_nbytes(t_embd));

        const auto & batch = balloc->get_batch();

        // remember the sequence ids used during the encoding - needed for cross attention later
        cross.seq_ids_enc.resize(n_tokens);
        for (uint32_t i = 0; i < n_tokens; i++) {
            cross.seq_ids_enc[i].clear();

            for (int s = 0; s < batch.n_seq_id[i]; s++) {
                const llama_seq_id seq_id = batch.seq_id[i][s];

                cross.seq_ids_enc[i].insert(seq_id);
            }
        }
    }

    return 0;
}
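
// Usage sketch (illustrative; public llama.h API): encoding a prompt with an
// encoder model (e.g. the encoder half of T5) before decoding:
//
//     llama_batch batch = llama_batch_get_one(tokens, n_tokens);
//     if (llama_encode(ctx, batch) != 0) {
//         // negative return: error; note the encoder requires n_ubatch >= n_tokens
//         // (see the assert above), so size n_ubatch accordingly
//     }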

static std::map<llama_seq_id, uint32_t> build_seq_to_output_row(const llama_ubatch & ubatch, uint32_t row_offset) {
    std::map<llama_seq_id, uint32_t> seq_to_row;
    // how many output tokens we have seen so far for this ubatch.
    uint32_t local = 0;
    for (uint32_t i = 0; i < ubatch.n_tokens; ++i) {
        // skip tokens that are not output.
        if (!ubatch.output[i]) {
            continue;
        }

        const llama_seq_id seq_id = ubatch.seq_id[i][0];
        // row_offset is the number of output tokens before this ubatch.
        seq_to_row[seq_id] = row_offset + local;
        ++local;
    }
    return seq_to_row;
}
| 1363 | |||
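| | // async-copies the sampled token of each sequence from its backend tensor into the | ||
| | // host buffer, at the output row assigned to that sequence by seq_to_row | ||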
| 1364 | static void copy_tensor_async_ints( | ||
| 1365 | const std::map<llama_seq_id, ggml_tensor*> & tensor_map, | ||
| 1366 | const buffer_view<llama_token> & sampled, | ||
| 1367 | const std::map<llama_seq_id, uint32_t> & seq_to_row, | ||
| 1368 | ggml_backend_sched_t sched) { | ||
| 1369 | if (!sampled.has_data()) { | ||
| 1370 | return; | ||
| 1371 | } | ||
| 1372 | |||
| 1373 | for (const auto & [seq_id, tensor] : tensor_map) { | ||
| 1374 | auto it = seq_to_row.find(seq_id); | ||
| 1375 | if (it == seq_to_row.end()) { | ||
| 1376 | continue; | ||
| 1377 | } | ||
| 1378 | |||
| 1379 | const uint32_t row = it->second; | ||
| 1380 | GGML_ASSERT(row < sampled.size); | ||
| 1381 | |||
| 1382 | GGML_ASSERT(ggml_is_contiguous(tensor) && "sampled tokens tensor must be contiguous for async copy"); | ||
| 1383 | |||
| 1384 | ggml_backend_t backend = ggml_backend_sched_get_tensor_backend(sched, tensor); | ||
| 1385 | ggml_backend_tensor_get_async(backend, tensor, sampled.data + row, 0, sizeof(sampled.data[row])); | ||
| 1386 | } | ||
| 1387 | } | ||
| 1388 | |||
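| | // async-copies per-sequence float rows (logits or probabilities), placed stride | ||
| | // floats apart, and records in counts how many values each row received | ||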
| 1389 | static void copy_tensor_async_floats( | ||
| 1390 | const std::map<llama_seq_id, ggml_tensor*> & tensor_map, | ||
| 1391 | const buffer_view<float> & dst, | ||
| 1392 | size_t stride, | ||
| 1393 | std::vector<uint32_t> & counts, | ||
| 1394 | const std::map<llama_seq_id, uint32_t> & seq_to_row, | ||
| 1395 | ggml_backend_sched_t sched) { | ||
| 1396 | if (!dst.has_data()) { | ||
| 1397 | return; | ||
| 1398 | } | ||
| 1399 | |||
| 1400 | for (const auto & [seq_id, tensor] : tensor_map) { | ||
| 1401 | auto it = seq_to_row.find(seq_id); | ||
| 1402 | if (it == seq_to_row.end()) { | ||
| 1403 | continue; | ||
| 1404 | } | ||
| 1405 | |||
| 1406 | const uint32_t row = it->second; | ||
| 1407 | GGML_ASSERT(row < counts.size()); | ||
| 1408 | |||
| 1409 | GGML_ASSERT(ggml_is_contiguous(tensor) && "logits/probs tensor must be contiguous for async copy"); | ||
| 1410 | |||
| 1411 | ggml_backend_t backend = ggml_backend_sched_get_tensor_backend(sched, tensor); | ||
| 1412 | float * row_ptr = dst.data + (size_t) row * stride; | ||
| 1413 | ggml_backend_tensor_get_async(backend, tensor, row_ptr, 0, ggml_nbytes(tensor)); | ||
| 1414 | |||
| 1415 | // Update the actual number of logits/probabilities that were written for this row. | ||
| 1416 | counts[row] = ggml_nelements(tensor); | ||
| 1417 | } | ||
| 1418 | } | ||
| 1419 | |||
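| | // same as copy_tensor_async_floats, but for the candidate token ids produced by | ||
| | // the backend samplers | ||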
| 1420 | static void copy_tensor_async_candidates( | ||
| 1421 | const std::map<llama_seq_id, ggml_tensor*> & tensor_map, | ||
| 1422 | const buffer_view<llama_token> & dst, | ||
| 1423 | size_t stride, | ||
| 1424 | std::vector<uint32_t> & counts, | ||
| 1425 | const std::map<llama_seq_id, uint32_t> & seq_to_row, | ||
| 1426 | ggml_backend_sched_t sched) { | ||
| 1427 | if (!dst.has_data()) { | ||
| 1428 | return; | ||
| 1429 | } | ||
| 1430 | |||
| 1431 | for (const auto & [seq_id, tensor] : tensor_map) { | ||
| 1432 | auto it = seq_to_row.find(seq_id); | ||
| 1433 | if (it == seq_to_row.end()) { | ||
| 1434 | continue; | ||
| 1435 | } | ||
| 1436 | |||
| 1437 | const uint32_t row = it->second; | ||
| 1438 | GGML_ASSERT(row < counts.size()); | ||
| 1439 | |||
| 1440 | GGML_ASSERT(ggml_is_contiguous(tensor) && "candidates tensor must be contiguous for async copy"); | ||
| 1441 | |||
| 1442 | ggml_backend_t backend = ggml_backend_sched_get_tensor_backend(sched, tensor); | ||
| 1443 | llama_token * row_ptr = dst.data + (size_t) row * stride; | ||
| 1444 | ggml_backend_tensor_get_async(backend, tensor, row_ptr, 0, ggml_nbytes(tensor)); | ||
| 1445 | |||
| 1446 | // Update the actual number of candidates that were written. | ||
| 1447 | counts[row] = ggml_nelements(tensor); | ||
| 1448 | } | ||
| 1449 | } | ||
| 1450 | |||
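| | // returns true when at least one output token belongs to a sequence without a | ||
| | // backend sampler, i.e. raw logits still have to be copied back to the host | ||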
| 1451 | static bool needs_raw_logits(const llama_ubatch & ubatch, const std::map<llama_seq_id, llama_sampler *> & samplers) { | ||
| 1452 | for (uint32_t i = 0; i < ubatch.n_tokens; i++) { | ||
| 1453 | if (!ubatch.output[i]) { | ||
| 1454 | continue; | ||
| 1455 | } | ||
| 1456 | |||
| 1457 | // Check if the output token has at least one sequence without a backend sampler. | ||
| 1458 | for (int32_t j = 0; j < ubatch.n_seq_id[i]; ++j) { | ||
| 1459 | llama_seq_id seq_id = ubatch.seq_id[i][j]; | ||
| 1460 | if (samplers.find(seq_id) == samplers.end()) { | ||
| 1461 | return true; | ||
| 1462 | } | ||
| 1463 | } | ||
| 1464 | } | ||
| 1465 | return false; // all sequences use backend sampling | ||
| 1466 | } | ||
| 1467 | |||
| 1468 | int llama_context::decode(const llama_batch & batch_inp) { | ||
| 1469 | GGML_ASSERT((!batch_inp.token && batch_inp.embd) || (batch_inp.token && !batch_inp.embd)); // NOLINT | ||
| 1470 | |||
| 1471 | if (!memory) { | ||
| 1472 | LLAMA_LOG_DEBUG("%s: cannot decode batches with this context (calling encode() instead)\n", __func__); | ||
| 1473 | return encode(batch_inp); | ||
| 1474 | } | ||
| 1475 | |||
| 1476 | if (batch_inp.n_tokens == 0) { | ||
| 1477 | LLAMA_LOG_ERROR("%s: n_tokens == 0\n", __func__); | ||
| 1478 | return -1; | ||
| 1479 | } | ||
| 1480 | |||
| 1481 | const auto & vocab = model.vocab; | ||
| 1482 | const auto & hparams = model.hparams; | ||
| 1483 | |||
| 1484 | const int64_t n_vocab = vocab.n_tokens(); | ||
| 1485 | const int64_t n_embd = hparams.n_embd_inp(); | ||
| 1486 | |||
| 1487 | // when computing embeddings, all tokens are output | ||
| 1488 | const bool output_all = cparams.embeddings; | ||
| 1489 | const bool has_samplers = !sampling.samplers.empty(); | ||
| 1490 | |||
| 1491 | const uint32_t n_seq_max = cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max; | ||
| 1492 | |||
| 1493 | // TODO: avoid this workaround in the future | ||
| 1494 | if (has_samplers && batch_inp.logits) { | ||
| 1495 | std::vector<int32_t> seq_output_count(n_seq_max, 0); | ||
| 1496 | |||
| 1497 | for (int32_t i = 0; i < batch_inp.n_tokens; ++i) { | ||
| 1498 | if (batch_inp.logits[i] == 0) { | ||
| 1499 | continue; | ||
| 1500 | } | ||
| 1501 | |||
| 1502 | const int ns = batch_inp.n_seq_id ? batch_inp.n_seq_id[i] : 1; | ||
| 1503 | |||
| 1504 | for (int32_t s = 0; s < ns; ++s) { | ||
| 1505 | const llama_seq_id seq_id = batch_inp.seq_id ? batch_inp.seq_id[i][s] : 0; | ||
| 1506 | |||
| 1507 | seq_output_count[seq_id]++; | ||
| 1508 | if (seq_output_count[seq_id] > 1) { | ||
| 1509 | LLAMA_LOG_ERROR("%s: backend sampling requires at most one output token per sequence (seq_id %d had %d)\n", | ||
| 1510 | __func__, seq_id, seq_output_count[seq_id]); | ||
| 1511 | return -1; | ||
| 1512 | } | ||
| 1513 | } | ||
| 1514 | } | ||
| 1515 | } | ||
| 1516 | |||
| 1517 | if (!balloc->init(batch_inp, vocab, memory.get(), n_embd, n_seq_max, output_all)) { | ||
| 1518 | LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); | ||
| 1519 | return -1; | ||
| 1520 | } | ||
| 1521 | |||
| 1522 | const uint32_t n_tokens_all = balloc->get_n_tokens(); | ||
| 1523 | const uint32_t n_outputs_all = balloc->get_n_outputs(); | ||
| 1524 | |||
| 1525 | if (output_all) { | ||
| 1526 | // require that all tokens are output | ||
| 1527 | if (n_outputs_all != n_tokens_all) { | ||
| 1528 | LLAMA_LOG_ERROR("%s: pooled embedding requires that all tokens are output (n_outputs_all = %d, n_tokens_all = %d)\n", | ||
| 1529 | __func__, n_outputs_all, n_tokens_all); | ||
| 1530 | return -1; | ||
| 1531 | } | ||
| 1532 | } | ||
| 1533 | |||
| 1534 | GGML_ASSERT(n_tokens_all <= cparams.n_batch); | ||
| 1535 | |||
| 1536 | GGML_ASSERT((cparams.causal_attn || cparams.n_ubatch >= n_tokens_all) && "non-causal attention requires n_ubatch >= n_tokens"); | ||
| 1537 | |||
| 1538 | if (t_compute_start_us == 0) { | ||
| 1539 | t_compute_start_us = ggml_time_us(); | ||
| 1540 | } | ||
| 1541 | n_queued_tokens += n_tokens_all; | ||
| 1542 | |||
| 1543 | // TODO: clearing these buffers can easily be forgotten - need something better | ||
| 1544 | embd_seq.clear(); | ||
| 1545 | output_swaps.clear(); | ||
| 1546 | |||
| 1547 | sched_reserve(); | ||
| 1548 | |||
| 1549 | bool did_optimize = false; | ||
| 1550 | |||
| 1551 | // handle any pending shifts/copies | ||
| 1552 | memory_update(false); | ||
| 1553 | |||
| 1554 | llama_memory_context_ptr mctx; | ||
| 1555 | |||
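| |     // find a memory slot for the batch, optimizing the cache at most once if the | ||
| |     // first attempt fails | ||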
| 1556 | while (true) { | ||
| 1557 | mctx = memory->init_batch(*balloc, cparams.n_ubatch, output_all); | ||
| 1558 | if (!mctx) { | ||
| 1559 | return -2; | ||
| 1560 | } | ||
| 1561 | |||
| 1562 | switch (mctx->get_status()) { | ||
| 1563 | case LLAMA_MEMORY_STATUS_SUCCESS: | ||
| 1564 | { | ||
| 1565 | } break; | ||
| 1566 | case LLAMA_MEMORY_STATUS_NO_UPDATE: | ||
| 1567 | { | ||
| 1568 | LLAMA_LOG_ERROR("%s: unexpected memory context status: %d\n", __func__, mctx->get_status()); | ||
| 1569 | |||
| 1570 | return -2; | ||
| 1571 | } | ||
| 1572 | case LLAMA_MEMORY_STATUS_FAILED_PREPARE: | ||
| 1573 | { | ||
| 1574 | if (!did_optimize) { | ||
| 1575 | did_optimize = true; | ||
| 1576 | |||
| 1577 | if (memory_update(true)) { | ||
| 1578 | LLAMA_LOG_DEBUG("%s: retrying batch size %d after cache optimization\n", __func__, balloc->get_n_tokens()); | ||
| 1579 | |||
| 1580 | continue; | ||
| 1581 | } | ||
| 1582 | } | ||
| 1583 | |||
| 1584 | LLAMA_LOG_WARN("%s: failed to find a memory slot for batch of size %d\n", __func__, balloc->get_n_tokens()); | ||
| 1585 | |||
| 1586 | return 1; | ||
| 1587 | } | ||
| 1588 | case LLAMA_MEMORY_STATUS_FAILED_COMPUTE: | ||
| 1589 | { | ||
| 1590 | LLAMA_LOG_ERROR("%s: compute failed while preparing batch of size %d\n", __func__, balloc->get_n_tokens()); | ||
| 1591 | |||
| 1592 | return -2; | ||
| 1593 | } | ||
| 1594 | } | ||
| 1595 | |||
| 1596 | break; | ||
| 1597 | } | ||
| 1598 | |||
| 1599 | // reserve output buffer | ||
| 1600 | if (output_reserve(n_outputs_all) < n_outputs_all) { | ||
| 1601 | LLAMA_LOG_ERROR("%s: could not reserve space for batch with %d outputs\n", __func__, n_outputs_all); | ||
| 1602 | return -2; | ||
| 1603 | } | ||
| 1604 | |||
| 1605 | int64_t n_outputs_prev = 0; | ||
| 1606 | |||
| 1607 | do { | ||
| 1608 | const auto & ubatch = mctx->get_ubatch(); | ||
| 1609 | |||
| 1610 | // count the outputs in this ubatch | ||
| 1611 | { | ||
| 1612 | int32_t n_outputs_new = 0; | ||
| 1613 | |||
| 1614 | if (n_outputs_all == n_tokens_all) { | ||
| 1615 | n_outputs_new = ubatch.n_tokens; | ||
| 1616 | } else { | ||
| 1617 | for (uint32_t i = 0; i < ubatch.n_tokens; i++) { | ||
| 1618 | n_outputs_new += (int32_t) (ubatch.output[i] != 0); | ||
| 1619 | } | ||
| 1620 | } | ||
| 1621 | |||
| 1622 | // needs to happen before the graph is built | ||
| 1623 | n_outputs = n_outputs_new; | ||
| 1624 | } | ||
| 1625 | |||
| 1626 | ggml_status status; | ||
| 1627 | const auto * res = process_ubatch(ubatch, LLM_GRAPH_TYPE_DECODER, mctx.get(), status); | ||
| 1628 | |||
| 1629 | if (!res) { | ||
| 1630 | // the last ubatch failed or was aborted -> remove all positions of that ubatch from the memory module | ||
| 1631 | llama_pos pos_min[LLAMA_MAX_SEQ]; | ||
| 1632 | for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { | ||
| 1633 | pos_min[s] = std::numeric_limits<llama_pos>::max(); | ||
| 1634 | } | ||
| 1635 | |||
| 1636 | for (uint32_t i = 0; i < ubatch.n_tokens; ++i) { | ||
| 1637 | const auto & seq_id = ubatch.seq_id[i][0]; | ||
| 1638 | |||
| 1639 | pos_min[seq_id] = std::min(pos_min[seq_id], ubatch.pos[i]); | ||
| 1640 | } | ||
| 1641 | |||
| 1642 | for (int s = 0; s < LLAMA_MAX_SEQ; ++s) { | ||
| 1643 | if (pos_min[s] == std::numeric_limits<llama_pos>::max()) { | ||
| 1644 | continue; | ||
| 1645 | } | ||
| 1646 | |||
| 1647 | LLAMA_LOG_WARN("%s: removing memory module entries for seq_id = %d, pos = [%d, +inf)\n", __func__, s, pos_min[s]); | ||
| 1648 | |||
| 1649 | memory->seq_rm(s, pos_min[s], -1); | ||
| 1650 | } | ||
| 1651 | |||
| 1652 | switch (status) { | ||
| 1653 | case GGML_STATUS_ABORTED: return 2; | ||
| 1654 | case GGML_STATUS_ALLOC_FAILED: return -2; | ||
| 1655 | case GGML_STATUS_FAILED: return -3; | ||
| 1656 | case GGML_STATUS_SUCCESS: GGML_ABORT("should not happen"); | ||
| 1657 | } | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | // plot the computation graph in dot format (for debugging purposes) | ||
| 1661 | //if (n_past%100 == 0) { | ||
| 1662 | // ggml_graph_dump_dot(gf, NULL, "llama.dot"); | ||
| 1663 | //} | ||
| 1664 | |||
| 1665 | auto * t_logits = res->get_logits(); | ||
| 1666 | auto * t_embd = cparams.embeddings ? res->get_embd() : nullptr; | ||
| 1667 | |||
| 1668 | if (t_embd && res->get_embd_pooled()) { | ||
| 1669 | t_embd = res->get_embd_pooled(); | ||
| 1670 | } | ||
| 1671 | |||
| 1672 | // extract logits | ||
| 1673 | if (logits.data && t_logits && n_outputs > 0 && needs_raw_logits(ubatch, sampling.samplers)) { | ||
| 1674 | ggml_backend_t backend_res = ggml_backend_sched_get_tensor_backend(sched.get(), t_logits); | ||
| 1675 | GGML_ASSERT(backend_res != nullptr); | ||
| 1676 | GGML_ASSERT(logits.data != nullptr); | ||
| 1677 | |||
| 1678 | float * logits_out = logits.data + n_outputs_prev*n_vocab; | ||
| 1679 | |||
| 1680 | if (n_outputs) { | ||
| 1681 | GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all); | ||
| 1682 | GGML_ASSERT((n_outputs_prev + n_outputs)*n_vocab <= (int64_t) logits.size); | ||
| 1683 | ggml_backend_tensor_get_async(backend_res, t_logits, logits_out, 0, n_outputs*n_vocab*sizeof(float)); | ||
| 1684 | } | ||
| 1685 | } | ||
| 1686 | |||
| 1687 | // extract embeddings | ||
| 1688 | if (embd.data && t_embd && n_outputs > 0) { | ||
| 1689 | ggml_backend_t backend_embd = ggml_backend_sched_get_tensor_backend(sched.get(), t_embd); | ||
| 1690 | GGML_ASSERT(backend_embd != nullptr); | ||
| 1691 | |||
| 1692 | switch (cparams.pooling_type) { | ||
| 1693 | case LLAMA_POOLING_TYPE_NONE: | ||
| 1694 | { | ||
| 1695 | // extract token embeddings | ||
| 1696 | GGML_ASSERT(embd.data != nullptr); | ||
| 1697 | const uint32_t n_embd_out = hparams.n_embd_out(); | ||
| 1698 | float * embd_out = embd.data + n_outputs_prev*n_embd_out; | ||
| 1699 | |||
| 1700 | if (n_outputs) { | ||
| 1701 | GGML_ASSERT( n_outputs_prev + n_outputs <= n_outputs_all); | ||
| 1702 | GGML_ASSERT((n_outputs_prev + n_outputs)*n_embd_out <= (int64_t) embd.size); | ||
| 1703 | ggml_backend_tensor_get_async(backend_embd, t_embd, embd_out, 0, n_outputs*n_embd_out*sizeof(float)); | ||
| 1704 | } | ||
| 1705 | } break; | ||
| 1706 | case LLAMA_POOLING_TYPE_MEAN: | ||
| 1707 | case LLAMA_POOLING_TYPE_CLS: | ||
| 1708 | case LLAMA_POOLING_TYPE_LAST: | ||
| 1709 | { | ||
| 1710 | // extract sequence embeddings (cleared before processing each batch) | ||
| 1711 | auto & embd_seq_out = embd_seq; | ||
| 1712 | |||
| 1713 | for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { | ||
| 1714 | const llama_seq_id seq_id = ubatch.seq_id_unq[s]; | ||
| 1715 | const int32_t seq_idx = ubatch.seq_idx[seq_id]; | ||
| 1716 | |||
| 1717 | embd_seq_out[seq_id].resize(n_embd); | ||
| 1718 | ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_embd*seq_idx)*sizeof(float), n_embd*sizeof(float)); | ||
| 1719 | } | ||
| 1720 | } break; | ||
| 1721 | case LLAMA_POOLING_TYPE_RANK: | ||
| 1722 | { | ||
| 1723 | // extract the rerank score - n_cls_out floats per sequence | ||
| 1724 | auto & embd_seq_out = embd_seq; | ||
| 1725 | |||
| 1726 | const uint32_t n_cls_out = hparams.n_cls_out; | ||
| 1727 | |||
| 1728 | for (uint32_t s = 0; s < ubatch.n_seqs_unq; ++s) { | ||
| 1729 | const llama_seq_id seq_id = ubatch.seq_id_unq[s]; | ||
| 1730 | const int32_t seq_idx = ubatch.seq_idx[seq_id]; | ||
| 1731 | |||
| 1732 | embd_seq_out[seq_id].resize(n_cls_out); | ||
| 1733 | ggml_backend_tensor_get_async(backend_embd, t_embd, embd_seq_out[seq_id].data(), (n_cls_out*seq_idx)*sizeof(float), n_cls_out*sizeof(float)); | ||
| 1734 | } | ||
| 1735 | } break; | ||
| 1736 | case LLAMA_POOLING_TYPE_UNSPECIFIED: | ||
| 1737 | { | ||
| 1738 | GGML_ABORT("unknown pooling type"); | ||
| 1739 | } | ||
| 1740 | } | ||
| 1741 | } | ||
| 1742 | |||
| 1743 | // Copy backend sampling output if this ubatch produced any sampling tensors. | ||
| 1744 | if (has_samplers && (!res->t_sampled.empty() || !res->t_sampled_probs.empty() || !res->t_sampled_logits.empty())) { | ||
| 1745 | const auto seq_to_output_row = build_seq_to_output_row(ubatch, n_outputs_prev); | ||
| 1746 | const auto stride = n_vocab; | ||
| 1747 | |||
| 1748 | // async copy the sampling data from the backend to the host | ||
| 1749 | copy_tensor_async_ints(res->t_sampled, sampling.sampled, seq_to_output_row, sched.get()); | ||
| 1750 | |||
| 1751 | copy_tensor_async_floats (res->t_sampled_logits, sampling.logits, stride, sampling.logits_count, seq_to_output_row, sched.get()); | ||
| 1752 | copy_tensor_async_floats (res->t_sampled_probs, sampling.probs, stride, sampling.probs_count, seq_to_output_row, sched.get()); | ||
| 1753 | copy_tensor_async_candidates(res->t_candidates, sampling.candidates, stride, sampling.candidates_count, seq_to_output_row, sched.get()); | ||
| 1754 | } | ||
| 1755 | |||
| 1756 | n_outputs_prev += n_outputs; | ||
| 1757 | } while (mctx->next()); | ||
| 1758 | |||
| 1759 | // set to total number of outputs in the batch, for use in llama_get_logits_ith | ||
| 1760 | n_outputs = n_outputs_all; | ||
| 1761 | |||
| 1762 | // set output mappings | ||
| 1763 | if (n_outputs > 0) { | ||
| 1764 | bool sorted_output = true; | ||
| 1765 | |||
| 1766 | auto & out_ids = balloc->get_out_ids(); | ||
| 1767 | |||
| 1768 | GGML_ASSERT(out_ids.size() == (size_t) n_outputs); | ||
| 1769 | |||
| 1770 | for (int64_t i = 0; i < n_outputs; ++i) { | ||
| 1771 | int64_t out_id = out_ids[i]; | ||
| 1772 | output_ids[out_id] = i; | ||
| 1773 | if (out_id != i) { | ||
| 1774 | sorted_output = false; | ||
| 1775 | } | ||
| 1776 | } | ||
| 1777 | |||
| 1778 | // make the outputs have the same order they had in the user-provided batch | ||
| 1779 | // note: this is mostly relevant for recurrent models atm | ||
| 1780 | if (!sorted_output && n_outputs > 1) { | ||
| 1781 | GGML_ASSERT((size_t) n_outputs == out_ids.size()); | ||
| 1782 | |||
| 1783 | // TODO: is there something more efficient which also minimizes swaps? | ||
| 1784 | // selection sort, to minimize swaps (from https://en.wikipedia.org/wiki/Selection_sort) | ||
| 1785 | for (uint32_t i = 0; i < n_outputs - 1; ++i) { | ||
| 1786 | uint32_t j_min = i; | ||
| 1787 | for (uint32_t j = i + 1; j < n_outputs; ++j) { | ||
| 1788 | if (out_ids[j] < out_ids[j_min]) { | ||
| 1789 | j_min = j; | ||
| 1790 | } | ||
| 1791 | } | ||
| 1792 | if (j_min == i) { | ||
| 1793 | continue; | ||
| 1794 | } | ||
| 1795 | std::swap(out_ids[i], out_ids[j_min]); | ||
| 1796 | |||
| 1797 | // remember the swaps and apply them lazily upon logits/embeddings access | ||
| 1798 | output_swaps.push_back({ i, j_min }); | ||
| 1799 | } | ||
| 1800 | |||
| 1801 | std::fill(output_ids.begin(), output_ids.end(), -1); | ||
| 1802 | |||
| 1803 | for (uint32_t i = 0; i < n_outputs; ++i) { | ||
| 1804 | output_ids[out_ids[i]] = i; | ||
| 1805 | } | ||
| 1806 | } | ||
| 1807 | } | ||
| 1808 | |||
| 1809 | // wait for the computation to finish (automatically done when obtaining the model output) | ||
| 1810 | //synchronize(); | ||
| 1811 | |||
| 1812 | return 0; | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | // | ||
| 1816 | // output | ||
| 1817 | // | ||
| 1818 | |||
| 1819 | uint32_t llama_context::output_reserve(int32_t n_outputs) { | ||
| 1821 | const auto & hparams = model.hparams; | ||
| 1822 | const auto & vocab = model.vocab; | ||
| 1823 | |||
| 1824 | const int64_t n_outputs_max = std::max<int64_t>(n_outputs, n_seq_max()); | ||
| 1825 | |||
| 1826 | const auto n_batch = cparams.n_batch; | ||
| 1827 | const auto n_vocab = vocab.n_tokens(); | ||
| 1828 | const auto n_embd_out = hparams.n_embd_out(); | ||
| 1829 | |||
| 1830 | bool has_logits = true; | ||
| 1831 | bool has_embd = cparams.embeddings; | ||
| 1832 | |||
| 1833 | // TODO: hacky enc-dec support | ||
| 1834 | if (model.arch == LLM_ARCH_T5) { | ||
| 1835 | has_logits = true; | ||
| 1836 | has_embd = true; | ||
| 1837 | } | ||
| 1838 | |||
| 1840 | size_t backend_float_count = 0; | ||
| 1841 | size_t backend_token_count = 0; | ||
| 1842 | |||
| 1843 | logits.size = has_logits ? n_vocab*n_outputs_max : 0; | ||
| 1844 | embd.size = has_embd ? n_embd_out*n_outputs_max : 0; | ||
| 1845 | |||
| 1846 | // Allocate backend sampling output buffers if there are backend samplers configured. | ||
| 1847 | const bool has_sampling = !sampling.samplers.empty(); | ||
| 1848 | if (has_sampling) { | ||
| 1849 | backend_float_count = 2 * n_vocab * n_outputs_max; // logits + probs | ||
| 1850 | backend_token_count = (1 + n_vocab) * n_outputs_max; // sampled + candidates | ||
| 1851 | } | ||
| 1852 | |||
| 1853 | if (output_ids.empty()) { | ||
| 1854 | // init, never resized afterwards | ||
| 1855 | output_ids.resize(n_batch); | ||
| 1856 | } | ||
| 1857 | |||
| 1858 | const size_t prev_size = buf_output ? ggml_backend_buffer_get_size(buf_output.get()) : 0; | ||
| 1859 | const size_t new_size = | ||
| 1860 | (logits.size + embd.size + backend_float_count) * sizeof(float) + | ||
| 1861 | ( backend_token_count) * sizeof(llama_token); | ||
| 1862 | |||
| 1863 | // alloc only when more than the current capacity is required | ||
| 1864 | // TODO: also consider shrinking the buffer | ||
| 1865 | if (!buf_output || prev_size < new_size) { | ||
| 1866 | if (buf_output) { | ||
| 1867 | #ifndef NDEBUG | ||
| 1868 | // This doesn't happen often, but may be annoying in some cases (like the HellaSwag benchmark) | ||
| 1869 | LLAMA_LOG_DEBUG("%s: reallocating output buffer from size %.02f MiB to %.02f MiB\n", __func__, prev_size / 1024.0 / 1024.0, new_size / 1024.0 / 1024.0); | ||
| 1870 | #endif | ||
| 1871 | synchronize(); | ||
| 1872 | |||
| 1873 | // TODO: not needed? | ||
| 1874 | buf_output = nullptr; | ||
| 1875 | logits.data = nullptr; | ||
| 1876 | embd.data = nullptr; | ||
| 1877 | } | ||
| 1878 | |||
| 1879 | auto * buft = ggml_backend_cpu_buffer_type(); | ||
| 1880 | // try to use the host buffer of the device where the output tensor is allocated for faster transfer to system memory | ||
| 1881 | auto * output_dev = model.dev_output(); | ||
| 1882 | auto * output_dev_host_buft = output_dev ? ggml_backend_dev_host_buffer_type(output_dev) : nullptr; | ||
| 1883 | if (output_dev_host_buft) { | ||
| 1884 | buft = output_dev_host_buft; | ||
| 1885 | } | ||
| 1886 | buf_output.reset(ggml_backend_buft_alloc_buffer(buft, new_size)); | ||
| 1887 | if (buf_output == nullptr) { | ||
| 1888 | LLAMA_LOG_ERROR("%s: failed to allocate output buffer of size %.2f MiB\n", __func__, new_size / (1024.0 * 1024.0)); | ||
| 1889 | return 0; | ||
| 1890 | } | ||
| 1891 | } | ||
| 1892 | |||
| 1893 | float * output_base = (float *) ggml_backend_buffer_get_base(buf_output.get()); | ||
| 1894 | |||
| 1895 | size_t offset = 0; | ||
| 1896 | uint8_t * base = (uint8_t *) output_base; | ||
| 1897 | |||
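| |     // carve the single output buffer into consecutive views: logits, embeddings, | ||
| |     // then the backend sampling buffers | ||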
| 1898 | logits = has_logits ? buffer_view<float>{output_base, logits.size} : buffer_view<float>{nullptr, 0}; | ||
| 1899 | offset += logits.size * sizeof(float); | ||
| 1900 | |||
| 1901 | embd = has_embd ? buffer_view<float>{(float *) (base + offset), embd.size} : buffer_view<float>{nullptr, 0}; | ||
| 1902 | offset += embd.size * sizeof(float); | ||
| 1903 | |||
| 1904 | sampling.logits = {nullptr, 0}; | ||
| 1905 | sampling.probs = {nullptr, 0}; | ||
| 1906 | sampling.sampled = {nullptr, 0}; | ||
| 1907 | sampling.candidates = {nullptr, 0}; | ||
| 1908 | |||
| 1909 | if (has_sampling) { | ||
| 1910 | sampling.logits = {(float *) (base + offset), (size_t)(n_vocab*n_outputs_max)}; | ||
| 1911 | offset += sampling.logits.size * sizeof(float); | ||
| 1912 | |||
| 1913 | sampling.probs = {(float *) (base + offset), (size_t)(n_vocab*n_outputs_max)}; | ||
| 1914 | offset += sampling.probs.size * sizeof(float); | ||
| 1915 | |||
| 1916 | sampling.sampled = {(llama_token *) (base + offset), (size_t)n_outputs_max}; | ||
| 1917 | offset += sampling.sampled.size * sizeof(llama_token); | ||
| 1918 | |||
| 1919 | sampling.candidates = {(llama_token *) (base + offset), (size_t)(n_vocab*n_outputs_max)}; | ||
| 1920 | offset += sampling.candidates.size * sizeof(llama_token); | ||
| 1921 | |||
| 1922 | // The count vectors keep track of the actual number of logits/probs/candidates | ||
| 1923 | // copied from the backend for each output row. | ||
| 1924 | |||
| 1925 | sampling.logits_count.resize(n_outputs_max); | ||
| 1926 | sampling.probs_count.resize(n_outputs_max); | ||
| 1927 | sampling.candidates_count.resize(n_outputs_max); | ||
| 1928 | |||
| 1929 | std::fill(sampling.logits_count.begin(), sampling.logits_count.end(), 0); | ||
| 1930 | std::fill(sampling.probs_count.begin(), sampling.probs_count.end(), 0); | ||
| 1931 | std::fill(sampling.candidates_count.begin(), sampling.candidates_count.end(), 0); | ||
| 1932 | |||
| 1933 | std::fill_n(sampling.sampled.data, sampling.sampled.size, LLAMA_TOKEN_NULL); | ||
| 1934 | } | ||
| 1935 | |||
| 1936 | // set all ids as invalid (negative) | ||
| 1937 | std::fill(output_ids.begin(), output_ids.end(), -1); | ||
| 1938 | |||
| 1939 | this->n_outputs = 0; | ||
| 1940 | |||
| 1941 | return n_outputs_max; | ||
| 1942 | } | ||
| 1943 | |||
| 1944 | void llama_context::output_reorder() { | ||
| 1945 | const uint64_t n_vocab = model.vocab.n_tokens(); | ||
| 1946 | const uint64_t n_embd = model.hparams.n_embd; | ||
| 1947 | |||
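| |     // apply the swaps recorded during decode so that the output rows match the | ||
| |     // order of the user-provided batch | ||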
| 1948 | for (size_t s = 0; s < output_swaps.size(); ++s) { | ||
| 1949 | const uint64_t i0 = output_swaps[s].i0; | ||
| 1950 | const uint64_t i1 = output_swaps[s].i1; | ||
| 1951 | |||
| 1952 | if (logits.size > 0) { | ||
| 1953 | for (uint64_t k = 0; k < n_vocab; k++) { | ||
| 1954 | std::swap(logits.data[i0*n_vocab + k], logits.data[i1*n_vocab + k]); | ||
| 1955 | } | ||
| 1956 | } | ||
| 1957 | |||
| 1958 | if (embd.size > 0) { | ||
| 1959 | for (uint64_t k = 0; k < n_embd; k++) { | ||
| 1960 | std::swap(embd.data[i0*n_embd + k], embd.data[i1*n_embd + k]); | ||
| 1961 | } | ||
| 1962 | } | ||
| 1963 | |||
| 1964 | if (sampling.logits.has_data()) { | ||
| 1965 | for (uint64_t k = 0; k < n_vocab; ++k) { | ||
| 1966 | std::swap(sampling.logits.data[i0*n_vocab + k], sampling.logits.data[i1*n_vocab + k]); | ||
| 1967 | } | ||
| 1968 | } | ||
| 1969 | |||
| 1970 | if (sampling.probs.has_data()) { | ||
| 1971 | for (uint64_t k = 0; k < n_vocab; ++k) { | ||
| 1972 | std::swap(sampling.probs.data[i0*n_vocab + k], sampling.probs.data[i1*n_vocab + k]); | ||
| 1973 | } | ||
| 1974 | } | ||
| 1975 | |||
| 1976 | if (sampling.candidates.has_data()) { | ||
| 1977 | for (uint64_t k = 0; k < n_vocab; ++k) { | ||
| 1978 | std::swap(sampling.candidates.data[i0*n_vocab + k], sampling.candidates.data[i1*n_vocab + k]); | ||
| 1979 | } | ||
| 1980 | } | ||
| 1981 | |||
| 1982 | if (sampling.sampled.has_data()) { | ||
| 1983 | std::swap(sampling.sampled.data[i0], sampling.sampled.data[i1]); | ||
| 1984 | } | ||
| 1985 | |||
| 1986 | if (!sampling.logits_count.empty()) { | ||
| 1987 | std::swap(sampling.logits_count[i0], sampling.logits_count[i1]); | ||
| 1988 | } | ||
| 1989 | |||
| 1990 | if (!sampling.probs_count.empty()) { | ||
| 1991 | std::swap(sampling.probs_count[i0], sampling.probs_count[i1]); | ||
| 1992 | } | ||
| 1993 | |||
| 1994 | if (!sampling.candidates_count.empty()) { | ||
| 1995 | std::swap(sampling.candidates_count[i0], sampling.candidates_count[i1]); | ||
| 1996 | } | ||
| 1997 | } | ||
| 1998 | |||
| 1999 | output_swaps.clear(); | ||
| 2000 | } | ||
| 2001 | |||
| 2002 | // | ||
| 2003 | // graph | ||
| 2004 | // | ||
| 2005 | |||
| 2006 | uint32_t llama_context::graph_max_nodes(uint32_t n_tokens) const { | ||
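| |     // these architectures build graphs whose node count grows with the number of | ||
| |     // tokens, so scale the budget with both n_tokens and the tensor count | ||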
| 2007 | if (model.arch == LLM_ARCH_QWEN3NEXT || model.arch == LLM_ARCH_KIMI_LINEAR || model.arch == LLM_ARCH_QWEN35 || model.arch == LLM_ARCH_QWEN35MOE) { | ||
| 2008 | return std::max<uint32_t>(n_tokens * 40, 32u * model.n_tensors()); | ||
| 2009 | } | ||
| 2010 | uint32_t res = std::max<uint32_t>(1024u, 8u*model.n_tensors()); | ||
| 2011 | for (const auto & lora : model.loras) { | ||
| 2012 | res += lora->get_n_nodes(); | ||
| 2013 | } | ||
| 2014 | return res; | ||
| 2015 | } | ||
| 2016 | |||
| 2017 | llm_graph_result * llama_context::get_gf_res_reserve() const { | ||
| 2018 | return static_cast<llm_graph_result *>(gf_res_reserve.get()); | ||
| 2019 | } | ||
| 2020 | |||
| 2021 | ggml_cgraph * llama_context::graph_reserve( | ||
| 2022 | uint32_t n_tokens, uint32_t n_seqs, uint32_t n_outputs, const llama_memory_context_i * mctx, bool split_only, size_t * sizes) { | ||
| 2023 | LLAMA_LOG_DEBUG("%s: reserving a graph for ubatch with n_tokens = %4u, n_seqs = %2u, n_outputs = %4u\n", __func__, n_tokens, n_seqs, n_outputs); | ||
| 2024 | GGML_ASSERT(n_outputs >= 1); | ||
| 2025 | |||
| 2026 | if (n_tokens % n_seqs != 0) { | ||
| 2027 | n_tokens = ((n_tokens + (n_seqs - 1)) / n_seqs) * n_seqs; // round to next multiple of n_seqs | ||
| 2028 | n_outputs = std::max(n_outputs, n_tokens); | ||
| 2029 | |||
| 2030 | LLAMA_LOG_DEBUG("%s: making n_tokens a multiple of n_seqs - n_tokens = %u, n_seqs = %u, n_outputs = %u\n", __func__, n_tokens, n_seqs, n_outputs); | ||
| 2031 | } | ||
| 2032 | |||
| 2033 | ggml_backend_sched_reset(sched.get()); | ||
| 2034 | |||
| 2035 | // when the scheduler is reset, we cannot reuse the old graph, so reset the previous graph result to prevent that | ||
| 2036 | gf_res_prev->reset(); | ||
| 2037 | |||
| 2038 | // save the current n_outputs and restore it afterwards | ||
| 2039 | // TODO: not sure if needed, might simplify in the future by removing this | ||
| 2040 | const auto save_n_outputs = this->n_outputs; | ||
| 2041 | |||
| 2042 | this->n_outputs = n_outputs; | ||
| 2043 | |||
| 2044 | llama_batch_allocr balloc(model.hparams.n_pos_per_embd()); | ||
| 2045 | llama_ubatch ubatch = balloc.ubatch_reserve(n_tokens/n_seqs, n_seqs); | ||
| 2046 | |||
| 2047 | // set one output token per sequence in order to activate all backend samplers | ||
| 2048 | std::vector<llama_seq_id> seq_ids(n_seqs); | ||
| 2049 | for (uint32_t i = 0; i < n_seqs; ++i) { | ||
| 2050 | seq_ids[i] = i; | ||
| 2051 | ubatch.n_seq_id[i] = 1; | ||
| 2052 | ubatch.seq_id[i] = &seq_ids[i]; | ||
| 2053 | ubatch.output[i] = true; | ||
| 2054 | } | ||
| 2055 | |||
| 2056 | auto * res = gf_res_reserve.get(); | ||
| 2057 | |||
| 2058 | const auto gparams = graph_params(res, ubatch, mctx, LLM_GRAPH_TYPE_DEFAULT); | ||
| 2059 | |||
| 2060 | res->reset(); | ||
| 2061 | |||
| 2062 | auto * gf = model.build_graph(gparams); | ||
| 2063 | |||
| 2064 | this->n_outputs = save_n_outputs; | ||
| 2065 | |||
| 2066 | // initialize scheduler with the specified graph | ||
| 2067 | if (split_only) { | ||
| 2068 | if (sizes) { | ||
| 2069 | ggml_backend_sched_reserve_size(sched.get(), gf, sizes); | ||
| 2070 | } else { | ||
| 2071 | ggml_backend_sched_split_graph(sched.get(), gf); | ||
| 2072 | } | ||
| 2073 | } else if (!ggml_backend_sched_reserve(sched.get(), gf)) { | ||
| 2074 | GGML_ASSERT(!sizes); | ||
| 2075 | LLAMA_LOG_ERROR("%s: failed to allocate compute buffers\n", __func__); | ||
| 2076 | return nullptr; | ||
| 2077 | } | ||
| 2078 | |||
| 2079 | return gf; | ||
| 2080 | } | ||
| 2081 | |||
| 2082 | llm_graph_params llama_context::graph_params( | ||
| 2083 | llm_graph_result * res, | ||
| 2084 | const llama_ubatch & ubatch, | ||
| 2085 | const llama_memory_context_i * mctx, | ||
| 2086 | llm_graph_type gtype) const { | ||
| 2087 | return { | ||
| 2088 | /*.arch =*/ model.arch, | ||
| 2089 | /*.hparams =*/ model.hparams, | ||
| 2090 | /*.cparams =*/ cparams, | ||
| 2091 | /*.ubatch =*/ ubatch, | ||
| 2092 | /*.gtype =*/ gtype, | ||
| 2093 | /*.sched =*/ sched.get(), | ||
| 2094 | /*.backend_cpu =*/ backend_cpu, | ||
| 2095 | /*.cvec =*/ &cvec, | ||
| 2096 | /*.loras =*/ &loras, | ||
| 2097 | /*.mctx =*/ mctx, | ||
| 2098 | /*.cross =*/ &cross, | ||
| 2099 | /*.samplers =*/ sampling.samplers, | ||
| 2100 | /*.n_outputs =*/ n_outputs, | ||
| 2101 | /*.cb =*/ graph_get_cb(), | ||
| 2102 | /*.res =*/ res, | ||
| 2103 | }; | ||
| 2104 | } | ||
| 2105 | |||
| 2106 | ggml_status llama_context::graph_compute( | ||
| 2107 | ggml_cgraph * gf, | ||
| 2108 | bool batched) { | ||
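| |     // use the batch thread count and threadpool when evaluating multiple tokens | ||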
| 2109 | int n_threads = batched ? cparams.n_threads_batch : cparams.n_threads; | ||
| 2110 | ggml_threadpool_t tp = batched ? threadpool_batch : threadpool; | ||
| 2111 | |||
| 2112 | if (backend_cpu != nullptr) { | ||
| 2113 | auto * reg = ggml_backend_dev_backend_reg(ggml_backend_get_device(backend_cpu)); | ||
| 2114 | auto * set_threadpool_fn = (decltype(ggml_backend_cpu_set_threadpool) *) ggml_backend_reg_get_proc_address(reg, "ggml_backend_cpu_set_threadpool"); | ||
| 2115 | if (set_threadpool_fn) { | ||
| 2116 | set_threadpool_fn(backend_cpu, tp); | ||
| 2117 | } | ||
| 2118 | } | ||
| 2119 | |||
| 2120 | // set the number of threads for all the backends | ||
| 2121 | for (const auto & set_n_threads_fn : set_n_threads_fns) { | ||
| 2122 | set_n_threads_fn.second(set_n_threads_fn.first, n_threads); | ||
| 2123 | } | ||
| 2124 | |||
| 2125 | auto status = ggml_backend_sched_graph_compute_async(sched.get(), gf); | ||
| 2126 | if (status != GGML_STATUS_SUCCESS) { | ||
| 2127 | LLAMA_LOG_ERROR("%s: ggml_backend_sched_graph_compute_async failed with error %d\n", __func__, status); | ||
| 2128 | } | ||
| 2129 | |||
| 2130 | // fprintf(stderr, "splits: %d\n", ggml_backend_sched_get_n_splits(sched)); | ||
| 2131 | |||
| 2132 | return status; | ||
| 2133 | } | ||
| 2134 | |||
| 2135 | llm_graph_cb llama_context::graph_get_cb() const { | ||
| 2136 | return [&](const llama_ubatch & ubatch, ggml_tensor * cur, const char * name, int il) { | ||
| 2137 | if (il >= 0) { | ||
| 2138 | ggml_format_name(cur, "%s-%d", name, il); | ||
| 2139 | } else { | ||
| 2140 | ggml_set_name(cur, name); | ||
| 2141 | } | ||
| 2142 | |||
| 2143 | // norm may be automatically assigned to the backend of the previous layer, increasing data transfer between backends | ||
| 2144 | // FIXME: fix in ggml_backend_sched | ||
| 2145 | const bool full_offload = model.n_gpu_layers() > model.hparams.n_layer; | ||
| 2146 | if (ubatch.n_tokens < 32 || full_offload) { | ||
| 2147 | if (il != -1 && strcmp(name, "norm") == 0) { | ||
| 2148 | const auto & dev_layer = model.dev_layer(il); | ||
| 2149 | for (const auto & backend : backends) { | ||
| 2150 | if (ggml_backend_get_device(backend.get()) == dev_layer) { | ||
| 2151 | if (ggml_backend_supports_op(backend.get(), cur)) { | ||
| 2152 | ggml_backend_sched_set_tensor_backend(sched.get(), cur, backend.get()); | ||
| 2153 | } | ||
| 2154 | } | ||
| 2155 | } | ||
| 2156 | } | ||
| 2157 | } | ||
| 2158 | }; | ||
| 2159 | } | ||
| 2160 | |||
| 2161 | // | ||
| 2162 | // state save/load | ||
| 2163 | // | ||
| 2164 | |||
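| | // write-only sink that just counts bytes - used to measure the size of the | ||
| | // serialized state without actually writing it | ||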
| 2165 | class llama_io_write_dummy : public llama_io_write_i { | ||
| 2166 | public: | ||
| 2167 | llama_io_write_dummy() = default; | ||
| 2168 | |||
| 2169 | void write(const void * /* src */, size_t size) override { | ||
| 2170 | size_written += size; | ||
| 2171 | } | ||
| 2172 | |||
| 2173 | void write_tensor(const ggml_tensor * /* tensor */, size_t /* offset */, size_t size) override { | ||
| 2174 | size_written += size; | ||
| 2175 | } | ||
| 2176 | |||
| 2177 | size_t n_bytes() override { | ||
| 2178 | return size_written; | ||
| 2179 | } | ||
| 2180 | |||
| 2181 | private: | ||
| 2182 | size_t size_written = 0; | ||
| 2183 | }; | ||
| 2184 | |||
| 2185 | class llama_io_write_buffer : public llama_io_write_i { | ||
| 2186 | public: | ||
| 2187 | llama_io_write_buffer(uint8_t * p, size_t len) : ptr(p), buf_size(len) {} | ||
| 2189 | |||
| 2190 | void write(const void * src, size_t size) override { | ||
| 2191 | if (size > buf_size) { | ||
| 2192 | throw std::runtime_error("unexpectedly reached end of buffer"); | ||
| 2193 | } | ||
| 2194 | memcpy(ptr, src, size); | ||
| 2195 | ptr += size; | ||
| 2196 | size_written += size; | ||
| 2197 | buf_size -= size; | ||
| 2198 | } | ||
| 2199 | |||
| 2200 | void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override { | ||
| 2201 | if (size > buf_size) { | ||
| 2202 | throw std::runtime_error("unexpectedly reached end of buffer"); | ||
| 2203 | } | ||
| 2204 | ggml_backend_tensor_get(tensor, ptr, offset, size); | ||
| 2205 | ptr += size; | ||
| 2206 | size_written += size; | ||
| 2207 | buf_size -= size; | ||
| 2208 | } | ||
| 2209 | |||
| 2210 | size_t n_bytes() override { | ||
| 2211 | return size_written; | ||
| 2212 | } | ||
| 2213 | |||
| 2214 | private: | ||
| 2215 | uint8_t * ptr; | ||
| 2216 | size_t buf_size = 0; | ||
| 2217 | size_t size_written = 0; | ||
| 2218 | }; | ||
| 2219 | |||
| 2220 | class llama_io_read_buffer : public llama_io_read_i { | ||
| 2221 | public: | ||
| 2222 | llama_io_read_buffer(const uint8_t * p, size_t len) : ptr(p), buf_size(len) {} | ||
| 2223 | |||
| 2224 | const uint8_t * read(size_t size) override { | ||
| 2225 | const uint8_t * base_ptr = ptr; | ||
| 2226 | if (size > buf_size) { | ||
| 2227 | throw std::runtime_error("unexpectedly reached end of buffer"); | ||
| 2228 | } | ||
| 2229 | ptr += size; | ||
| 2230 | size_read += size; | ||
| 2231 | buf_size -= size; | ||
| 2232 | return base_ptr; | ||
| 2233 | } | ||
| 2234 | |||
| 2235 | void read_to(void * dst, size_t size) override { | ||
| 2236 | memcpy(dst, read(size), size); | ||
| 2237 | } | ||
| 2238 | |||
| 2239 | size_t n_bytes() override { | ||
| 2240 | return size_read; | ||
| 2241 | } | ||
| 2242 | |||
| 2243 | private: | ||
| 2244 | const uint8_t * ptr; | ||
| 2245 | size_t buf_size = 0; | ||
| 2246 | size_t size_read = 0; | ||
| 2247 | }; | ||
| 2248 | |||
| 2249 | class llama_io_write_file : public llama_io_write_i { | ||
| 2250 | public: | ||
| 2251 | llama_io_write_file(llama_file * f) : file(f) {} | ||
| 2252 | |||
| 2253 | void write(const void * src, size_t size) override { | ||
| 2254 | file->write_raw(src, size); | ||
| 2255 | size_written += size; | ||
| 2256 | } | ||
| 2257 | |||
| 2258 | void write_tensor(const ggml_tensor * tensor, size_t offset, size_t size) override { | ||
| 2259 | temp_buffer.resize(size); | ||
| 2260 | ggml_backend_tensor_get(tensor, temp_buffer.data(), offset, size); | ||
| 2261 | write(temp_buffer.data(), temp_buffer.size()); | ||
| 2262 | } | ||
| 2263 | |||
| 2264 | size_t n_bytes() override { | ||
| 2265 | return size_written; | ||
| 2266 | } | ||
| 2267 | |||
| 2268 | private: | ||
| 2269 | llama_file * file; | ||
| 2270 | size_t size_written = 0; | ||
| 2271 | std::vector<uint8_t> temp_buffer; | ||
| 2272 | }; | ||
| 2273 | |||
| 2274 | class llama_io_read_file : public llama_io_read_i { | ||
| 2275 | public: | ||
| 2276 | llama_io_read_file(llama_file * f) : file(f) {} | ||
| 2277 | |||
| 2278 | void read_to(void * dst, size_t size) override { | ||
| 2279 | file->read_raw(dst, size); | ||
| 2280 | size_read += size; | ||
| 2281 | } | ||
| 2282 | |||
| 2283 | const uint8_t * read(size_t size) override { | ||
| 2284 | temp_buffer.resize(size); | ||
| 2285 | read_to(temp_buffer.data(), size); | ||
| 2286 | return temp_buffer.data(); | ||
| 2287 | } | ||
| 2288 | |||
| 2289 | size_t n_bytes() override { | ||
| 2290 | return size_read; | ||
| 2291 | } | ||
| 2292 | |||
| 2293 | private: | ||
| 2294 | llama_file * file; | ||
| 2295 | size_t size_read = 0; | ||
| 2296 | std::vector<uint8_t> temp_buffer; | ||
| 2297 | }; | ||
| 2298 | |||
| 2299 | size_t llama_context::state_get_size() { | ||
| 2300 | llama_io_write_dummy io; | ||
| 2301 | try { | ||
| 2302 | return state_write_data(io); | ||
| 2303 | } catch (const std::exception & err) { | ||
| 2304 | LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); | ||
| 2305 | return 0; | ||
| 2306 | } | ||
| 2307 | } | ||
| 2308 | |||
| 2309 | size_t llama_context::state_get_data(uint8_t * dst, size_t size) { | ||
| 2310 | llama_io_write_buffer io(dst, size); | ||
| 2311 | try { | ||
| 2312 | return state_write_data(io); | ||
| 2313 | } catch (const std::exception & err) { | ||
| 2314 | LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); | ||
| 2315 | return 0; | ||
| 2316 | } | ||
| 2317 | } | ||
| 2318 | |||
| 2319 | size_t llama_context::state_set_data(const uint8_t * src, size_t size) { | ||
| 2320 | llama_io_read_buffer io(src, size); | ||
| 2321 | try { | ||
| 2322 | return state_read_data(io); | ||
| 2323 | } catch (const std::exception & err) { | ||
| 2324 | LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); | ||
| 2325 | return 0; | ||
| 2326 | } | ||
| 2327 | } | ||
| 2328 | |||
| 2329 | size_t llama_context::state_seq_get_size(llama_seq_id seq_id, llama_state_seq_flags flags) { | ||
| 2330 | llama_io_write_dummy io; | ||
| 2331 | try { | ||
| 2332 | return state_seq_write_data(io, seq_id, flags); | ||
| 2333 | } catch (const std::exception & err) { | ||
| 2334 | LLAMA_LOG_ERROR("%s: error getting state size: %s\n", __func__, err.what()); | ||
| 2335 | return 0; | ||
| 2336 | } | ||
| 2337 | } | ||
| 2338 | |||
| 2339 | size_t llama_context::state_seq_get_data(llama_seq_id seq_id, uint8_t * dst, size_t size, llama_state_seq_flags flags) { | ||
| 2340 | llama_io_write_buffer io(dst, size); | ||
| 2341 | try { | ||
| 2342 | return state_seq_write_data(io, seq_id, flags); | ||
| 2343 | } catch (const std::exception & err) { | ||
| 2344 | LLAMA_LOG_ERROR("%s: error saving state: %s\n", __func__, err.what()); | ||
| 2345 | return 0; | ||
| 2346 | } | ||
| 2347 | } | ||
| 2348 | |||
| 2349 | size_t llama_context::state_seq_set_data(llama_seq_id seq_id, const uint8_t * src, size_t size, llama_state_seq_flags flags) { | ||
| 2350 | llama_io_read_buffer io(src, size); | ||
| 2351 | try { | ||
| 2352 | return state_seq_read_data(io, seq_id, flags); | ||
| 2353 | } catch (const std::exception & err) { | ||
| 2354 | LLAMA_LOG_ERROR("%s: error loading state: %s\n", __func__, err.what()); | ||
| 2355 | return 0; | ||
| 2356 | } | ||
| 2357 | } | ||
| 2358 | |||
| 2359 | bool llama_context::state_load_file(const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { | ||
| 2360 | llama_file file(filepath, "rb"); | ||
| 2361 | |||
| 2362 | // sanity checks | ||
| 2363 | { | ||
| 2364 | const uint32_t magic = file.read_u32(); | ||
| 2365 | const uint32_t version = file.read_u32(); | ||
| 2366 | |||
| 2367 | if (magic != LLAMA_SESSION_MAGIC || version != LLAMA_SESSION_VERSION) { | ||
| 2368 | LLAMA_LOG_ERROR("%s: unknown (magic, version) for session file: %08x, %08x\n", __func__, magic, version); | ||
| 2369 | return false; | ||
| 2370 | } | ||
| 2371 | } | ||
| 2372 | |||
| 2373 | // load the prompt | ||
| 2374 | { | ||
| 2375 | const uint32_t n_token_count = file.read_u32(); | ||
| 2376 | |||
| 2377 | if (n_token_count > n_token_capacity) { | ||
| 2378 | LLAMA_LOG_ERROR("%s: token count in session file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); | ||
| 2379 | return false; | ||
| 2380 | } | ||
| 2381 | |||
| 2382 | file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); | ||
| 2383 | *n_token_count_out = n_token_count; | ||
| 2384 | } | ||
| 2385 | |||
| 2386 | // restore the context state | ||
| 2387 | { | ||
| 2388 | const size_t n_state_size_cur = file.size() - file.tell(); | ||
| 2389 | |||
| 2390 | llama_io_read_file io(&file); | ||
| 2391 | const size_t n_read = state_read_data(io); | ||
| 2392 | |||
| 2393 | if (n_read != n_state_size_cur) { | ||
| 2394 | LLAMA_LOG_ERROR("%s: did not read all of the session file data! size %zu, got %zu\n", __func__, n_state_size_cur, n_read); | ||
| 2395 | return false; | ||
| 2396 | } | ||
| 2397 | } | ||
| 2398 | |||
| 2399 | return true; | ||
| 2400 | } | ||
| 2401 | |||
| 2402 | bool llama_context::state_save_file(const char * filepath, const llama_token * tokens, size_t n_token_count) { | ||
| 2403 | llama_file file(filepath, "wb"); | ||
| 2404 | |||
| 2405 | file.write_u32(LLAMA_SESSION_MAGIC); | ||
| 2406 | file.write_u32(LLAMA_SESSION_VERSION); | ||
| 2407 | |||
| 2408 | // save the prompt | ||
| 2409 | file.write_u32((uint32_t) n_token_count); | ||
| 2410 | file.write_raw(tokens, sizeof(llama_token) * n_token_count); | ||
| 2411 | |||
| 2412 | // save the context state using stream saving | ||
| 2413 | llama_io_write_file io(&file); | ||
| 2414 | state_write_data(io); | ||
| 2415 | |||
| 2416 | return true; | ||
| 2417 | } | ||
| 2418 | |||
| 2419 | size_t llama_context::state_seq_load_file(llama_seq_id seq_id, const char * filepath, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { | ||
| 2420 | llama_file file(filepath, "rb"); | ||
| 2421 | |||
| 2422 | // version checks | ||
| 2423 | { | ||
| 2424 | const uint32_t magic = file.read_u32(); | ||
| 2425 | const uint32_t version = file.read_u32(); | ||
| 2426 | |||
| 2427 | if (magic != LLAMA_STATE_SEQ_MAGIC || version != LLAMA_STATE_SEQ_VERSION) { | ||
| 2428 | LLAMA_LOG_ERROR("%s: unknown (magic, version) for sequence state file: %08x, %08x\n", __func__, magic, version); | ||
| 2429 | return 0; | ||
| 2430 | } | ||
| 2431 | } | ||
| 2432 | |||
| 2433 | // load the prompt | ||
| 2434 | { | ||
| 2435 | const uint32_t n_token_count = file.read_u32(); | ||
| 2436 | |||
| 2437 | if (n_token_count > n_token_capacity) { | ||
| 2438 | LLAMA_LOG_ERROR("%s: token count in sequence state file exceeded capacity! %u > %zu\n", __func__, n_token_count, n_token_capacity); | ||
| 2439 | return 0; | ||
| 2440 | } | ||
| 2441 | |||
| 2442 | file.read_raw(tokens_out, sizeof(llama_token) * n_token_count); | ||
| 2443 | *n_token_count_out = n_token_count; | ||
| 2444 | } | ||
| 2445 | |||
| 2446 | // restore the context state | ||
| 2447 | { | ||
| 2448 | const size_t state_size = file.size() - file.tell(); | ||
| 2449 | llama_io_read_file io(&file); | ||
| 2450 | const size_t nread = state_seq_read_data(io, seq_id, 0); | ||
| 2451 | if (!nread) { | ||
| 2452 | LLAMA_LOG_ERROR("%s: failed to restore sequence state\n", __func__); | ||
| 2453 | return 0; | ||
| 2454 | } | ||
| 2455 | GGML_ASSERT(nread <= state_size); | ||
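| |         // expected file layout: 3 header fields (magic, version, n_token_count), | ||
| |         // the prompt tokens, then the sequence state | ||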
| 2456 | GGML_ASSERT(nread + sizeof(uint32_t) * 3 + sizeof(llama_token) * *n_token_count_out == file.tell()); | ||
| 2457 | } | ||
| 2458 | |||
| 2459 | return file.tell(); | ||
| 2460 | } | ||
| 2461 | |||
| 2462 | size_t llama_context::state_seq_save_file(llama_seq_id seq_id, const char * filepath, const llama_token * tokens, size_t n_token_count) { | ||
| 2463 | llama_file file(filepath, "wb"); | ||
| 2464 | |||
| 2465 | file.write_u32(LLAMA_STATE_SEQ_MAGIC); | ||
| 2466 | file.write_u32(LLAMA_STATE_SEQ_VERSION); | ||
| 2467 | |||
| 2468 | // save the prompt | ||
| 2469 | file.write_u32((uint32_t) n_token_count); | ||
| 2470 | file.write_raw(tokens, sizeof(llama_token) * n_token_count); | ||
| 2471 | |||
| 2472 | // save the context state using stream saving | ||
| 2473 | llama_io_write_file io(&file); | ||
| 2474 | state_seq_write_data(io, seq_id, 0); | ||
| 2475 | |||
| 2476 | const size_t res = file.tell(); | ||
| 2477 | GGML_ASSERT(res == sizeof(uint32_t) * 3 + sizeof(llama_token) * n_token_count + io.n_bytes()); | ||
| 2478 | |||
| 2479 | return res; | ||
| 2480 | } | ||
| 2481 | |||
| 2482 | size_t llama_context::state_write_data(llama_io_write_i & io) { | ||
| 2483 | LLAMA_LOG_DEBUG("%s: writing state\n", __func__); | ||
| 2484 | |||
| 2485 | // write model info | ||
| 2486 | { | ||
| 2487 | LLAMA_LOG_DEBUG("%s: - writing model info\n", __func__); | ||
| 2488 | |||
| 2489 | const std::string arch_str = llm_arch_name(model.arch); | ||
| 2490 | io.write_string(arch_str); | ||
| 2491 | // TODO: add more model-specific info which should prevent loading the session file if not identical | ||
| 2492 | } | ||
| 2493 | |||
| 2494 | // write output ids | ||
| 2495 | { | ||
| 2496 | LLAMA_LOG_DEBUG("%s: - writing output ids\n", __func__); | ||
| 2497 | |||
| 2498 | const auto n_outputs = this->n_outputs; | ||
| 2499 | const auto & output_ids = this->output_ids; | ||
| 2500 | |||
| 2501 | std::vector<int32_t> w_output_pos; | ||
| 2502 | |||
| 2503 | w_output_pos.resize(n_outputs); | ||
| 2504 | |||
| 2505 | // build a more compact representation of the output ids | ||
| 2506 | for (size_t i = 0; i < n_batch(); ++i) { | ||
| 2507 | // map an output id to a position in the batch | ||
| 2508 | int64_t pos = output_ids[i]; | ||
| 2509 | if (pos >= 0) { | ||
| 2510 | GGML_ASSERT(pos < n_outputs); | ||
| 2511 | w_output_pos[pos] = i; | ||
| 2512 | } | ||
| 2513 | } | ||
| 2514 | |||
| 2515 | io.write(&n_outputs, sizeof(n_outputs)); | ||
| 2516 | |||
| 2517 | if (n_outputs) { | ||
| 2518 | io.write(w_output_pos.data(), n_outputs * sizeof(int32_t)); | ||
| 2519 | } | ||
| 2520 | } | ||
| 2521 | |||
| 2522 | // [TAG_CONTEXT_STATE_LOGITS] | ||
| 2523 | // write logits | ||
| 2524 | { | ||
| 2525 | LLAMA_LOG_DEBUG("%s: - writing logits\n", __func__); | ||
| 2526 | |||
| 2527 | const uint64_t logits_size = std::min((uint64_t) this->logits.size, (uint64_t) n_outputs * model.vocab.n_tokens()); | ||
| 2528 | |||
| 2529 | io.write(&logits_size, sizeof(logits_size)); | ||
| 2530 | |||
| 2531 | if (logits_size) { | ||
| 2532 | io.write(logits.data, logits_size * sizeof(float)); | ||
| 2533 | } | ||
| 2534 | } | ||
| 2535 | |||
| 2536 | // write embeddings | ||
| 2537 | { | ||
| 2538 | LLAMA_LOG_DEBUG("%s: - writing embeddings\n", __func__); | ||
| 2539 | |||
| 2540 | const uint64_t embd_size = std::min((uint64_t) this->embd.size, (uint64_t) n_outputs * model.hparams.n_embd); | ||
| 2541 | |||
| 2542 | io.write(&embd_size, sizeof(embd_size)); | ||
| 2543 | |||
| 2544 | if (embd_size) { | ||
| 2545 | io.write(embd.data, embd_size * sizeof(float)); | ||
| 2546 | } | ||
| 2547 | } | ||
| 2548 | |||
| 2549 | // TODO: handle sampling buffers and samplers state ? | ||
| 2550 | // https://github.com/ggml-org/llama.cpp/pull/17004 | ||
| 2551 | |||
| 2552 | if (memory != nullptr) { | ||
| 2553 | LLAMA_LOG_DEBUG("%s: - writing memory module\n", __func__); | ||
| 2554 | memory->state_write(io); | ||
| 2555 | } | ||
| 2556 | |||
| 2557 | return io.n_bytes(); | ||
| 2558 | } | ||
| 2559 | |||
| 2560 | size_t llama_context::state_read_data(llama_io_read_i & io) { | ||
| 2561 | LLAMA_LOG_DEBUG("%s: reading state\n", __func__); | ||
| 2562 | |||
| 2563 | // read model info | ||
| 2564 | { | ||
| 2565 | LLAMA_LOG_DEBUG("%s: - reading model info\n", __func__); | ||
| 2566 | |||
| 2567 | const std::string cur_arch_str = llm_arch_name(model.arch); | ||
| 2568 | |||
| 2569 | std::string arch_str; | ||
| 2570 | io.read_string(arch_str); | ||
| 2571 | if (cur_arch_str != arch_str) { | ||
| 2572 | throw std::runtime_error(format("wrong model arch: '%s' instead of '%s'", arch_str.c_str(), cur_arch_str.c_str())); | ||
| 2573 | } | ||
| 2574 | // TODO: add more info which needs to be identical but which is not verified otherwise | ||
| 2575 | } | ||
| 2576 | |||
| 2577 | // read output ids | ||
| 2578 | { | ||
| 2579 | LLAMA_LOG_DEBUG("%s: - reading output ids\n", __func__); | ||
| 2580 | |||
| 2581 | auto n_outputs = this->n_outputs; | ||
| 2582 | io.read_to(&n_outputs, sizeof(n_outputs)); | ||
| 2583 | |||
| 2584 | if (n_outputs > output_reserve(n_outputs)) { | ||
| 2585 | throw std::runtime_error("could not reserve outputs"); | ||
| 2586 | } | ||
| 2587 | |||
| 2588 | std::vector<int32_t> output_pos; | ||
| 2589 | |||
| 2590 | if (n_outputs) { | ||
| 2591 | output_pos.resize(n_outputs); | ||
| 2592 | io.read_to(output_pos.data(), n_outputs * sizeof(int32_t)); | ||
| 2593 | |||
| 2594 | for (int32_t i = 0; i < (int32_t) output_pos.size(); ++i) { | ||
| 2595 | int32_t id = output_pos[i]; | ||
| 2596 | if ((uint32_t) id >= n_batch()) { | ||
| 2597 | throw std::runtime_error(format("invalid output id, %d does not fit in batch size of %u", id, n_batch())); | ||
| 2598 | } | ||
| 2599 | this->output_ids[id] = i; | ||
| 2600 | } | ||
| 2601 | |||
| 2602 | this->n_outputs = n_outputs; | ||
| 2603 | } | ||
| 2604 | } | ||
| 2605 | |||
| 2606 | // read logits | ||
| 2607 | { | ||
| 2608 | LLAMA_LOG_DEBUG("%s: - reading logits\n", __func__); | ||
| 2609 | |||
| 2610 | uint64_t logits_size; | ||
| 2611 | io.read_to(&logits_size, sizeof(logits_size)); | ||
| 2612 | |||
| 2613 | if (this->logits.size < logits_size) { | ||
| 2614 | throw std::runtime_error("logits buffer too small"); | ||
| 2615 | } | ||
| 2616 | |||
| 2617 | if (logits_size) { | ||
| 2618 | io.read_to(this->logits.data, logits_size * sizeof(float)); | ||
| 2619 | } | ||
| 2620 | } | ||
| 2621 | |||
| 2622 | // read embeddings | ||
| 2623 | { | ||
| 2624 | LLAMA_LOG_DEBUG("%s: - reading embeddings\n", __func__); | ||
| 2625 | |||
| 2626 | uint64_t embd_size; | ||
| 2627 | io.read_to(&embd_size, sizeof(embd_size)); | ||
| 2628 | |||
| 2629 | if (this->embd.size < embd_size) { | ||
| 2630 | throw std::runtime_error("embeddings buffer too small"); | ||
| 2631 | } | ||
| 2632 | |||
| 2633 | if (embd_size) { | ||
| 2634 | io.read_to(this->embd.data, embd_size * sizeof(float)); | ||
| 2635 | } | ||
| 2636 | } | ||
| 2637 | |||
| 2638 | // TODO: handle sampling buffers and samplers state ? | ||
| 2639 | // https://github.com/ggml-org/llama.cpp/pull/17004 | ||
| 2640 | |||
| 2641 | if (memory) { | ||
| 2642 | LLAMA_LOG_DEBUG("%s: - reading memory module\n", __func__); | ||
| 2643 | |||
| 2644 | memory->state_read(io); | ||
| 2645 | } | ||
| 2646 | |||
| 2647 | return io.n_bytes(); | ||
| 2648 | } | ||
| 2649 | |||
| 2650 | size_t llama_context::state_seq_write_data(llama_io_write_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) { | ||
| 2651 | GGML_UNUSED(seq_id); | ||
| 2652 | |||
| 2653 | if (memory) { | ||
| 2654 | memory->state_write(io, seq_id, flags); | ||
| 2655 | } | ||
| 2656 | |||
| 2657 | return io.n_bytes(); | ||
| 2658 | } | ||
| 2659 | |||
| 2660 | size_t llama_context::state_seq_read_data(llama_io_read_i & io, llama_seq_id seq_id, llama_state_seq_flags flags) { | ||
| 2661 | GGML_UNUSED(seq_id); | ||
| 2662 | |||
| 2663 | if (memory) { | ||
| 2664 | memory->state_read(io, seq_id, flags); | ||
| 2665 | } | ||
| 2666 | |||
| 2667 | return io.n_bytes(); | ||
| 2668 | } | ||
| 2669 | |||
| 2670 | // | ||
| 2671 | // perf | ||
| 2672 | // | ||
| 2673 | |||
| 2674 | llama_perf_context_data llama_context::perf_get_data() const { | ||
| 2675 | llama_perf_context_data data = {}; | ||
| 2676 | |||
| 2677 | data.t_start_ms = 1e-3 * t_start_us; | ||
| 2678 | data.t_load_ms = 1e-3 * t_load_us; | ||
| 2679 | data.t_p_eval_ms = 1e-3 * t_p_eval_us; | ||
| 2680 | data.t_eval_ms = 1e-3 * t_eval_us; | ||
| 2681 | data.n_p_eval = std::max(1, n_p_eval); | ||
| 2682 | data.n_eval = std::max(1, n_eval); | ||
| 2683 | data.n_reused = std::max(0, n_reused); | ||
| 2684 | |||
| 2685 | return data; | ||
| 2686 | } | ||
| 2687 | |||
| 2688 | void llama_context::perf_reset() { | ||
| 2689 | t_start_us = ggml_time_us(); | ||
| 2690 | t_eval_us = n_eval = 0; | ||
| 2691 | t_p_eval_us = n_p_eval = 0; | ||
| 2692 | n_reused = 0; | ||
| 2693 | } | ||
| 2694 | |||
| 2695 | std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> llama_context::memory_breakdown() const { | ||
| 2696 | std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> ret; | ||
| 2697 | for (const auto & [buft, size] : model.memory_breakdown()) { | ||
| 2698 | ret[buft].model += size; | ||
| 2699 | } | ||
| 2700 | if (memory) { | ||
| 2701 | for (const auto & [buft, size] : memory->memory_breakdown()) { | ||
| 2702 | ret[buft].context += size; | ||
| 2703 | } | ||
| 2704 | } | ||
| 2705 | if (model.hparams.no_alloc) { | ||
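| |         // with no_alloc the compute buffers are not actually allocated, so report the | ||
| |         // expected per-backend sizes recorded at reserve time instead | ||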
| 2706 | for (size_t i = 0; i < backends.size(); ++i) { | ||
| 2707 | ggml_backend_t backend = backends[i].get(); | ||
| 2708 | ggml_backend_buffer_type_t buft = ggml_backend_sched_get_buffer_type(sched.get(), backend); | ||
| 2709 | ret[buft].compute += backend_buf_exp_size[i]; | ||
| 2710 | } | ||
| 2711 | } else { | ||
| 2712 | for (const auto & backend_ptr : backends) { | ||
| 2713 | ggml_backend_t backend = backend_ptr.get(); | ||
| 2714 | ggml_backend_buffer_type_t buft = ggml_backend_sched_get_buffer_type(sched.get(), backend); | ||
| 2715 | ret[buft].compute += ggml_backend_sched_get_buffer_size(sched.get(), backend); | ||
| 2716 | } | ||
| 2717 | } | ||
| 2718 | return ret; | ||
| 2719 | } | ||
| 2720 | |||
| 2721 | // | ||
| 2722 | // training | ||
| 2723 | // | ||
| 2724 | |||
| 2725 | static void llama_set_param(struct ggml_tensor * tensor, llama_opt_param_filter param_filter, void * userdata) { | ||
| 2726 | if (!tensor || tensor->type != GGML_TYPE_F32) { | ||
| 2727 | return; | ||
| 2728 | } | ||
| 2729 | if (!param_filter(tensor, userdata)) { | ||
| 2730 | return; | ||
| 2731 | } | ||
| 2732 | if (strcmp(tensor->name, "token_embd.weight") == 0) { | ||
| 2733 | return; // FIXME | ||
| 2734 | } | ||
| 2735 | if (strcmp(tensor->name, "rope_freqs.weight") == 0) { | ||
| 2736 | return; // FIXME | ||
| 2737 | } | ||
| 2738 | ggml_set_param(tensor); | ||
| 2739 | } | ||
| 2740 | |||
| 2741 | void llama_context::opt_init(struct llama_model * model, struct llama_opt_params lopt_params) { | ||
| 2742 | GGML_ASSERT(!opt_ctx); | ||
| 2743 | model->hparams.n_ctx_train = lopt_params.n_ctx_train > 0 ? lopt_params.n_ctx_train : n_ctx(); | ||
| 2744 | const uint32_t n_batch = std::min(this->n_batch(), model->hparams.n_ctx_train); | ||
| 2745 | const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch); | ||
| 2746 | GGML_ASSERT(model->hparams.n_ctx_train % n_batch == 0); | ||
| 2747 | GGML_ASSERT(n_batch % n_ubatch == 0); | ||
| 2748 | |||
| 2749 | ggml_opt_params opt_params = ggml_opt_default_params(sched.get(), GGML_OPT_LOSS_TYPE_CROSS_ENTROPY); | ||
| 2750 | opt_params.opt_period = n_batch / n_ubatch; | ||
| 2751 | opt_params.get_opt_pars = lopt_params.get_opt_pars; | ||
| 2752 | opt_params.get_opt_pars_ud = lopt_params.get_opt_pars_ud; | ||
| 2753 | opt_params.optimizer = lopt_params.optimizer_type; | ||
| 2754 | opt_ctx = ggml_opt_init(opt_params); | ||
| 2755 | |||
| 2756 | llama_opt_param_filter param_filter = lopt_params.param_filter; | ||
| 2757 | void * param_filter_ud = lopt_params.param_filter_ud; | ||
| 2758 | |||
| 2759 | //llama_set_param(model->tok_embd, param_filter, param_filter_ud); // FIXME | ||
| 2760 | llama_set_param(model->type_embd, param_filter, param_filter_ud); | ||
| 2761 | llama_set_param(model->pos_embd, param_filter, param_filter_ud); | ||
| 2762 | llama_set_param(model->tok_norm, param_filter, param_filter_ud); | ||
| 2763 | llama_set_param(model->tok_norm_b, param_filter, param_filter_ud); | ||
| 2764 | llama_set_param(model->output_norm, param_filter, param_filter_ud); | ||
| 2765 | llama_set_param(model->output_norm_b, param_filter, param_filter_ud); | ||
| 2766 | llama_set_param(model->output, param_filter, param_filter_ud); | ||
| 2767 | llama_set_param(model->output_b, param_filter, param_filter_ud); | ||
| 2768 | llama_set_param(model->output_norm_enc, param_filter, param_filter_ud); | ||
| 2769 | llama_set_param(model->cls, param_filter, param_filter_ud); | ||
| 2770 | llama_set_param(model->cls_b, param_filter, param_filter_ud); | ||
| 2771 | llama_set_param(model->cls_out, param_filter, param_filter_ud); | ||
| 2772 | llama_set_param(model->cls_out_b, param_filter, param_filter_ud); | ||
| 2773 | |||
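| | // llama_layer is assumed to contain only ggml_tensor pointers, so it can be | ||
| | // walked as a flat pointer array; llama_set_param ignores null and non-F32 | ||
| | // entries, which makes this blanket pass over the per-layer weights safe | ||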
| 2774 | for (struct llama_layer & layer : model->layers) { | ||
| 2775 | for (size_t i = 0; i < sizeof(layer)/sizeof(struct ggml_tensor *); ++i) { | ||
| 2776 | llama_set_param(reinterpret_cast<struct ggml_tensor **>(&layer)[i], param_filter, param_filter_ud); | ||
| 2777 | } | ||
| 2778 | } | ||
| 2779 | } | ||
| 2780 | |||
| 2781 | void llama_context::opt_epoch_iter( | ||
| 2782 | ggml_opt_dataset_t dataset, | ||
| 2783 | ggml_opt_result_t result, | ||
| 2784 | const std::vector<llama_token> & tokens, | ||
| 2785 | const std::vector<llama_token> & labels_sparse, | ||
| 2786 | llama_batch & batch, | ||
| 2787 | ggml_opt_epoch_callback callback, | ||
| 2788 | bool train, | ||
| 2789 | int64_t idata_in_loop, | ||
| 2790 | int64_t ndata_in_loop, | ||
| 2791 | int64_t t_loop_start) { | ||
| 2792 | GGML_ASSERT(opt_ctx); | ||
| 2793 | const uint32_t n_ctx = llama_model_n_ctx_train(&model); | ||
| 2794 | const uint32_t n_batch = std::min(this->n_batch(), n_ctx); | ||
| 2795 | const uint32_t n_ubatch = std::min(this->n_ubatch(), n_batch); | ||
| 2796 | |||
| 2797 | memory->clear(true); | ||
| 2798 | |||
| 2799 | for (uint32_t pos_ctx = 0; pos_ctx < n_ctx; pos_ctx += n_batch) { | ||
| 2800 | batch.n_tokens = n_batch; | ||
| 2801 | for (uint32_t pos_batch = 0; pos_batch < n_batch; ++pos_batch) { | ||
| 2802 | batch.token [pos_batch] = tokens[pos_ctx + pos_batch]; | ||
| 2803 | batch.pos [pos_batch] = pos_ctx + pos_batch; | ||
| 2804 | batch.n_seq_id[pos_batch] = 1; | ||
| 2805 | batch.seq_id [pos_batch][0] = 0; | ||
| 2806 | batch.logits [pos_batch] = true; | ||
| 2807 | } | ||
| 2808 | |||
| 2809 | if (!balloc->init(batch, model.vocab, nullptr, model.hparams.n_embd_inp(), cparams.kv_unified ? LLAMA_MAX_SEQ : cparams.n_seq_max, true)) { | ||
| 2810 | LLAMA_LOG_ERROR("%s: failed to initialize batch\n", __func__); | ||
| 2811 | return; | ||
| 2812 | } | ||
| 2813 | |||
| 2814 | const uint32_t n_tokens_all = balloc->get_n_tokens(); | ||
| 2815 | |||
| 2816 | n_queued_tokens += n_tokens_all; | ||
| 2817 | |||
| 2818 | embd_seq.clear(); | ||
| 2819 | |||
| 2820 | uint32_t n_outputs_all = n_tokens_all; | ||
| 2821 | |||
| 2822 | auto mctx = memory->init_batch(*balloc, cparams.n_ubatch, true); | ||
| 2823 | if (!mctx || mctx->get_status() != LLAMA_MEMORY_STATUS_SUCCESS) { | ||
| 2824 | LLAMA_LOG_ERROR("%s: could not initialize memory context for batch\n", __func__); | ||
| 2825 | break; | ||
| 2826 | } | ||
| 2827 | |||
| 2828 | // reserve output buffer | ||
| 2829 | if (output_reserve(n_outputs_all) < n_outputs_all) { | ||
| 2830 | LLAMA_LOG_ERROR("%s: could not reserve space for batch with %u outputs\n", __func__, n_outputs_all); | ||
| 2831 | GGML_ABORT("TODO: handle this error"); | ||
| 2832 | } | ||
| 2833 | |||
| 2834 | uint32_t pos_batch = 0; | ||
| 2835 | do { | ||
| 2836 | const auto & ubatch = mctx->get_ubatch(); | ||
| 2837 | |||
| 2838 | n_outputs = ubatch.n_tokens; | ||
| 2839 | |||
| 2840 | if (!mctx->apply()) { | ||
| 2841 | LLAMA_LOG_ERROR("%s: failed to update the memory context\n", __func__); | ||
| 2842 | break; | ||
| 2843 | } | ||
| 2844 | |||
| 2845 | auto * res = gf_res_prev.get(); | ||
| 2846 | |||
| 2847 | const auto gparams = graph_params(res, ubatch, mctx.get(), LLM_GRAPH_TYPE_DEFAULT); | ||
| 2848 | |||
| 2849 | res->reset(); | ||
| 2850 | |||
| 2851 | auto * gf = model.build_graph(gparams); | ||
| 2852 | |||
| 2853 | struct ggml_context * ctx_compute_opt; | ||
| 2854 | { | ||
| 2855 | const size_t size_gf = ggml_graph_size(gf); | ||
| 2856 | const size_t size_meta = 4*size_gf*ggml_tensor_overhead() + 2*ggml_graph_overhead_custom(size_gf, /*grads = */ true); | ||
| 2857 | struct ggml_init_params params = { | ||
| 2858 | /*.mem_size =*/ size_meta, | ||
| 2859 | /*.mem_buffer =*/ nullptr, | ||
| 2860 | /*.no_alloc =*/ true, | ||
| 2861 | }; | ||
| 2862 | ctx_compute_opt = ggml_init(params); | ||
| 2863 | } | ||
| 2864 | ggml_opt_prepare_alloc(opt_ctx, ctx_compute_opt, gf, res->get_inp_tokens(), res->get_logits()); | ||
| 2865 | ggml_opt_alloc(opt_ctx, train); | ||
| 2866 | |||
| 2867 | res->set_inputs(&ubatch); | ||
| 2868 | { | ||
| 2869 | struct ggml_tensor * labels = ggml_opt_labels(opt_ctx); | ||
| 2870 | GGML_ASSERT(labels->ne[1] == n_ubatch); | ||
| 2871 | ggml_set_zero(labels); | ||
| 2872 | const float onef = 1.0f; | ||
| 2873 | for (uint32_t pos_ubatch = 0; pos_ubatch < n_ubatch; ++pos_ubatch) { | ||
| 2874 | const uint32_t ilabel = pos_ctx + pos_batch + pos_ubatch; | ||
| 2875 | GGML_ASSERT(labels_sparse[ilabel] < labels->ne[0]); | ||
| 2876 | ggml_backend_tensor_set(labels, &onef, (pos_ubatch*labels->ne[0] + labels_sparse[ilabel])*sizeof(float), sizeof(float)); | ||
| 2877 | } | ||
| 2878 | } | ||
| 2879 | ggml_opt_eval(opt_ctx, result); | ||
| 2880 | if (callback) { | ||
| 2881 | callback(train, opt_ctx, dataset, result, idata_in_loop + (pos_ctx + pos_batch)/n_ubatch + 1, ndata_in_loop, t_loop_start); | ||
| 2882 | } | ||
| 2883 | ggml_free(ctx_compute_opt); | ||
| 2884 | |||
| 2885 | pos_batch += ubatch.n_tokens; | ||
| 2886 | } while (mctx->next()); | ||
| 2887 | } | ||
| 2888 | } | ||
| 2889 | |||
| 2890 | void llama_context::opt_epoch( | ||
| 2891 | ggml_opt_dataset_t dataset, | ||
| 2892 | ggml_opt_result_t result_train, | ||
| 2893 | ggml_opt_result_t result_eval, | ||
| 2894 | int64_t idata_split, | ||
| 2895 | ggml_opt_epoch_callback callback_train, | ||
| 2896 | ggml_opt_epoch_callback callback_eval) { | ||
| 2897 | const uint32_t n_ctx = this->n_ctx(); | ||
| 2898 | const uint32_t n_batch = std::min(cparams.n_batch, n_ctx); | ||
| 2899 | const uint32_t n_ubatch = std::min(cparams.n_ubatch, n_batch); | ||
| 2900 | const int64_t ndata = ggml_opt_dataset_ndata(dataset); | ||
| 2901 | |||
| 2902 | GGML_ASSERT(idata_split >= 0); | ||
| 2903 | GGML_ASSERT(idata_split <= ndata); | ||
| 2904 | |||
| 2905 | const uint32_t ubatch_per_ctx = n_ctx / n_ubatch; | ||
| 2906 | |||
| 2907 | struct llama_batch batch = llama_batch_init(n_batch, 0, 1); | ||
| 2908 | std::vector<llama_token> tokens(n_ctx); | ||
| 2909 | std::vector<llama_token> labels_sparse(n_ctx); | ||
| 2910 | |||
| 2911 | int64_t idata = 0; | ||
| 2912 | |||
| 2913 | int64_t t_loop_start = ggml_time_us(); | ||
| 2914 | int64_t ndata_in_loop = idata_split*ubatch_per_ctx; | ||
| 2915 | for (; idata < idata_split; ++idata) { | ||
| 2916 | constexpr bool train = true; | ||
| 2917 | const int64_t idata_in_loop = idata*ubatch_per_ctx; | ||
| 2918 | |||
| 2919 | ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata); | ||
| 2920 | opt_epoch_iter(dataset, result_train, tokens, labels_sparse, batch, | ||
| 2921 | callback_train, train, idata_in_loop, ndata_in_loop, t_loop_start); | ||
| 2922 | } | ||
| 2923 | |||
| 2924 | t_loop_start = ggml_time_us(); | ||
| 2925 | ndata_in_loop = (ndata - idata_split)*ubatch_per_ctx; | ||
| 2926 | for (; idata < ndata; ++idata) { | ||
| 2927 | constexpr bool train = false; | ||
| 2928 | const int64_t idata_in_loop = (idata - idata_split)*ubatch_per_ctx; | ||
| 2929 | |||
| 2930 | ggml_opt_dataset_get_batch_host(dataset, tokens.data(), n_ctx*sizeof(llama_token), labels_sparse.data(), idata); | ||
| 2931 | opt_epoch_iter(dataset, result_eval, tokens, labels_sparse, batch, | ||
| 2932 | callback_eval, train, idata_in_loop, ndata_in_loop, t_loop_start); | ||
| 2933 | } | ||
| 2934 | |||
| 2935 | llama_batch_free(batch); | ||
| 2936 | } | ||
| 2937 | |||
| 2938 | // | ||
| 2939 | // interface implementation | ||
| 2940 | // | ||
| 2941 | |||
| 2942 | llama_context_params llama_context_default_params() { | ||
| 2943 | llama_context_params result = { | ||
| 2944 | /*.n_ctx =*/ 512, | ||
| 2945 | /*.n_batch =*/ 2048, | ||
| 2946 | /*.n_ubatch =*/ 512, | ||
| 2947 | /*.n_seq_max =*/ 1, | ||
| 2948 | /*.n_threads =*/ GGML_DEFAULT_N_THREADS, // TODO: better default | ||
| 2949 | /*.n_threads_batch =*/ GGML_DEFAULT_N_THREADS, | ||
| 2950 | /*.rope_scaling_type =*/ LLAMA_ROPE_SCALING_TYPE_UNSPECIFIED, | ||
| 2951 | /*.pooling_type =*/ LLAMA_POOLING_TYPE_UNSPECIFIED, | ||
| 2952 | /*.attention_type =*/ LLAMA_ATTENTION_TYPE_UNSPECIFIED, | ||
| 2953 | /*.flash_attn_type =*/ LLAMA_FLASH_ATTN_TYPE_AUTO, | ||
| 2954 | /*.rope_freq_base =*/ 0.0f, | ||
| 2955 | /*.rope_freq_scale =*/ 0.0f, | ||
| 2956 | /*.yarn_ext_factor =*/ -1.0f, | ||
| 2957 | /*.yarn_attn_factor =*/ -1.0f, | ||
| 2958 | /*.yarn_beta_fast =*/ -1.0f, | ||
| 2959 | /*.yarn_beta_slow =*/ -1.0f, | ||
| 2960 | /*.yarn_orig_ctx =*/ 0, | ||
| 2961 | /*.defrag_thold =*/ -1.0f, | ||
| 2962 | /*.cb_eval =*/ nullptr, | ||
| 2963 | /*.cb_eval_user_data =*/ nullptr, | ||
| 2964 | /*.type_k =*/ GGML_TYPE_F16, | ||
| 2965 | /*.type_v =*/ GGML_TYPE_F16, | ||
| 2966 | /*.abort_callback =*/ nullptr, | ||
| 2967 | /*.abort_callback_data =*/ nullptr, | ||
| 2968 | /*.embeddings =*/ false, | ||
| 2969 | /*.offload_kqv =*/ true, | ||
| 2970 | /*.no_perf =*/ true, | ||
| 2971 | /*.op_offload =*/ true, | ||
| 2972 | /*.swa_full =*/ true, | ||
| 2973 | /*.kv_unified =*/ false, | ||
| 2974 | /*.samplers =*/ nullptr, | ||
| 2975 | /*.n_samplers =*/ 0, | ||
| 2976 | }; | ||
| 2977 | |||
| 2978 | return result; | ||
| 2979 | } | ||
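| | |||
| | // Usage sketch (illustrative, not part of the library): start from the | ||
| | // defaults and override only the fields of interest; the values below are | ||
| | // hypothetical, not recommendations. | ||
| | // | ||
| | //   llama_context_params cparams = llama_context_default_params(); | ||
| | //   cparams.n_ctx     = 4096; // context window in tokens | ||
| | //   cparams.n_threads = 8;    // threads for single-token generation | ||
| | //   llama_context * lctx = llama_init_from_model(model, cparams); | ||
| | //   // ... use the context ... | ||
| | //   llama_free(lctx); | ||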
| 2980 | |||
| 2981 | llama_context * llama_init_from_model( | ||
| 2982 | llama_model * model, | ||
| 2983 | llama_context_params params) { | ||
| 2984 | if (!model) { | ||
| 2985 | LLAMA_LOG_ERROR("%s: model cannot be NULL\n", __func__); | ||
| 2986 | return nullptr; | ||
| 2987 | } | ||
| 2988 | |||
| 2989 | if (params.n_batch == 0 && params.n_ubatch == 0) { | ||
| 2990 | LLAMA_LOG_ERROR("%s: n_batch and n_ubatch cannot both be zero\n", __func__); | ||
| 2991 | return nullptr; | ||
| 2992 | } | ||
| 2993 | |||
| 2994 | if (params.n_ctx == 0 && model->hparams.n_ctx_train == 0) { | ||
| 2995 | LLAMA_LOG_ERROR("%s: n_ctx and model->hparams.n_ctx_train cannot both be zero\n", __func__); | ||
| 2996 | return nullptr; | ||
| 2997 | } | ||
| 2998 | |||
| 2999 | if (params.flash_attn_type != LLAMA_FLASH_ATTN_TYPE_DISABLED && model->arch == LLM_ARCH_GROK) { | ||
| 3000 | LLAMA_LOG_WARN("%s: flash_attn is not compatible with Grok - forcing off\n", __func__); | ||
| 3001 | params.flash_attn_type = LLAMA_FLASH_ATTN_TYPE_DISABLED; | ||
| 3002 | } | ||
| 3003 | |||
| 3004 | if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO && ggml_is_quantized(params.type_k)) { | ||
| 3005 | const uint32_t blck_size = ggml_blck_size(params.type_k); | ||
| 3006 | if (model->hparams.n_embd_head_k % blck_size != 0) { | ||
| 3007 | LLAMA_LOG_ERROR("%s: K cache type %s with block size %u does not divide n_embd_head_k=%u\n", | ||
| 3008 | __func__, ggml_type_name(params.type_k), blck_size, model->hparams.n_embd_head_k); | ||
| 3009 | return nullptr; | ||
| 3010 | } | ||
| 3011 | } | ||
| 3012 | |||
| 3013 | if (params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_AUTO && ggml_is_quantized(params.type_v)) { | ||
| 3014 | const uint32_t blck_size = ggml_blck_size(params.type_v); | ||
| 3015 | if (model->hparams.n_embd_head_v % blck_size != 0) { | ||
| 3016 | LLAMA_LOG_ERROR("%s: V cache type %s with block size %u does not divide n_embd_head_k=%u\n", | ||
| 3017 | __func__, ggml_type_name(params.type_v), blck_size, model->hparams.n_embd_head_v); | ||
| 3018 | return nullptr; | ||
| 3019 | } | ||
| 3020 | } | ||
| 3021 | |||
| 3022 | if (ggml_is_quantized(params.type_v) && params.flash_attn_type == LLAMA_FLASH_ATTN_TYPE_DISABLED) { | ||
| 3023 | LLAMA_LOG_ERROR("%s: V cache quantization requires flash_attn\n", __func__); | ||
| 3024 | return nullptr; | ||
| 3025 | } | ||
| 3026 | |||
| 3027 | if (params.pooling_type != LLAMA_POOLING_TYPE_UNSPECIFIED && | ||
| 3028 | params.pooling_type != model->hparams.pooling_type) { | ||
| 3029 | // user-specified pooling type is different from the model default | ||
| 3030 | LLAMA_LOG_WARN("%s: model default pooling_type is [%d], but [%d] was specified\n", __func__, | ||
| 3031 | model->hparams.pooling_type, params.pooling_type); | ||
| 3032 | } | ||
| 3033 | |||
| 3034 | try { | ||
| 3035 | auto * ctx = new llama_context(*model, params); | ||
| 3036 | return ctx; | ||
| 3037 | } catch (const std::exception & err) { | ||
| 3038 | LLAMA_LOG_ERROR("%s: failed to initialize the context: %s\n", __func__, err.what()); | ||
| 3039 | } | ||
| 3040 | |||
| 3041 | return nullptr; | ||
| 3042 | } | ||
| 3043 | |||
| 3044 | // deprecated | ||
| 3045 | llama_context * llama_new_context_with_model( | ||
| 3046 | llama_model * model, | ||
| 3047 | llama_context_params params) { | ||
| 3048 | return llama_init_from_model(model, params); | ||
| 3049 | } | ||
| 3050 | |||
| 3051 | void llama_free(llama_context * ctx) { | ||
| 3052 | delete ctx; | ||
| 3053 | } | ||
| 3054 | |||
| 3055 | uint32_t llama_n_ctx(const llama_context * ctx) { | ||
| 3056 | return ctx->n_ctx(); | ||
| 3057 | } | ||
| 3058 | |||
| 3059 | uint32_t llama_n_ctx_seq(const llama_context * ctx) { | ||
| 3060 | return ctx->n_ctx_seq(); | ||
| 3061 | } | ||
| 3062 | |||
| 3063 | uint32_t llama_n_batch(const llama_context * ctx) { | ||
| 3064 | return ctx->n_batch(); | ||
| 3065 | } | ||
| 3066 | |||
| 3067 | uint32_t llama_n_ubatch(const llama_context * ctx) { | ||
| 3068 | return ctx->n_ubatch(); | ||
| 3069 | } | ||
| 3070 | |||
| 3071 | uint32_t llama_n_seq_max(const llama_context * ctx) { | ||
| 3072 | return ctx->n_seq_max(); | ||
| 3073 | } | ||
| 3074 | |||
| 3075 | const llama_model * llama_get_model(const llama_context * ctx) { | ||
| 3076 | return &ctx->get_model(); | ||
| 3077 | } | ||
| 3078 | |||
| 3079 | enum llama_pooling_type llama_pooling_type(const llama_context * ctx) { | ||
| 3080 | return ctx->pooling_type(); | ||
| 3081 | } | ||
| 3082 | |||
| 3083 | void llama_attach_threadpool( | ||
| 3084 | llama_context * ctx, | ||
| 3085 | ggml_threadpool_t threadpool, | ||
| 3086 | ggml_threadpool_t threadpool_batch) { | ||
| 3087 | ctx->attach_threadpool(threadpool, threadpool_batch); | ||
| 3088 | } | ||
| 3089 | |||
| 3090 | void llama_detach_threadpool(llama_context * ctx) { | ||
| 3091 | ctx->detach_threadpool(); | ||
| 3092 | } | ||
| 3093 | |||
| 3094 | void llama_set_n_threads(llama_context * ctx, int32_t n_threads, int32_t n_threads_batch) { | ||
| 3095 | ctx->set_n_threads(n_threads, n_threads_batch); | ||
| 3096 | } | ||
| 3097 | |||
| 3098 | int32_t llama_n_threads(llama_context * ctx) { | ||
| 3099 | return ctx->n_threads(); | ||
| 3100 | } | ||
| 3101 | |||
| 3102 | int32_t llama_n_threads_batch(llama_context * ctx) { | ||
| 3103 | return ctx->n_threads_batch(); | ||
| 3104 | } | ||
| 3105 | |||
| 3106 | void llama_set_abort_callback(llama_context * ctx, bool (*abort_callback)(void * data), void * abort_callback_data) { | ||
| 3107 | ctx->set_abort_callback(abort_callback, abort_callback_data); | ||
| 3108 | } | ||
| 3109 | |||
| 3110 | void llama_set_embeddings(llama_context * ctx, bool embeddings) { | ||
| 3111 | ctx->set_embeddings(embeddings); | ||
| 3112 | } | ||
| 3113 | |||
| 3114 | void llama_set_causal_attn(llama_context * ctx, bool causal_attn) { | ||
| 3115 | ctx->set_causal_attn(causal_attn); | ||
| 3116 | } | ||
| 3117 | |||
| 3118 | void llama_set_warmup(llama_context * ctx, bool warmup) { | ||
| 3119 | ctx->set_warmup(warmup); | ||
| 3120 | } | ||
| 3121 | |||
| 3122 | void llama_synchronize(llama_context * ctx) { | ||
| 3123 | ctx->synchronize(); | ||
| 3124 | } | ||
| 3125 | |||
| 3126 | float * llama_get_logits(llama_context * ctx) { | ||
| 3127 | ctx->synchronize(); | ||
| 3128 | |||
| 3129 | return ctx->get_logits(); | ||
| 3130 | } | ||
| 3131 | |||
| 3132 | float * llama_get_logits_ith(llama_context * ctx, int32_t i) { | ||
| 3133 | ctx->synchronize(); | ||
| 3134 | |||
| 3135 | float * res = ctx->get_sampled_logits_ith(i); | ||
| 3138 | |||
| 3139 | if (!res) { | ||
| 3140 | res = ctx->get_logits_ith(i); | ||
| 3141 | } | ||
| 3142 | |||
| 3143 | return res; | ||
| 3144 | } | ||
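| | |||
| | // Sketch: fetching logits for a single output row after llama_decode. | ||
| | // Backend-sampled logits take precedence over the full logits buffer, as | ||
| | // implemented above; i == -1 is assumed to index the last output row. | ||
| | // | ||
| | //   const float * logits = llama_get_logits_ith(ctx, -1); | ||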
| 3145 | |||
| 3146 | float * llama_get_embeddings(llama_context * ctx) { | ||
| 3147 | ctx->synchronize(); | ||
| 3148 | |||
| 3149 | return ctx->get_embeddings(); | ||
| 3150 | } | ||
| 3151 | |||
| 3152 | float * llama_get_embeddings_ith(llama_context * ctx, int32_t i) { | ||
| 3153 | ctx->synchronize(); | ||
| 3154 | |||
| 3155 | return ctx->get_embeddings_ith(i); | ||
| 3156 | } | ||
| 3157 | |||
| 3158 | float * llama_get_embeddings_seq(llama_context * ctx, llama_seq_id seq_id) { | ||
| 3159 | ctx->synchronize(); | ||
| 3160 | |||
| 3161 | return ctx->get_embeddings_seq(seq_id); | ||
| 3162 | } | ||
| 3163 | |||
| 3164 | bool llama_set_sampler(llama_context * ctx, llama_seq_id seq_id, llama_sampler * smpl) { | ||
| 3165 | return ctx->set_sampler(seq_id, smpl); | ||
| 3166 | } | ||
| 3167 | |||
| 3168 | llama_token llama_get_sampled_token_ith(llama_context * ctx, int32_t i) { | ||
| 3169 | ctx->synchronize(); | ||
| 3170 | |||
| 3171 | return ctx->get_sampled_token_ith(i); | ||
| 3172 | } | ||
| 3173 | |||
| 3174 | float * llama_get_sampled_probs_ith(llama_context * ctx, int32_t i) { | ||
| 3175 | ctx->synchronize(); | ||
| 3176 | |||
| 3177 | return ctx->get_sampled_probs_ith(i); | ||
| 3178 | } | ||
| 3179 | |||
| 3180 | float * llama_get_sampled_logits_ith(llama_context * ctx, int32_t i) { | ||
| 3181 | ctx->synchronize(); | ||
| 3182 | |||
| 3183 | return ctx->get_sampled_logits_ith(i); | ||
| 3184 | } | ||
| 3185 | |||
| 3186 | llama_token * llama_get_sampled_candidates_ith(llama_context * ctx, int32_t i) { | ||
| 3187 | ctx->synchronize(); | ||
| 3188 | |||
| 3189 | return const_cast<llama_token *>(ctx->get_sampled_candidates_ith(i)); | ||
| 3190 | } | ||
| 3191 | |||
| 3192 | uint32_t llama_get_sampled_candidates_count_ith(llama_context * ctx, int32_t i) { | ||
| 3193 | ctx->synchronize(); | ||
| 3194 | |||
| 3195 | return static_cast<uint32_t>(ctx->get_sampled_candidates_count(i)); | ||
| 3196 | } | ||
| 3197 | |||
| 3198 | uint32_t llama_get_sampled_logits_count_ith(llama_context * ctx, int32_t i) { | ||
| 3199 | ctx->synchronize(); | ||
| 3200 | |||
| 3201 | return static_cast<uint32_t>(ctx->get_sampled_logits_count(i)); | ||
| 3202 | } | ||
| 3203 | |||
| 3204 | uint32_t llama_get_sampled_probs_count_ith(llama_context * ctx, int32_t i) { | ||
| 3205 | ctx->synchronize(); | ||
| 3206 | |||
| 3207 | return static_cast<uint32_t>(ctx->get_sampled_probs_count(i)); | ||
| 3208 | } | ||
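| | |||
| | // Sketch: consuming backend-sampler output for output row i. Assumes | ||
| | // LLAMA_TOKEN_NULL marks "no backend-sampled token"; in that case the | ||
| | // candidate/probability arrays (when present) can drive CPU-side sampling. | ||
| | // | ||
| | //   llama_token tok = llama_get_sampled_token_ith(ctx, i); | ||
| | //   if (tok == LLAMA_TOKEN_NULL) { | ||
| | //       const uint32_t      n     = llama_get_sampled_candidates_count_ith(ctx, i); | ||
| | //       const llama_token * cand  = llama_get_sampled_candidates_ith(ctx, i); | ||
| | //       const float       * probs = llama_get_sampled_probs_ith(ctx, i); | ||
| | //       // ... sample over (cand, probs, n) on the CPU ... | ||
| | //   } | ||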
| 3209 | |||
| 3210 | // llama adapter API | ||
| 3211 | |||
| 3212 | int32_t llama_set_adapter_lora( | ||
| 3213 | llama_context * ctx, | ||
| 3214 | llama_adapter_lora * adapter, | ||
| 3215 | float scale) { | ||
| 3216 | ctx->set_adapter_lora(adapter, scale); | ||
| 3217 | |||
| 3218 | return 0; | ||
| 3219 | } | ||
| 3220 | |||
| 3221 | int32_t llama_rm_adapter_lora( | ||
| 3222 | llama_context * ctx, | ||
| 3223 | llama_adapter_lora * adapter) { | ||
| 3224 | bool res = ctx->rm_adapter_lora(adapter); | ||
| 3225 | |||
| 3226 | return res ? 0 : -1; | ||
| 3227 | } | ||
| 3228 | |||
| 3229 | void llama_clear_adapter_lora(llama_context * ctx) { | ||
| 3230 | ctx->clear_adapter_lora(); | ||
| 3231 | } | ||
| 3232 | |||
| 3233 | int32_t llama_apply_adapter_cvec( | ||
| 3234 | llama_context * ctx, | ||
| 3235 | const float * data, | ||
| 3236 | size_t len, | ||
| 3237 | int32_t n_embd, | ||
| 3238 | int32_t il_start, | ||
| 3239 | int32_t il_end) { | ||
| 3240 | bool res = ctx->apply_adapter_cvec(data, len, n_embd, il_start, il_end); | ||
| 3241 | |||
| 3242 | return res ? 0 : -1; | ||
| 3243 | } | ||
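| | |||
| | // Sketch: attaching a LoRA adapter at half strength, then removing it. | ||
| | // `adapter` is assumed to come from llama_adapter_lora_init (not shown). | ||
| | // | ||
| | //   llama_set_adapter_lora(ctx, adapter, 0.5f); | ||
| | //   // ... decode with the adapter active ... | ||
| | //   llama_clear_adapter_lora(ctx); | ||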
| 3244 | |||
| 3245 | // | ||
| 3246 | // memory | ||
| 3247 | // | ||
| 3248 | |||
| 3249 | llama_memory_t llama_get_memory(const struct llama_context * ctx) { | ||
| 3250 | return ctx->get_memory(); | ||
| 3251 | } | ||
| 3252 | |||
| 3253 | void llama_memory_clear(llama_memory_t mem, bool data) { | ||
| 3254 | if (!mem) { | ||
| 3255 | return; | ||
| 3256 | } | ||
| 3257 | |||
| 3258 | mem->clear(data); | ||
| 3259 | } | ||
| 3260 | |||
| 3261 | bool llama_memory_seq_rm( | ||
| 3262 | llama_memory_t mem, | ||
| 3263 | llama_seq_id seq_id, | ||
| 3264 | llama_pos p0, | ||
| 3265 | llama_pos p1) { | ||
| 3266 | if (!mem) { | ||
| 3267 | return true; | ||
| 3268 | } | ||
| 3269 | |||
| 3270 | return mem->seq_rm(seq_id, p0, p1); | ||
| 3271 | } | ||
| 3272 | |||
| 3273 | void llama_memory_seq_cp( | ||
| 3274 | llama_memory_t mem, | ||
| 3275 | llama_seq_id seq_id_src, | ||
| 3276 | llama_seq_id seq_id_dst, | ||
| 3277 | llama_pos p0, | ||
| 3278 | llama_pos p1) { | ||
| 3279 | if (!mem) { | ||
| 3280 | return; | ||
| 3281 | } | ||
| 3282 | |||
| 3283 | mem->seq_cp(seq_id_src, seq_id_dst, p0, p1); | ||
| 3284 | } | ||
| 3285 | |||
| 3286 | void llama_memory_seq_keep( | ||
| 3287 | llama_memory_t mem, | ||
| 3288 | llama_seq_id seq_id) { | ||
| 3289 | if (!mem) { | ||
| 3290 | return; | ||
| 3291 | } | ||
| 3292 | |||
| 3293 | mem->seq_keep(seq_id); | ||
| 3294 | } | ||
| 3295 | |||
| 3296 | void llama_memory_seq_add( | ||
| 3297 | llama_memory_t mem, | ||
| 3298 | llama_seq_id seq_id, | ||
| 3299 | llama_pos p0, | ||
| 3300 | llama_pos p1, | ||
| 3301 | llama_pos delta) { | ||
| 3302 | if (!mem) { | ||
| 3303 | return; | ||
| 3304 | } | ||
| 3305 | |||
| 3306 | mem->seq_add(seq_id, p0, p1, delta); | ||
| 3307 | } | ||
| 3308 | |||
| 3309 | void llama_memory_seq_div( | ||
| 3310 | llama_memory_t mem, | ||
| 3311 | llama_seq_id seq_id, | ||
| 3312 | llama_pos p0, | ||
| 3313 | llama_pos p1, | ||
| 3314 | int d) { | ||
| 3315 | if (!mem) { | ||
| 3316 | return; | ||
| 3317 | } | ||
| 3318 | |||
| 3319 | mem->seq_div(seq_id, p0, p1, d); | ||
| 3320 | } | ||
| 3321 | |||
| 3322 | llama_pos llama_memory_seq_pos_min( | ||
| 3323 | llama_memory_t mem, | ||
| 3324 | llama_seq_id seq_id) { | ||
| 3325 | if (!mem) { | ||
| 3326 | return -1; | ||
| 3327 | } | ||
| 3328 | |||
| 3329 | return mem->seq_pos_min(seq_id); | ||
| 3330 | } | ||
| 3331 | |||
| 3332 | llama_pos llama_memory_seq_pos_max( | ||
| 3333 | llama_memory_t mem, | ||
| 3334 | llama_seq_id seq_id) { | ||
| 3335 | if (!mem) { | ||
| 3336 | return -1; | ||
| 3337 | } | ||
| 3338 | |||
| 3339 | return mem->seq_pos_max(seq_id); | ||
| 3340 | } | ||
| 3341 | |||
| 3342 | bool llama_memory_can_shift(llama_memory_t mem) { | ||
| 3343 | if (!mem) { | ||
| 3344 | return false; | ||
| 3345 | } | ||
| 3346 | |||
| 3347 | return mem->get_can_shift(); | ||
| 3348 | } | ||
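| | |||
| | // Sketch: typical sequence bookkeeping on the memory module. All calls are | ||
| | // no-ops on a null handle, as guarded above; a negative position is the | ||
| | // usual "no bound" convention for p0/p1. | ||
| | // | ||
| | //   llama_memory_t mem = llama_get_memory(ctx); | ||
| | //   llama_memory_seq_rm (mem, 0, 32, -1);    // drop seq 0 from pos 32 onward | ||
| | //   llama_memory_seq_cp (mem, 0, 1, -1, -1); // fork: copy all of seq 0 into seq 1 | ||
| | //   if (llama_memory_can_shift(mem)) { | ||
| | //       llama_memory_seq_add(mem, 1, 32, -1, -8); // shift positions left by 8 | ||
| | //   } | ||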
| 3349 | |||
| 3350 | // llama state API | ||
| 3351 | |||
| 3352 | // deprecated | ||
| 3353 | size_t llama_get_state_size(llama_context * ctx) { | ||
| 3354 | return llama_state_get_size(ctx); | ||
| 3355 | } | ||
| 3356 | |||
| 3357 | // deprecated | ||
| 3358 | size_t llama_copy_state_data(llama_context * ctx, uint8_t * dst) { | ||
| 3359 | return llama_state_get_data(ctx, dst, -1); | ||
| 3360 | } | ||
| 3361 | |||
| 3362 | // deprecated | ||
| 3363 | size_t llama_set_state_data(llama_context * ctx, const uint8_t * src) { | ||
| 3364 | return llama_state_set_data(ctx, src, -1); | ||
| 3365 | } | ||
| 3366 | |||
| 3367 | // deprecated | ||
| 3368 | bool llama_load_session_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { | ||
| 3369 | return llama_state_load_file(ctx, path_session, tokens_out, n_token_capacity, n_token_count_out); | ||
| 3370 | } | ||
| 3371 | |||
| 3372 | // deprecated | ||
| 3373 | bool llama_save_session_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { | ||
| 3374 | return llama_state_save_file(ctx, path_session, tokens, n_token_count); | ||
| 3375 | } | ||
| 3376 | |||
| 3377 | // Returns the *actual* size of the state. | ||
| 3378 | // Intended to be used when saving the state to a buffer. | ||
| 3379 | size_t llama_state_get_size(llama_context * ctx) { | ||
| 3380 | return ctx->state_get_size(); | ||
| 3381 | } | ||
| 3382 | |||
| 3383 | size_t llama_state_get_data(llama_context * ctx, uint8_t * dst, size_t size) { | ||
| 3384 | ctx->synchronize(); | ||
| 3385 | |||
| 3386 | return ctx->state_get_data(dst, size); | ||
| 3387 | } | ||
| 3388 | |||
| 3389 | // Sets the state reading from the specified source address | ||
| 3390 | size_t llama_state_set_data(llama_context * ctx, const uint8_t * src, size_t size) { | ||
| 3391 | ctx->synchronize(); | ||
| 3392 | |||
| 3393 | return ctx->state_set_data(src, size); | ||
| 3394 | } | ||
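| | |||
| | // Sketch: full-state round trip through a memory buffer (requires <vector>). | ||
| | // A zero return is assumed to signal failure; error handling is elided. | ||
| | // | ||
| | //   std::vector<uint8_t> buf(llama_state_get_size(ctx)); | ||
| | //   llama_state_get_data(ctx, buf.data(), buf.size()); | ||
| | //   // ... decode more tokens, then roll back: | ||
| | //   llama_state_set_data(ctx, buf.data(), buf.size()); | ||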
| 3395 | |||
| 3396 | bool llama_state_load_file(llama_context * ctx, const char * path_session, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { | ||
| 3397 | ctx->synchronize(); | ||
| 3398 | |||
| 3399 | try { | ||
| 3400 | return ctx->state_load_file(path_session, tokens_out, n_token_capacity, n_token_count_out); | ||
| 3401 | } catch (const std::exception & err) { | ||
| 3402 | LLAMA_LOG_ERROR("%s: error loading session file: %s\n", __func__, err.what()); | ||
| 3403 | return false; | ||
| 3404 | } | ||
| 3405 | } | ||
| 3406 | |||
| 3407 | bool llama_state_save_file(llama_context * ctx, const char * path_session, const llama_token * tokens, size_t n_token_count) { | ||
| 3408 | ctx->synchronize(); | ||
| 3409 | |||
| 3410 | try { | ||
| 3411 | return ctx->state_save_file(path_session, tokens, n_token_count); | ||
| 3412 | } catch (const std::exception & err) { | ||
| 3413 | LLAMA_LOG_ERROR("%s: error saving session file: %s\n", __func__, err.what()); | ||
| 3414 | return false; | ||
| 3415 | } | ||
| 3416 | } | ||
| 3417 | |||
| 3418 | size_t llama_state_seq_get_size(llama_context * ctx, llama_seq_id seq_id) { | ||
| 3419 | return llama_state_seq_get_size_ext(ctx, seq_id, 0); | ||
| 3420 | } | ||
| 3421 | |||
| 3422 | size_t llama_state_seq_get_data(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id) { | ||
| 3423 | return llama_state_seq_get_data_ext(ctx, dst, size, seq_id, 0); | ||
| 3424 | } | ||
| 3425 | |||
| 3426 | size_t llama_state_seq_set_data(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id) { | ||
| 3427 | return llama_state_seq_set_data_ext(ctx, src, size, seq_id, 0); | ||
| 3428 | } | ||
| 3429 | |||
| 3430 | size_t llama_state_seq_get_size_ext(llama_context * ctx, llama_seq_id seq_id, llama_state_seq_flags flags) { | ||
| 3431 | return ctx->state_seq_get_size(seq_id, flags); | ||
| 3432 | } | ||
| 3433 | |||
| 3434 | size_t llama_state_seq_get_data_ext(llama_context * ctx, uint8_t * dst, size_t size, llama_seq_id seq_id, llama_state_seq_flags flags) { | ||
| 3435 | ctx->synchronize(); | ||
| 3436 | |||
| 3437 | return ctx->state_seq_get_data(seq_id, dst, size, flags); | ||
| 3438 | } | ||
| 3439 | |||
| 3440 | size_t llama_state_seq_set_data_ext(llama_context * ctx, const uint8_t * src, size_t size, llama_seq_id seq_id, llama_state_seq_flags flags) { | ||
| 3441 | ctx->synchronize(); | ||
| 3442 | |||
| 3443 | return ctx->state_seq_set_data(seq_id, src, size, flags); | ||
| 3444 | } | ||
| 3445 | |||
| 3446 | size_t llama_state_seq_save_file(llama_context * ctx, const char * filepath, llama_seq_id seq_id, const llama_token * tokens, size_t n_token_count) { | ||
| 3447 | ctx->synchronize(); | ||
| 3448 | |||
| 3449 | try { | ||
| 3450 | return ctx->state_seq_save_file(seq_id, filepath, tokens, n_token_count); | ||
| 3451 | } catch (const std::exception & err) { | ||
| 3452 | LLAMA_LOG_ERROR("%s: error saving sequence state file: %s\n", __func__, err.what()); | ||
| 3453 | return 0; | ||
| 3454 | } | ||
| 3455 | } | ||
| 3456 | |||
| 3457 | size_t llama_state_seq_load_file(llama_context * ctx, const char * filepath, llama_seq_id dest_seq_id, llama_token * tokens_out, size_t n_token_capacity, size_t * n_token_count_out) { | ||
| 3458 | ctx->synchronize(); | ||
| 3459 | |||
| 3460 | try { | ||
| 3461 | return ctx->state_seq_load_file(dest_seq_id, filepath, tokens_out, n_token_capacity, n_token_count_out); | ||
| 3462 | } catch (const std::exception & err) { | ||
| 3463 | LLAMA_LOG_ERROR("%s: error loading sequence state file: %s\n", __func__, err.what()); | ||
| 3464 | return 0; | ||
| 3465 | } | ||
| 3466 | } | ||
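| | |||
| | // Sketch: persisting a single sequence to a buffer. The _ext variants take | ||
| | // llama_state_seq_flags; the plain wrappers above pass 0 (full state). | ||
| | // | ||
| | //   const size_t n = llama_state_seq_get_size(ctx, seq_id); | ||
| | //   std::vector<uint8_t> buf(n); | ||
| | //   llama_state_seq_get_data(ctx, buf.data(), buf.size(), seq_id); | ||
| | //   // ... later, possibly into a different sequence id: | ||
| | //   llama_state_seq_set_data(ctx, buf.data(), buf.size(), dst_seq_id); | ||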
| 3467 | |||
| 3468 | // | ||
| | // encode / decode | ||
| | // | ||
| 3469 | |||
| 3470 | int32_t llama_encode( | ||
| 3471 | llama_context * ctx, | ||
| 3472 | llama_batch batch) { | ||
| 3473 | const int ret = ctx->encode(batch); | ||
| 3474 | if (ret != 0) { | ||
| 3475 | LLAMA_LOG_ERROR("%s: failed to encode, ret = %d\n", __func__, ret); | ||
| 3476 | } | ||
| 3477 | |||
| 3478 | return ret; | ||
| 3479 | } | ||
| 3480 | |||
| 3481 | int32_t llama_decode( | ||
| 3482 | llama_context * ctx, | ||
| 3483 | llama_batch batch) { | ||
| 3484 | const int ret = ctx->decode(batch); | ||
| 3485 | if (ret != 0 && ret != 1) { | ||
| 3486 | LLAMA_LOG_ERROR("%s: failed to decode, ret = %d\n", __func__, ret); | ||
| 3487 | } | ||
| 3488 | |||
| 3489 | return ret; | ||
| 3490 | } | ||
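| | |||
| | // Sketch: a minimal decode step. Consistent with the logging above, 0 means | ||
| | // success and 1 is a soft failure (no memory slot found for the batch; the | ||
| | // caller may retry with a smaller batch); negative values are hard errors. | ||
| | // `tokens`/`n_tokens` are placeholders. | ||
| | // | ||
| | //   llama_batch batch = llama_batch_get_one(tokens, n_tokens); | ||
| | //   const int32_t ret = llama_decode(ctx, batch); | ||
| | //   if (ret < 0) { | ||
| | //       // fatal: the context state may be inconsistent | ||
| | //   } | ||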
| 3491 | |||
| 3492 | // | ||
| 3493 | // perf | ||
| 3494 | // | ||
| 3495 | |||
| 3496 | llama_perf_context_data llama_perf_context(const llama_context * ctx) { | ||
| 3497 | llama_perf_context_data data = {}; | ||
| 3498 | |||
| 3499 | if (ctx == nullptr) { | ||
| 3500 | return data; | ||
| 3501 | } | ||
| 3502 | |||
| 3503 | data = ctx->perf_get_data(); | ||
| 3504 | |||
| 3505 | return data; | ||
| 3506 | } | ||
| 3507 | |||
| 3508 | void llama_perf_context_print(const llama_context * ctx) { | ||
| 3509 | const auto data = llama_perf_context(ctx); | ||
| 3510 | |||
| 3511 | const double t_end_ms = 1e-3 * ggml_time_us(); | ||
| 3512 | |||
| 3513 | LLAMA_LOG_INFO("%s: load time = %10.2f ms\n", __func__, data.t_load_ms); | ||
| 3514 | LLAMA_LOG_INFO("%s: prompt eval time = %10.2f ms / %5d tokens (%8.2f ms per token, %8.2f tokens per second)\n", | ||
| 3515 | __func__, data.t_p_eval_ms, data.n_p_eval, data.t_p_eval_ms / data.n_p_eval, 1e3 / data.t_p_eval_ms * data.n_p_eval); | ||
| 3516 | LLAMA_LOG_INFO("%s: eval time = %10.2f ms / %5d runs (%8.2f ms per token, %8.2f tokens per second)\n", | ||
| 3517 | __func__, data.t_eval_ms, data.n_eval, data.t_eval_ms / data.n_eval, 1e3 / data.t_eval_ms * data.n_eval); | ||
| 3518 | LLAMA_LOG_INFO("%s: total time = %10.2f ms / %5d tokens\n", __func__, (t_end_ms - data.t_start_ms), (data.n_p_eval + data.n_eval)); | ||
| 3519 | LLAMA_LOG_INFO("%s: graphs reused = %10d\n", __func__, data.n_reused); | ||
| 3520 | } | ||
| 3521 | |||
| 3522 | void llama_perf_context_reset(llama_context * ctx) { | ||
| 3523 | ctx->perf_reset(); | ||
| 3524 | } | ||
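| | |||
| | // Sketch: bracketing a generation pass with the perf counters. | ||
| | // | ||
| | //   llama_perf_context_reset(ctx); | ||
| | //   // ... decode loop ... | ||
| | //   llama_perf_context_print(ctx); | ||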
| 3525 | |||
| 3526 | void llama_memory_breakdown_print(const struct llama_context * ctx) { | ||
| 3527 | const std::vector<ggml_backend_dev_t> & devices = ctx->get_model().devices; | ||
| 3528 | |||
| 3529 | std::map<ggml_backend_buffer_type_t, llama_memory_breakdown_data> memory_breakdown = ctx->memory_breakdown(); | ||
| 3530 | |||
| 3531 | std::vector<std::array<std::string, 9>> table_data; | ||
| 3532 | table_data.reserve(devices.size()); | ||
| 3533 | const std::string template_header = "%s: | %s | %s %s %s %s %s %s %s |\n"; | ||
| 3534 | const std::string template_gpu = "%s: | %s | %s = %s + (%s = %s + %s + %s) + %s |\n"; | ||
| 3535 | const std::string template_other = "%s: | %s | %s %s %s = %s + %s + %s %s |\n"; | ||
| 3536 | |||
| 3537 | table_data.push_back({template_header, "memory breakdown [MiB]", "total", "free", "self", "model", "context", "compute", "unaccounted"}); | ||
| 3538 | |||
| 3539 | constexpr size_t MiB = 1024 * 1024; | ||
| 3540 | const std::vector<std::string> desc_prefixes_strip = {"NVIDIA ", "GeForce ", "Tesla ", "AMD ", "Radeon ", "Instinct "}; | ||
| 3541 | |||
| 3542 | // track seen buffer types to avoid double counting: | ||
| 3543 | std::set<ggml_backend_buffer_type_t> seen_buffer_types; | ||
| 3544 | |||
| 3545 | // accumulative memory breakdown for each device and for host: | ||
| 3546 | std::vector<llama_memory_breakdown_data> mb_dev(devices.size()); | ||
| 3547 | llama_memory_breakdown_data mb_host; | ||
| 3548 | |||
| 3549 | for (const auto & buft_mb : memory_breakdown) { | ||
| 3550 | ggml_backend_buffer_type_t buft = buft_mb.first; | ||
| 3551 | const llama_memory_breakdown_data & mb = buft_mb.second; | ||
| 3552 | if (ggml_backend_buft_is_host(buft)) { | ||
| 3553 | mb_host.model += mb.model; | ||
| 3554 | mb_host.context += mb.context; | ||
| 3555 | mb_host.compute += mb.compute; | ||
| 3556 | seen_buffer_types.insert(buft); | ||
| 3557 | continue; | ||
| 3558 | } | ||
| 3559 | ggml_backend_dev_t dev = ggml_backend_buft_get_device(buft); | ||
| 3560 | if (dev) { | ||
| 3561 | int i_dev = -1; | ||
| 3562 | for (size_t i = 0; i < devices.size(); i++) { | ||
| 3563 | if (devices[i] == dev) { | ||
| 3564 | i_dev = i; | ||
| 3565 | break; | ||
| 3566 | } | ||
| 3567 | } | ||
| 3568 | if (i_dev != -1) { | ||
| 3569 | mb_dev[i_dev].model += mb.model; | ||
| 3570 | mb_dev[i_dev].context += mb.context; | ||
| 3571 | mb_dev[i_dev].compute += mb.compute; | ||
| 3572 | seen_buffer_types.insert(buft); | ||
| 3573 | continue; | ||
| 3574 | } | ||
| 3575 | } | ||
| 3576 | } | ||
| 3577 | |||
| 3578 | // print memory breakdown for each device: | ||
| 3579 | for (size_t i = 0; i < devices.size(); i++) { | ||
| 3580 | ggml_backend_dev_t dev = devices[i]; | ||
| 3581 | llama_memory_breakdown_data mb = mb_dev[i]; | ||
| 3582 | |||
| 3583 | const std::string name = ggml_backend_dev_name(dev); | ||
| 3584 | std::string desc = ggml_backend_dev_description(dev); | ||
| 3585 | for (const std::string & prefix : desc_prefixes_strip) { | ||
| 3586 | if (desc.length() >= prefix.length() && desc.substr(0, prefix.length()) == prefix) { | ||
| 3587 | desc = desc.substr(prefix.length()); | ||
| 3588 | } | ||
| 3589 | } | ||
| 3590 | |||
| 3591 | size_t free, total; | ||
| 3592 | ggml_backend_dev_memory(dev, &free, &total); | ||
| 3593 | |||
| 3594 | const size_t self = mb.model + mb.context + mb.compute; | ||
| 3595 | const size_t unaccounted = total - self - free; | ||
| 3596 | |||
| 3597 | table_data.push_back({ | ||
| 3598 | template_gpu, | ||
| 3599 | " - " + name + " (" + desc + ")", | ||
| 3600 | std::to_string(total / MiB), | ||
| 3601 | std::to_string(free / MiB), | ||
| 3602 | std::to_string(self / MiB), | ||
| 3603 | std::to_string(mb.model / MiB), | ||
| 3604 | std::to_string(mb.context / MiB), | ||
| 3605 | std::to_string(mb.compute / MiB), | ||
| 3606 | std::to_string(unaccounted / MiB)}); | ||
| 3607 | } | ||
| 3608 | |||
| 3609 | // print memory breakdown for host: | ||
| 3610 | { | ||
| 3611 | const size_t self = mb_host.model + mb_host.context + mb_host.compute; | ||
| 3612 | table_data.push_back({ | ||
| 3613 | template_other, | ||
| 3614 | " - Host", | ||
| 3615 | "", // total | ||
| 3616 | "", // free | ||
| 3617 | std::to_string(self / MiB), | ||
| 3618 | std::to_string(mb_host.model / MiB), | ||
| 3619 | std::to_string(mb_host.context / MiB), | ||
| 3620 | std::to_string(mb_host.compute / MiB), | ||
| 3621 | ""}); // unaccounted | ||
| 3622 | } | ||
| 3623 | |||
| 3624 | // print memory breakdown for all remaining buffer types: | ||
| 3625 | for (const auto & buft_mb : memory_breakdown) { | ||
| 3626 | ggml_backend_buffer_type_t buft = buft_mb.first; | ||
| 3627 | const llama_memory_breakdown_data & mb = buft_mb.second; | ||
| 3628 | if (seen_buffer_types.count(buft) == 1) { | ||
| 3629 | continue; | ||
| 3630 | } | ||
| 3631 | const std::string name = ggml_backend_buft_name(buft); | ||
| 3632 | const size_t self = mb.model + mb.context + mb.compute; | ||
| 3633 | table_data.push_back({ | ||
| 3634 | template_other, | ||
| 3635 | " - " + name, | ||
| 3636 | "", // total | ||
| 3637 | "", // free | ||
| 3638 | std::to_string(self / MiB), | ||
| 3639 | std::to_string(mb.model / MiB), | ||
| 3640 | std::to_string(mb.context / MiB), | ||
| 3641 | std::to_string(mb.compute / MiB), | ||
| 3642 | ""}); // unaccounted | ||
| 3643 | seen_buffer_types.insert(buft); | ||
| 3644 | } | ||
| 3645 | |||
| 3646 | for (size_t j = 1; j < table_data[0].size(); j++) { | ||
| 3647 | size_t max_len = 0; | ||
| 3648 | for (const auto & td : table_data) { | ||
| 3649 | max_len = std::max(max_len, td[j].length()); | ||
| 3650 | } | ||
| 3651 | for (auto & td : table_data) { | ||
| 3652 | td[j].insert(j == 1 ? td[j].length() : 0, max_len - td[j].length(), ' '); | ||
| 3653 | } | ||
| 3654 | } | ||
| 3655 | for (const auto & td : table_data) { | ||
| 3656 | LLAMA_LOG_INFO(td[0].c_str(), | ||
| 3657 | __func__, td[1].c_str(), td[2].c_str(), td[3].c_str(), td[4].c_str(), td[5].c_str(), | ||
| 3658 | td[6].c_str(), td[7].c_str(), td[8].c_str()); | ||
| 3659 | } | ||
| 3660 | } | ||
| 3661 | |||
| 3662 | // | ||
| 3663 | // training | ||
| 3664 | // | ||
| 3665 | |||
| 3666 | bool llama_opt_param_filter_all(const struct ggml_tensor * tensor, void * userdata) { | ||
| 3667 | GGML_UNUSED(tensor); | ||
| 3668 | GGML_UNUSED(userdata); | ||
| 3669 | return true; | ||
| 3670 | } | ||
| 3671 | |||
| 3672 | void llama_opt_init(struct llama_context * ctx, struct llama_model * model, struct llama_opt_params lopt_params) { | ||
| 3673 | ctx->opt_init(model, lopt_params); | ||
| 3674 | } | ||
| 3675 | |||
| 3676 | void llama_opt_epoch( | ||
| 3677 | struct llama_context * ctx, | ||
| 3678 | ggml_opt_dataset_t dataset, | ||
| 3679 | ggml_opt_result_t result_train, | ||
| 3680 | ggml_opt_result_t result_eval, | ||
| 3681 | int64_t idata_split, | ||
| 3682 | ggml_opt_epoch_callback callback_train, | ||
| 3683 | ggml_opt_epoch_callback callback_eval) { | ||
| 3684 | ctx->opt_epoch( | ||
| 3685 | dataset, | ||
| 3686 | result_train, | ||
| 3687 | result_eval, | ||
| 3688 | idata_split, | ||
| 3689 | callback_train, | ||
| 3690 | callback_eval); | ||
| 3691 | } | ||
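| | |||
| | // Sketch: a fine-tuning loop built on the wrappers above. The optimizer | ||
| | // callback is assumed to come from ggml-opt; dataset/result objects and | ||
| | // `n_epochs` are placeholders. | ||
| | // | ||
| | //   llama_opt_params oparams = {}; | ||
| | //   oparams.param_filter = llama_opt_param_filter_all; | ||
| | //   oparams.get_opt_pars = ggml_opt_get_default_optimizer_params; // assumed ggml-opt helper | ||
| | //   llama_opt_init(ctx, model, oparams); | ||
| | //   for (int epoch = 0; epoch < n_epochs; ++epoch) { | ||
| | //       llama_opt_epoch(ctx, dataset, result_train, result_eval, | ||
| | //                       idata_split, callback_train, callback_eval); | ||
| | //   } | ||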
