author    Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
committer Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
commit    b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree      211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/tools/batched-bench
download  llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz
Engage!
Diffstat (limited to 'llama.cpp/tools/batched-bench')
-rw-r--r--  llama.cpp/tools/batched-bench/CMakeLists.txt      8
-rw-r--r--  llama.cpp/tools/batched-bench/README.md          60
-rw-r--r--  llama.cpp/tools/batched-bench/batched-bench.cpp  256
3 files changed, 324 insertions, 0 deletions
diff --git a/llama.cpp/tools/batched-bench/CMakeLists.txt b/llama.cpp/tools/batched-bench/CMakeLists.txt
new file mode 100644
index 0000000..4a46b57
--- /dev/null
+++ b/llama.cpp/tools/batched-bench/CMakeLists.txt
@@ -0,0 +1,8 @@
+set(TARGET llama-batched-bench)
+add_executable(${TARGET} batched-bench.cpp)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+if(LLAMA_TOOLS_INSTALL)
+ install(TARGETS ${TARGET} RUNTIME)
+endif()
diff --git a/llama.cpp/tools/batched-bench/README.md b/llama.cpp/tools/batched-bench/README.md
new file mode 100644
index 0000000..df67c47
--- /dev/null
+++ b/llama.cpp/tools/batched-bench/README.md
@@ -0,0 +1,60 @@
+# llama.cpp/tools/batched-bench
+
+Benchmark the batched decoding performance of `llama.cpp`.
+
+## Usage
+
+There are 2 modes of operation:
+
+- `prompt not shared` - each batch has a separate prompt of size `PP` (i.e. `N_KV = B*(PP + TG)`)
+- `prompt is shared` - there is a common prompt of size `PP` used by all batches (i.e. `N_KV = PP + B*TG`)
+
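+For example, with `PP = 128`, `TG = 128` and `B = 4`: `N_KV = 4*(128 + 128) = 1024` without sharing, but only `N_KV = 128 + 4*128 = 640` with a shared prompt.
+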
+```bash
+./llama-batched-bench -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]
+
+# LLaMA 7B, F16, N_KV_MAX = 16384 (8GB), prompt not shared
+./llama-batched-bench -m ./models/llama-7b/ggml-model-f16.gguf -c 16384 -b 2048 -ub 512 -ngl 99
+
+# LLaMA 7B, Q8_0, N_KV_MAX = 16384 (8GB), prompt is shared
+./llama-batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 16384 -b 2048 -ub 512 -ngl 99 -pps
+
+# custom set of batches
+./llama-batched-bench -m ./models/llama-7b/ggml-model-q8_0.gguf -c 2048 -b 512 -ub 512 -ngl 999 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32
+```
+
+## Sample results
+
+- `PP` - prompt tokens per batch
+- `TG` - generated tokens per batch
+- `B` - number of batches
+- `N_KV` - required KV cache size
+- `T_PP` - prompt processing time (i.e. time to first token)
+- `S_PP` - prompt processing speed (`(B*PP)/T_PP`, or `PP/T_PP` when the prompt is shared)
+- `T_TG` - time to generate all batches
+- `S_TG` - text generation speed (`(B*TG)/T_TG`)
+- `T` - total time
+- `S` - total speed (i.e. all tokens / total time)
+
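+For example, in the first row below `B = 1`, so `S_TG = 128/3.079 ≈ 41.6 t/s` and `S = 256/3.187 ≈ 80.3 t/s` (the table computes speeds from unrounded times).
+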
+| PP | TG | B | N_KV | T_PP s | S_PP t/s | T_TG s | S_TG t/s | T s | S t/s |
+|-------|--------|------|--------|----------|----------|----------|----------|----------|----------|
+| 128 | 128 | 1 | 256 | 0.108 | 1186.64 | 3.079 | 41.57 | 3.187 | 80.32 |
+| 128 | 128 | 2 | 512 | 0.198 | 1295.19 | 5.029 | 50.90 | 5.227 | 97.95 |
+| 128 | 128 | 4 | 1024 | 0.373 | 1373.96 | 6.878 | 74.44 | 7.251 | 141.23 |
+| 128 | 128 | 8 | 2048 | 0.751 | 1363.27 | 7.344 | 139.43 | 8.095 | 252.99 |
+| 128 | 128 | 16 | 4096 | 1.570 | 1304.68 | 8.455 | 242.23 | 10.024 | 408.60 |
+| 128 | 128 | 32 | 8192 | 3.408 | 1201.73 | 8.801 | 465.40 | 12.209 | 670.96 |
+| 128 | 256 | 1 | 384 | 0.107 | 1196.70 | 6.329 | 40.45 | 6.436 | 59.67 |
+| 128 | 256 | 2 | 768 | 0.194 | 1317.45 | 10.239 | 50.00 | 10.433 | 73.61 |
+| 128 | 256 | 4 | 1536 | 0.366 | 1399.03 | 13.960 | 73.35 | 14.326 | 107.22 |
+| 128 | 256 | 8 | 3072 | 0.751 | 1363.92 | 15.110 | 135.54 | 15.861 | 193.69 |
+| 128 | 256 | 16 | 6144 | 1.569 | 1304.93 | 18.073 | 226.64 | 19.642 | 312.80 |
+| 128 | 256 | 32 | 12288 | 3.409 | 1201.35 | 19.223 | 426.15 | 22.633 | 542.93 |
+
+### JSONL output
+
+Pass `--output-format jsonl` to output JSONL instead of Markdown, à la
+
+```json lines
+{"n_kv_max": 2048, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "is_pp_shared": 0, "n_gpu_layers": 99, "n_threads": 8, "n_threads_batch": 8, "pp": 128, "tg": 128, "pl": 1, "n_kv": 256, "t_pp": 0.233810, "speed_pp": 547.453064, "t_tg": 3.503684, "speed_tg": 36.532974, "t": 3.737494, "speed": 68.495094}
+{"n_kv_max": 2048, "n_batch": 2048, "n_ubatch": 512, "flash_attn": 0, "is_pp_shared": 0, "n_gpu_layers": 99, "n_threads": 8, "n_threads_batch": 8, "pp": 128, "tg": 128, "pl": 2, "n_kv": 512, "t_pp": 0.422602, "speed_pp": 605.770935, "t_tg": 11.106112, "speed_tg": 23.050371, "t": 11.528713, "speed": 44.410854}
+```
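+
+Each line is a self-contained JSON object, so results from multiple runs can be concatenated and filtered with standard tooling.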
diff --git a/llama.cpp/tools/batched-bench/batched-bench.cpp b/llama.cpp/tools/batched-bench/batched-bench.cpp
new file mode 100644
index 0000000..0f627c5
--- /dev/null
+++ b/llama.cpp/tools/batched-bench/batched-bench.cpp
@@ -0,0 +1,256 @@
+#include "arg.h"
+#include "common.h"
+#include "log.h"
+#include "llama.h"
+
+#include <algorithm>
+#include <cstdio>
+#include <string>
+#include <vector>
+
+static void print_usage(int, char ** argv) {
+ LOG("\nexample usage:\n");
+ LOG("\n %s -m model.gguf -c 2048 -b 2048 -ub 512 -npp 128,256,512 -ntg 128,256 -npl 1,2,4,8,16,32 [-pps]\n", argv[0]);
+ LOG("\n");
+}
+
+int main(int argc, char ** argv) {
+ common_params params;
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_BENCH, print_usage)) {
+ return 1;
+ }
+
+ common_init();
+
+ int is_pp_shared = params.is_pp_shared;
+ int is_tg_separate = params.is_tg_separate;
+
+ std::vector<int> n_pp = params.n_pp;
+ std::vector<int> n_tg = params.n_tg;
+ std::vector<int> n_pl = params.n_pl;
+
+ // init LLM
+
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ // initialize the model
+
+ llama_model_params model_params = common_model_params_to_llama(params);
+
+ llama_model * model = llama_model_load_from_file(params.model.path.c_str(), model_params);
+
+ if (model == NULL) {
+        fprintf(stderr, "%s: error: unable to load model\n", __func__);
+ return 1;
+ }
+
+ llama_context_params ctx_params = common_context_params_to_llama(params);
+
+ // ensure enough sequences are available
+ ctx_params.n_seq_max = n_pl.empty() ? 1 : *std::max_element(n_pl.begin(), n_pl.end());
+
+ llama_context * ctx = llama_init_from_model(model, ctx_params);
+
+ if (ctx == NULL) {
+        fprintf(stderr, "%s: error: failed to create the llama_context\n", __func__);
+ llama_model_free(model);
+ return 1;
+ }
+
+ const llama_vocab * vocab = llama_model_get_vocab(model);
+ const int32_t n_vocab = llama_vocab_n_tokens(vocab);
+
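+    // the content of the tokens does not matter for the benchmark, only the
+    // shapes do, so uniformly random token ids are good enough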
+ const auto get_token_rand = [n_vocab]() -> llama_token {
+ return std::rand() % n_vocab;
+ };
+
+ auto * mem = llama_get_memory(ctx);
+
+ const int32_t n_kv_max = llama_n_ctx(ctx);
+
+ llama_batch batch = llama_batch_init(n_kv_max, 0, 1);
+
+ // decode in batches of ctx_params.n_batch tokens
+ auto decode_helper = [](llama_context * ctx, llama_batch & batch, int32_t n_batch, bool synchronize) {
+ for (int32_t i = 0; i < batch.n_tokens; i += n_batch) {
+ const int32_t n_tokens = std::min(n_batch, batch.n_tokens - i);
+
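+            // take a zero-copy view of the next chunk of the batch; the nullptr
+            // is the unused embd field (token ids are submitted, not embeddings)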
+ llama_batch batch_view = {
+ n_tokens,
+ batch.token + i,
+ nullptr,
+ batch.pos + i,
+ batch.n_seq_id + i,
+ batch.seq_id + i,
+ batch.logits + i,
+ };
+
+ const int ret = llama_decode(ctx, batch_view);
+ if (ret != 0) {
+ LOG_ERR("failed to decode the batch, n_batch = %d, ret = %d\n", n_batch, ret);
+ return false;
+ }
+
+ if (synchronize) {
+ llama_synchronize(ctx);
+ }
+ }
+
+ return true;
+ };
+
+ // warm up
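+    // (the first decode triggers one-time setup such as graph allocation,
+    // which should not be attributed to the timed runs below)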
+ {
+ for (int i = 0; i < 16; ++i) {
+ common_batch_add(batch, get_token_rand(), i, { 0 }, false);
+ }
+
+ if (!decode_helper(ctx, batch, ctx_params.n_batch, true)) {
+ LOG_ERR("%s: llama_decode() failed\n", __func__);
+ llama_free(ctx);
+ llama_model_free(model);
+ return 1;
+ }
+ }
+
+ if (!params.batched_bench_output_jsonl) {
+ LOG("\n");
+ LOG("%s: n_kv_max = %d, n_batch = %d, n_ubatch = %d, flash_attn = %d, is_pp_shared = %d, is_tg_separate = %d, n_gpu_layers = %d, n_threads = %u, n_threads_batch = %u\n", __func__, n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), is_pp_shared, is_tg_separate, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch);
+ LOG("\n");
+ LOG("|%6s | %6s | %4s | %6s | %8s | %8s | %8s | %8s | %8s | %8s |\n", "PP", "TG", "B", "N_KV", "T_PP s", "S_PP t/s", "T_TG s", "S_TG t/s", "T s", "S t/s");
+ LOG("|%6s-|-%6s-|-%4s-|-%6s-|-%8s-|-%8s-|-%8s-|-%8s-|-%8s-|-%8s-|\n", "------", "------", "----", "------", "--------", "--------", "--------", "--------", "--------", "--------");
+ }
+
+ for ( int i_pp = 0; i_pp < (int) n_pp.size(); ++i_pp) {
+ for ( int i_tg = 0; i_tg < (int) n_tg.size(); ++i_tg) {
+ for (int i_pl = 0; i_pl < (int) n_pl.size(); ++i_pl) {
+ const int pp = n_pp[i_pp];
+ const int tg = n_tg[i_tg];
+ const int pl = n_pl[i_pl];
+
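+                // KV cells required: each sequence stores its own prompt and
+                // generation, except that a shared prompt occupies only pp cells
+                // in a unified KV cache (it is duplicated per sequence otherwise)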
+ const int n_ctx_req = is_pp_shared ? (params.kv_unified ? pp : pl*pp) + pl*tg : pl*(pp + tg);
+
+ if (n_ctx_req > n_kv_max) {
+ continue;
+ }
+
+ common_batch_clear(batch);
+
+ for (int j = 0; j < (is_pp_shared ? 1 : pl); ++j) {
+ for (int i = 0; i < pp; ++i) {
+ common_batch_add(batch, get_token_rand(), i, { j }, i == pp - 1);
+ }
+ }
+
+ llama_memory_clear(mem, false);
+
+ const auto t_pp_start = ggml_time_us();
+
+ if (!decode_helper(ctx, batch, ctx_params.n_batch, false)) {
+ LOG_ERR("%s: llama_decode() failed\n", __func__);
+ llama_free(ctx);
+ llama_model_free(model);
+ return 1;
+ }
+
+ llama_synchronize(ctx);
+
+ const auto t_pp_end = ggml_time_us();
+
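+                // with a shared prompt, broadcast the KV cells of sequence 0
+                // (the entire prompt) to the remaining sequences before generation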
+ if (is_pp_shared) {
+ for (int32_t i = 1; i < pl; ++i) {
+ llama_memory_seq_cp(mem, 0, i, -1, -1);
+ }
+
+ if (!params.kv_unified) {
+ // run one dummy token to apply the memory copy
+ common_batch_clear(batch);
+ common_batch_add(batch, get_token_rand(), pp + 0, { 0 }, true);
+ if (!decode_helper(ctx, batch, ctx_params.n_batch, true)) {
+ LOG_ERR("%s: llama_decode() failed\n", __func__);
+ llama_free(ctx);
+ llama_model_free(model);
+ return 1;
+ }
+ llama_memory_seq_rm(mem, 0, pp, -1);
+ }
+ }
+
+ const auto t_tg_start = ggml_time_us();
+
+ if (is_tg_separate) {
+ // decode pattern:
+ // 0 0 0 ... 1 1 1 ... 2 2 2 ... 3 3 3 ...
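+                // each sequence is generated to completion on its own, one token
+                // per decode call, i.e. no cross-sequence batching during generation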
+ for (int j = 0; j < pl; ++j) {
+ for (int i = 0; i < tg; ++i) {
+ common_batch_clear(batch);
+
+ common_batch_add(batch, get_token_rand(), pp + i, { j }, true);
+
+ if (!decode_helper(ctx, batch, ctx_params.n_batch, true)) {
+ LOG_ERR("%s: llama_decode() failed\n", __func__);
+ llama_free(ctx);
+ llama_model_free(model);
+ return 1;
+ }
+ }
+ }
+ } else {
+ // decode pattern:
+ // 0123 0123 0123 ...
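+                // all pl sequences advance one token per llama_decode call, so the
+                // work is batched across sequences (the typical server scenario)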
+ for (int i = 0; i < tg; ++i) {
+ common_batch_clear(batch);
+
+ for (int j = 0; j < pl; ++j) {
+ common_batch_add(batch, get_token_rand(), pp + i, { j }, true);
+ }
+
+ if (!decode_helper(ctx, batch, ctx_params.n_batch, true)) {
+ LOG_ERR("%s: llama_decode() failed\n", __func__);
+ llama_free(ctx);
+ llama_model_free(model);
+ return 1;
+ }
+ }
+ }
+
+ const auto t_tg_end = ggml_time_us();
+
+ const int32_t n_kv = n_ctx_req;
+
+ const float t_pp = (t_pp_end - t_pp_start) / 1000000.0f;
+ const float t_tg = (t_tg_end - t_tg_start) / 1000000.0f;
+ const float t = t_pp + t_tg;
+
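+                // a shared prompt passes through the pipeline once (pp tokens),
+                // otherwise each of the pl sequences processes its own copy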
+ const float speed_pp = is_pp_shared ? pp / t_pp : pl*pp / t_pp;
+ const float speed_tg = pl*tg / t_tg;
+ const float speed = ((is_pp_shared ? pp : pl*pp) + pl*tg) / t;
+
+                if (params.batched_bench_output_jsonl) {
+ LOG(
+ "{\"n_kv_max\": %d, \"n_batch\": %d, \"n_ubatch\": %d, \"flash_attn\": %d, \"is_pp_shared\": %d, \"n_gpu_layers\": %d, \"n_threads\": %u, \"n_threads_batch\": %u, "
+ "\"pp\": %d, \"tg\": %d, \"pl\": %d, \"n_kv\": %d, \"t_pp\": %f, \"speed_pp\": %f, \"t_tg\": %f, \"speed_tg\": %f, \"t\": %f, \"speed\": %f}\n",
+ n_kv_max, params.n_batch, params.n_ubatch, int(params.flash_attn_type), params.is_pp_shared, params.n_gpu_layers, ctx_params.n_threads, ctx_params.n_threads_batch,
+ pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed
+ );
+ } else {
+ LOG("|%6d | %6d | %4d | %6d | %8.3f | %8.2f | %8.3f | %8.2f | %8.3f | %8.2f |\n", pp, tg, pl, n_kv, t_pp, speed_pp, t_tg, speed_tg, t, speed);
+ }
+ }
+ }
+ }
+
+ LOG("\n");
+ llama_perf_context_print(ctx);
+
+ llama_batch_free(batch);
+
+ llama_free(ctx);
+ llama_model_free(model);
+
+ llama_backend_free();
+
+ return 0;
+}