Diffstat (limited to 'llama.cpp/examples/speculative-simple')
-rw-r--r--  llama.cpp/examples/speculative-simple/CMakeLists.txt          |   5
-rw-r--r--  llama.cpp/examples/speculative-simple/README.md               |  12
-rw-r--r--  llama.cpp/examples/speculative-simple/speculative-simple.cpp  | 266
3 files changed, 283 insertions(+), 0 deletions(-)
diff --git a/llama.cpp/examples/speculative-simple/CMakeLists.txt b/llama.cpp/examples/speculative-simple/CMakeLists.txt
new file mode 100644
index 0000000..aeaea74
--- /dev/null
+++ b/llama.cpp/examples/speculative-simple/CMakeLists.txt
@@ -0,0 +1,5 @@
+set(TARGET llama-speculative-simple)
+add_executable(${TARGET} speculative-simple.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/llama.cpp/examples/speculative-simple/README.md b/llama.cpp/examples/speculative-simple/README.md
new file mode 100644
index 0000000..e3a6c6b
--- /dev/null
+++ b/llama.cpp/examples/speculative-simple/README.md
@@ -0,0 +1,12 @@
+# llama.cpp/examples/speculative-simple
+
+Demonstration of basic greedy speculative decoding
+
+```bash
+./bin/llama-speculative-simple \
+ -m ../models/qwen2.5-32b-coder-instruct/ggml-model-q8_0.gguf \
+ -md ../models/qwen2.5-1.5b-coder-instruct/ggml-model-q4_0.gguf \
+ -f test.txt -c 0 -ngl 99 --color \
+ --sampling-seq k --top-k 1 -fa --temp 0.0 \
+ -ngld 99 --draft-max 16 --draft-min 5 --draft-p-min 0.9
+```
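+
+In this command, `-m` is the target model and `-md` the draft model. The
+`--draft-*` flags control the speculation: `--draft-max` caps the number of
+tokens drafted per step, `--draft-min` discards drafts shorter than the given
+length, and `--draft-p-min` stops drafting once the draft model's top-token
+probability falls below the given threshold.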
diff --git a/llama.cpp/examples/speculative-simple/speculative-simple.cpp b/llama.cpp/examples/speculative-simple/speculative-simple.cpp
new file mode 100644
index 0000000..d8b1f5a
--- /dev/null
+++ b/llama.cpp/examples/speculative-simple/speculative-simple.cpp
@@ -0,0 +1,266 @@
+#include "arg.h"
+#include "common.h"
+#include "sampling.h"
+#include "speculative.h"
+#include "log.h"
+#include "llama.h"
+
+#include <cstdio>
+#include <cstring>
+#include <string>
+#include <vector>
+
+int main(int argc, char ** argv) {
+ common_params params;
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_SPECULATIVE)) {
+ return 1;
+ }
+
+ if (params.n_predict < -1) {
+ LOG_ERR("%s: --n-predict must be >= -1\n", __func__);
+ return 1;
+ }
+
+ common_init();
+
+ if (params.speculative.mparams_dft.path.empty()) {
+ LOG_ERR("%s: --model-draft is required\n", __func__);
+ return 1;
+ }
+
+ // init llama.cpp
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ llama_model * model_tgt = NULL;
+
+ llama_context * ctx_tgt = NULL;
+
+ // load the target model
+ auto llama_init_tgt = common_init_from_params(params);
+
+ model_tgt = llama_init_tgt->model();
+ ctx_tgt = llama_init_tgt->context();
+
+ const llama_vocab * vocab = llama_model_get_vocab(model_tgt);
+
+ // load the draft model
+ llama_model_ptr model_dft;
+
+ // TODO: simplify this logic
+ {
+ const auto & params_spec = params.speculative;
+
+ auto params_dft = params;
+
+ params_dft.n_parallel = 1;
+ params_dft.n_ctx = params_spec.n_ctx;
+ params_dft.n_batch = llama_n_ctx_seq(ctx_tgt);
+ params_dft.devices = params_spec.devices;
+ params_dft.model = params_spec.mparams_dft;
+ params_dft.n_gpu_layers = params_spec.n_gpu_layers;
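+
+        // note: the draft batch is sized to the target's per-sequence context,
+        // presumably so the draft model can ingest the entire prompt in one call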
+
+ if (params_spec.cpuparams.n_threads > 0) {
+ params_dft.cpuparams.n_threads = params.speculative.cpuparams.n_threads;
+ params_dft.cpuparams_batch.n_threads = params.speculative.cpuparams_batch.n_threads;
+ }
+
+ params_dft.tensor_buft_overrides = params.speculative.tensor_buft_overrides;
+
+ auto mparams_dft = common_model_params_to_llama(params_dft);
+
+ model_dft.reset(llama_model_load_from_file(params_dft.model.path.c_str(), mparams_dft));
+ if (model_dft == nullptr) {
+ LOG_ERR("failed to load draft model, '%s'\n", params_dft.model.path.c_str());
+ return 1;
+ }
+
+ params.speculative.model_dft = model_dft.get();
+ params.speculative.cparams_dft = common_context_params_to_llama(params_dft);
+ }
+
+    // tokenize the prompt
+    std::vector<llama_token> inp = common_tokenize(ctx_tgt, params.prompt, true, true);
+
+ if (llama_n_ctx(ctx_tgt) < (uint32_t) inp.size()) {
+ LOG_ERR("%s: the prompt exceeds the context size (%d tokens, ctx %d)\n", __func__, (int) inp.size(), llama_n_ctx(ctx_tgt));
+
+ return 1;
+ }
+
+ if (llama_n_batch(ctx_tgt) < (uint32_t) inp.size()) {
+ LOG_ERR("%s: the prompt exceeds the batch size (%d tokens, batch %d)\n", __func__, (int) inp.size(), llama_n_batch(ctx_tgt));
+
+ return 1;
+ }
+
+ LOG("\n\n");
+
+ for (auto id : inp) {
+ LOG("%s", common_token_to_piece(ctx_tgt, id).c_str());
+ }
+
+ int n_predict = 0;
+ int n_drafted = 0;
+ int n_accept = 0;
+
+ // used to determine end of generation
+ bool has_eos = false;
+
+ // ================================================
+ // everything until here is standard initialization
+ // the relevant stuff for speculative decoding starts here
+
+ const auto t_enc_start = ggml_time_us();
+
+ // target model sampling context
+ struct common_sampler * smpl = common_sampler_init(model_tgt, params.sampling);
+
+ // eval the prompt
+ llama_decode(ctx_tgt, llama_batch_get_one(inp.data(), inp.size() - 1));
+
+ // note: keep the last token separate!
+ llama_token id_last = inp.back();
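+
+    // id_last is intentionally not decoded yet - it will go into the next
+    // target batch together with the draft, so the target's logits at its
+    // position verify the first drafted token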
+
+ // all tokens currently in the target context
+ llama_tokens prompt_tgt(inp.begin(), inp.end() - 1);
+ prompt_tgt.reserve(llama_n_ctx(ctx_tgt));
+
+ int n_past = inp.size() - 1;
+
+ // init the speculator
+ const auto & params_spec = params.speculative;
+
+ struct common_speculative * spec = common_speculative_init(params.speculative, ctx_tgt);
+
+ common_speculative_begin(spec, prompt_tgt);
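+
+    // prime the speculator with the current prompt; later draft calls can then
+    // presumably reuse this prefix instead of reprocessing it each time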
+
+ llama_batch batch_tgt = llama_batch_init(llama_n_batch(ctx_tgt), 0, 1);
+
+ const auto t_enc_end = ggml_time_us();
+
+ const auto t_dec_start = ggml_time_us();
+
+ while (true) {
+ // optionally, generate draft tokens that can be appended to the target batch
+ //
+ // this is the most important part of the speculation. the more probable tokens that are provided here
+ // the better the performance will be. in theory, this computation can be performed asynchronously and even
+ // offloaded to a remote device. it doesn't even have to be based on an LLM. instead, it can provide tokens
+ // from a cache or lookup tables.
+ //
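+        // a minimal sketch of a model-free drafter (illustration only, not
+        // part of the common API): prompt lookup - propose the tokens that
+        // followed the most recent occurrence of id_last in the context:
+        //
+        //   llama_tokens draft;
+        //   for (size_t i = prompt_tgt.size(); i-- > 0; ) {
+        //       if (prompt_tgt[i] != id_last) {
+        //           continue;
+        //       }
+        //       for (size_t j = i + 1; j < prompt_tgt.size() && draft.size() < (size_t) params_spec.n_max; ++j) {
+        //           draft.push_back(prompt_tgt[j]);
+        //       }
+        //       break;
+        //   }
+        //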
+ llama_tokens draft = common_speculative_draft(spec, params_spec, prompt_tgt, id_last);
+
+ //LOG_DBG("draft: %s\n", string_from(ctx_dft, draft).c_str());
+
+ // always have a token to evaluate from before - id_last
+ common_batch_clear(batch_tgt);
+ common_batch_add (batch_tgt, id_last, n_past++, { 0 }, true);
+
+ // evaluate the target model on [id_last, draft0, draft1, ..., draftN-1]
+ {
+ // do not waste time on small drafts
+ if (draft.size() < (size_t) params_spec.n_min) {
+ draft.clear();
+ }
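+
+            // for example, with --draft-min 5 a 3-token draft is discarded and
+            // this iteration degenerates to normal decoding of just id_last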
+
+ for (size_t i = 0; i < draft.size(); ++i) {
+ common_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true);
+ }
+
+ //LOG_DBG("target batch: %s\n", string_from(ctx_tgt, batch_tgt).c_str());
+
+ llama_decode(ctx_tgt, batch_tgt);
+ }
+
+ // sample from the full target batch and return the accepted tokens based on the target sampler
+ //
+ // for each token to be accepted, the sampler would have to sample that same token
+ // in such cases, instead of decoding the sampled token as we normally do, we simply continue with the
+ // available logits from the batch and sample the next token until we run out of logits or the sampler
+ // disagrees with the draft
+ //
+ const auto ids = common_sampler_sample_and_accept_n(smpl, ctx_tgt, draft);
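+
+        // for example, with draft = [A, B, C]: if the target samples A after
+        // id_last and B after A, but X != C after B, then ids = [A, B, X] -
+        // two draft tokens accepted plus one freshly sampled token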
+
+ //LOG_DBG("ids: %s\n", string_from(ctx_tgt, ids).c_str());
+
+ GGML_ASSERT(ids.size() > 0); // there will always be at least one accepted token
+
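+        // the last element of ids is the freshly sampled token that has not
+        // been decoded yet - it becomes the new id_last, hence the "- 1" below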
+ n_past += ids.size() - 1;
+ n_drafted += draft.size(); // note: we ignore the discarded small drafts
+ n_accept += ids.size() - 1;
+ n_predict += ids.size();
+
+ // process the accepted tokens and update contexts
+ //
+ // this is the standard token post-processing that we normally do
+ // in this case, we do it for a group of accepted tokens at once
+ //
+ for (size_t i = 0; i < ids.size(); ++i) {
+ prompt_tgt.push_back(id_last);
+
+ id_last = ids[i];
+
+ if (llama_vocab_is_eog(vocab, id_last)) {
+ has_eos = true;
+ break;
+ }
+
+ const std::string token_str = common_token_to_piece(ctx_tgt, id_last);
+
+            if (params.use_color && i + 1 < ids.size()) {
+                // color the accepted draft tokens (36 = cyan); the last token
+                // of ids was sampled fresh, so it is printed without color
+                LOG("\u001b[%dm%s\u001b[37m", 36, token_str.c_str());
+ } else {
+ LOG("%s", token_str.c_str());
+ }
+ }
+
+ LOG_DBG("accepted %d/%d draft tokens, the last target token is: (%d)\n", (int) ids.size() - 1, (int) draft.size(), id_last);
+
+ {
+ LOG_DBG("clear kv cache from any extra tokens, n_past = %d\n", n_past);
+
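+            // positions >= n_past still hold the draft tokens that were not
+            // accepted this step - remove them so the next decode appends cleanly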
+ llama_memory_seq_rm(llama_get_memory(ctx_tgt), 0, n_past, -1);
+ }
+
+ if ((params.n_predict >= 0 && n_predict > params.n_predict) || has_eos) {
+ break;
+ }
+ }
+
+ auto t_dec_end = ggml_time_us();
+
+ const int n_input = inp.size();
+
+ LOG("\n\n");
+
+ LOG_INF("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
+ LOG_INF("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));
+
+ LOG_INF("\n");
+ LOG_INF("n_draft = %d\n", params_spec.n_max);
+ LOG_INF("n_predict = %d\n", n_predict);
+ LOG_INF("n_drafted = %d\n", n_drafted);
+ LOG_INF("n_accept = %d\n", n_accept);
+ LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
+
+ LOG_INF("\n");
+ LOG_INF("draft:\n\n");
+
+ LOG_INF("\n");
+ LOG_INF("target:\n\n");
+ common_perf_print(ctx_tgt, smpl);
+
+ llama_batch_free(batch_tgt);
+
+ common_sampler_free(smpl);
+ common_speculative_free(spec);
+
+ llama_backend_free();
+
+ LOG("\n\n");
+
+ return 0;
+}