Diffstat (limited to 'llama.cpp/examples/lookup')
-rw-r--r--  llama.cpp/examples/lookup/CMakeLists.txt     |  23
-rw-r--r--  llama.cpp/examples/lookup/README.md          |  12
-rw-r--r--  llama.cpp/examples/lookup/lookup-create.cpp  |  40
-rw-r--r--  llama.cpp/examples/lookup/lookup-merge.cpp   |  47
-rw-r--r--  llama.cpp/examples/lookup/lookup-stats.cpp   | 157
-rw-r--r--  llama.cpp/examples/lookup/lookup.cpp         | 242
6 files changed, 521 insertions(+), 0 deletions(-)
diff --git a/llama.cpp/examples/lookup/CMakeLists.txt b/llama.cpp/examples/lookup/CMakeLists.txt
new file mode 100644
index 0000000..fba78ce
--- /dev/null
+++ b/llama.cpp/examples/lookup/CMakeLists.txt
@@ -0,0 +1,23 @@
+set(TARGET llama-lookup)
+add_executable(${TARGET} lookup.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+set(TARGET llama-lookup-create)
+add_executable(${TARGET} lookup-create.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+set(TARGET llama-lookup-merge)
+add_executable(${TARGET} lookup-merge.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
+
+set(TARGET llama-lookup-stats)
+add_executable(${TARGET} lookup-stats.cpp)
+install(TARGETS ${TARGET} RUNTIME)
+target_link_libraries(${TARGET} PRIVATE common llama ${CMAKE_THREAD_LIBS_INIT})
+target_compile_features(${TARGET} PRIVATE cxx_std_17)
diff --git a/llama.cpp/examples/lookup/README.md b/llama.cpp/examples/lookup/README.md
new file mode 100644
index 0000000..07d7384
--- /dev/null
+++ b/llama.cpp/examples/lookup/README.md
@@ -0,0 +1,12 @@
+# llama.cpp/examples/lookup
+
+Demonstration of Prompt Lookup Decoding
+
+https://github.com/apoorvumang/prompt-lookup-decoding
+
+The key parameters for lookup decoding are `ngram_min`, `ngram_max` and `n_draft`. The first two set the minimum and maximum size of the n-grams to search for in the prompt; `n_draft` specifies how many subsequent tokens to draft when a match is found.
+
+More info:
+
+https://github.com/ggml-org/llama.cpp/pull/4484
+https://github.com/ggml-org/llama.cpp/issues/4226
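
The core idea behind prompt lookup decoding is small enough to sketch without the ngram-cache machinery used by the files below: take the most recent n-gram of the sequence, look for an earlier occurrence of it, and propose the tokens that followed that occurrence as a draft. A minimal, self-contained illustration; the helper find_candidate_draft is hypothetical and not part of this diff:

    // Minimal sketch of prompt lookup decoding (hypothetical helper, not part of this diff).
    // Take the last `ngram` tokens of the sequence, look for an earlier occurrence of the
    // same n-gram, and propose the tokens that followed that occurrence as a draft.
    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using llama_token = std::int32_t;

    static std::vector<llama_token> find_candidate_draft(
            const std::vector<llama_token> & tokens, int ngram_min, int ngram_max, int n_draft) {
        const int n = (int) tokens.size();

        // prefer longer n-grams: they are more specific and less likely to mis-predict
        for (int ngram = ngram_max; ngram >= ngram_min; --ngram) {
            if (n <= ngram) {
                continue;
            }
            const llama_token * suffix = tokens.data() + n - ngram;

            // scan backwards so the most recent earlier occurrence wins
            for (int i = n - ngram - 1; i >= 0; --i) {
                if (!std::equal(suffix, suffix + ngram, tokens.data() + i)) {
                    continue;
                }
                const int begin = i + ngram;                    // first token after the match
                const int end   = std::min(begin + n_draft, n); // draft at most n_draft tokens
                return std::vector<llama_token>(tokens.begin() + begin, tokens.begin() + end);
            }
        }
        return {}; // no match found, fall back to regular decoding
    }

The examples below implement the same idea but route the search through hash-based n-gram caches (context, dynamic and static), so drafting stays cheap even for long contexts.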
diff --git a/llama.cpp/examples/lookup/lookup-create.cpp b/llama.cpp/examples/lookup/lookup-create.cpp
new file mode 100644
index 0000000..f7b6ea1
--- /dev/null
+++ b/llama.cpp/examples/lookup/lookup-create.cpp
@@ -0,0 +1,40 @@
+#include "arg.h"
+#include "common.h"
+#include "ngram-cache.h"
+#include "llama.h"
+
+#include <string>
+#include <vector>
+
+int main(int argc, char ** argv){
+ common_params params;
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
+ return 1;
+ }
+
+ // init llama.cpp
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ // load the model
+ auto llama_init = common_init_from_params(params);
+
+ auto * model = llama_init->model();
+ auto * ctx = llama_init->context();
+
+ GGML_ASSERT(model != nullptr);
+
+ // tokenize the prompt
+ std::vector<llama_token> inp;
+ inp = common_tokenize(ctx, params.prompt, true, true);
+ fprintf(stderr, "%s: tokenization done\n", __func__);
+
+ common_ngram_cache ngram_cache;
+ common_ngram_cache_update(ngram_cache, LLAMA_NGRAM_STATIC, LLAMA_NGRAM_STATIC, inp, inp.size(), true);
+ fprintf(stderr, "%s: hashing done, writing file to %s\n", __func__, params.speculative.lookup_cache_static.c_str());
+
+ common_ngram_cache_save(ngram_cache, params.speculative.lookup_cache_static);
+
+ return 0;
+}
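
lookup-create hashes an entire prompt or corpus into a static n-gram cache once, so later runs can reuse it (it is read back via params.speculative.lookup_cache_static in lookup.cpp and lookup-stats.cpp below). Conceptually such a cache is an n-gram to next-token frequency table; the real layout is defined in ngram-cache.h, so the types in this schematic are hypothetical:

    // Schematic of what an n-gram cache conceptually stores (hypothetical types; the real
    // layout is defined in ngram-cache.h): for every n-gram seen in the input, count how
    // often each token followed it.
    #include <cstdint>
    #include <map>
    #include <vector>

    using llama_token        = std::int32_t;
    using ngram              = std::vector<llama_token>;             // N consecutive tokens
    using ngram_counts       = std::map<llama_token, std::int32_t>;  // next token -> count
    using ngram_cache_sketch = std::map<ngram, ngram_counts>;        // n-gram -> its counts

    static void cache_update(ngram_cache_sketch & cache, const std::vector<llama_token> & inp, int N) {
        for (size_t i = 0; i + N < inp.size(); ++i) {
            const ngram key(inp.begin() + i, inp.begin() + i + N);
            ++cache[key][inp[i + N]]; // token inp[i + N] follows the n-gram starting at i
        }
    }

Drafting then amounts to looking up the current suffix n-gram and proposing its most frequent successors.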
diff --git a/llama.cpp/examples/lookup/lookup-merge.cpp b/llama.cpp/examples/lookup/lookup-merge.cpp
new file mode 100644
index 0000000..6871c0f
--- /dev/null
+++ b/llama.cpp/examples/lookup/lookup-merge.cpp
@@ -0,0 +1,47 @@
+#include "ggml.h"
+#include "llama.h"
+#include "common.h"
+#include "ngram-cache.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <fstream>
+#include <iostream>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+static void print_usage(char* argv0) {
+ fprintf(stderr, "Merges multiple lookup cache files into a single one.\n");
+ fprintf(stderr, "Usage: %s [--help] lookup_part_1.bin lookup_part_2.bin ... lookup_merged.bin\n", argv0);
+}
+
+int main(int argc, char ** argv){
+ if (argc < 3) {
+ print_usage(argv[0]);
+ exit(1);
+ }
+
+ std::vector<std::string> args;
+ args.resize(argc-1);
+ for (int i = 0; i < argc-1; ++i) {
+ args[i] = argv[i+1];
+ if (args[i] == "-h" || args[i] == "--help") {
+ print_usage(argv[0]);
+ exit(0);
+ }
+ }
+
+ fprintf(stderr, "lookup-merge: loading file %s\n", args[0].c_str());
+ common_ngram_cache ngram_cache_merged = common_ngram_cache_load(args[0]);
+
+ for (size_t i = 1; i < args.size()-1; ++i) {
+ fprintf(stderr, "lookup-merge: loading file %s\n", args[i].c_str());
+ common_ngram_cache ngram_cache = common_ngram_cache_load(args[i]);
+
+ common_ngram_cache_merge(ngram_cache_merged, ngram_cache);
+ }
+
+ fprintf(stderr, "lookup-merge: saving file %s\n", args.back().c_str());
+ common_ngram_cache_save(ngram_cache_merged, args.back());
+}
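
In terms of the schematic cache above, merging just sums the next-token counts of the inputs for every n-gram, which is presumably why caches can be built from separate text chunks and combined afterwards. A sketch reusing the hypothetical aliases from the previous block:

    // Sketch of merging two schematic caches (hypothetical types from the previous block):
    // counts for identical n-grams simply add up.
    static void cache_merge(ngram_cache_sketch & dst, const ngram_cache_sketch & src) {
        for (const auto & [key, counts] : src) {
            for (const auto & [token, count] : counts) {
                dst[key][token] += count;
            }
        }
    }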
diff --git a/llama.cpp/examples/lookup/lookup-stats.cpp b/llama.cpp/examples/lookup/lookup-stats.cpp
new file mode 100644
index 0000000..ae28b2e
--- /dev/null
+++ b/llama.cpp/examples/lookup/lookup-stats.cpp
@@ -0,0 +1,157 @@
+#include "arg.h"
+#include "common.h"
+#include "log.h"
+#include "ngram-cache.h"
+#include "llama.h"
+#include "ggml.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <cinttypes>
+#include <fstream>
+#include <string>
+#include <vector>
+
+int main(int argc, char ** argv){
+ common_params params;
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
+ return 1;
+ }
+
+ common_init();
+
+ const int n_draft = params.speculative.n_max;
+
+ // init llama.cpp
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ // load the model
+ auto llama_init = common_init_from_params(params);
+
+ llama_context * ctx = llama_init->context();
+
+ // tokenize the prompt
+ std::vector<llama_token> inp;
+ inp = common_tokenize(ctx, params.prompt, true, true);
+
+ common_ngram_cache ngram_cache_context;
+ common_ngram_cache ngram_cache_dynamic;
+ common_ngram_cache ngram_cache_static;
+
+ int64_t t_draft_flat_us = 0;
+ int64_t t_draft_us = 0;
+
+ {
+ const int64_t t_start_draft_us = ggml_time_us();
+
+ if (!params.speculative.lookup_cache_static.empty()) {
+ try {
+ ngram_cache_static = common_ngram_cache_load(params.speculative.lookup_cache_static);
+ } catch (std::ifstream::failure const &) {
+ LOG_ERR("failed to open static lookup cache: %s", params.speculative.lookup_cache_static.c_str());
+ exit(1);
+ }
+ }
+
+ if (!params.speculative.lookup_cache_dynamic.empty()) {
+ try {
+ ngram_cache_dynamic = common_ngram_cache_load(params.speculative.lookup_cache_dynamic);
+ } catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
+ }
+
+ t_draft_flat_us += ggml_time_us() - t_start_draft_us;
+ }
+
+ const int n_input = inp.size();
+ const int n_ctx = llama_n_ctx(ctx);
+
+ int n_drafted = 0;
+ int n_accept = 0;
+
+ const int64_t t_start_ms = ggml_time_ms();
+
+ // Iterate over input tokens in chunks of size n_ctx.
+ // Each chunk is treated as if it were a sequential generation, but with pre-determined tokens to ensure reproducibility.
+ for (int i_start = 0; i_start + n_ctx < n_input; i_start += n_ctx) {
+ const std::vector<llama_token> inp_slice(inp.begin() + i_start, inp.begin() + i_start + n_ctx);
+ std::vector<llama_token> pseudo_output;
+ pseudo_output.push_back(inp_slice[0]);
+
+ while ((int) pseudo_output.size() < n_ctx) {
+ // Simulate drafting and decoding from draft:
+ std::vector<llama_token> draft;
+ draft.push_back(pseudo_output.back());
+
+ {
+ const int64_t t_start_draft_us = ggml_time_us();
+ common_ngram_cache_draft(pseudo_output, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
+ t_draft_us += ggml_time_us() - t_start_draft_us;
+ }
+
+ n_drafted += draft.size() - 1;
+
+ for (size_t j = 1; j < draft.size() && (int) pseudo_output.size() < n_ctx; ++j) {
+ const llama_token ground_truth = inp_slice[pseudo_output.size()];
+ const llama_token drafted = draft[j];
+
+ if (ground_truth != drafted) {
+ break;
+ }
+
+ ++n_accept;
+ pseudo_output.push_back(ground_truth);
+
+ {
+ const int64_t t_start_draft_us = ggml_time_us();
+ common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
+ t_draft_us += ggml_time_us() - t_start_draft_us;
+ }
+ }
+
+ // After each simulated batch decoding simulate the sampling of a single token:
+ if ((int) pseudo_output.size() < n_ctx) {
+ pseudo_output.push_back(inp_slice[pseudo_output.size()]);
+ {
+ const int64_t t_start_draft_us = ggml_time_us();
+ common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
+ t_draft_us += ggml_time_us() - t_start_draft_us;
+ }
+ }
+
+ draft.erase(draft.begin());
+
+ }
+ if (i_start > 0 && i_start / 100000 != (i_start - n_ctx) / 100000) {
+ const int64_t t_now_ms = ggml_time_ms();
+ const int64_t eta_ms = (n_input - i_start) * (t_now_ms - t_start_ms) / i_start;
+ const int64_t eta_min = eta_ms / (60*1000);
+ const int64_t eta_s = (eta_ms - 60*1000*eta_min) / 1000;
+
+ LOG_INF("lookup-stats: %d/%d done, ETA: %02" PRId64 ":%02" PRId64 "\n", i_start, n_input, eta_min, eta_s);
+ }
+
+ // After each chunk, update the dynamic ngram cache with the context ngram cache:
+ common_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
+ ngram_cache_context.clear();
+ }
+
+ LOG("\n");
+
+ LOG_INF("\n");
+ LOG_INF("n_draft = %d\n", n_draft);
+ LOG_INF("n_predict = %d\n", n_input - n_input % n_ctx);
+ LOG_INF("n_drafted = %d\n", n_drafted);
+ LOG_INF("t_draft_flat = %.2f ms\n", t_draft_flat_us*1e-3);
+ LOG_INF("t_draft = %.2f ms, %.2f us per token, %.2f tokens per second\n",
+ t_draft_us*1e-3, 1.0f*t_draft_us/n_drafted, n_drafted/(1e-6*t_draft_us));
+ LOG_INF("n_accept = %d\n", n_accept);
+ LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
+
+ llama_backend_free();
+
+ LOG("\n\n");
+
+ return 0;
+}
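
For reading the summary this tool prints: t_draft_flat is the one-time cost of loading the cache files, t_draft is the accumulated per-token drafting and cache-update time, and the final accept line is the share of drafted tokens that matched the real next token of the prompt. A worked example with invented numbers, purely to illustrate the arithmetic:

    // Invented numbers, purely to illustrate how the final summary lines are computed.
    const int n_drafted = 8000;                              // tokens proposed by the lookup caches
    const int n_accept  = 2600;                              // of those, tokens that matched the prompt
    const float accept_pct = 100.0f * n_accept / n_drafted;  // -> 32.500% on the "accept" line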
diff --git a/llama.cpp/examples/lookup/lookup.cpp b/llama.cpp/examples/lookup/lookup.cpp
new file mode 100644
index 0000000..c7552dd
--- /dev/null
+++ b/llama.cpp/examples/lookup/lookup.cpp
@@ -0,0 +1,242 @@
+#include "arg.h"
+#include "ggml.h"
+#include "common.h"
+#include "ngram-cache.h"
+#include "sampling.h"
+#include "log.h"
+#include "llama.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <fstream>
+#include <string>
+#include <vector>
+
+int main(int argc, char ** argv){
+ common_params params;
+
+ if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
+ return 1;
+ }
+
+ common_init();
+
+ // max. number of additional tokens to draft if a match is found
+ const int n_draft = params.speculative.n_max;
+
+ // init llama.cpp
+ llama_backend_init();
+ llama_numa_init(params.numa);
+
+ // load the model
+ auto llama_init = common_init_from_params(params);
+
+ auto * model = llama_init->model();
+ auto * ctx = llama_init->context();
+
+ const llama_vocab * vocab = llama_model_get_vocab(model);
+
+ // tokenize the prompt
+ std::vector<llama_token> inp;
+ inp = common_tokenize(ctx, params.prompt, true, true);
+
+ common_ngram_cache ngram_cache_context;
+ common_ngram_cache ngram_cache_dynamic;
+ common_ngram_cache ngram_cache_static;
+ int64_t t_draft_flat_us = 0;
+ int64_t t_draft_us = 0;
+
+ {
+ // Fill up context ngram cache with tokens from user input:
+ const int64_t t_start_draft_us = ggml_time_us();
+ common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, inp.size(), false);
+
+ if (!params.speculative.lookup_cache_static.empty()) {
+ try {
+ ngram_cache_static = common_ngram_cache_load(params.speculative.lookup_cache_static);
+ } catch (std::ifstream::failure const &) {
+ LOG_ERR("failed to open static lookup cache: %s", params.speculative.lookup_cache_static.c_str());
+ exit(1);
+ }
+ }
+
+ if (!params.speculative.lookup_cache_dynamic.empty()) {
+ try {
+ ngram_cache_dynamic = common_ngram_cache_load(params.speculative.lookup_cache_dynamic);
+ } catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
+ }
+
+ t_draft_flat_us += ggml_time_us() - t_start_draft_us;
+ }
+
+ const int max_context_size = llama_n_ctx(ctx);
+ const int max_tokens_list_size = max_context_size - 4;
+
+ if ((int) inp.size() > max_tokens_list_size) {
+ LOG_ERR("%s: prompt too long (%d tokens, max %d)\n", __func__, (int) inp.size(), max_tokens_list_size);
+ return 1;
+ }
+
+ LOG("\n\n");
+
+ for (auto id : inp) {
+ LOG("%s", common_token_to_piece(ctx, id).c_str());
+ }
+
+ fflush(stderr);
+
+ const int n_input = inp.size();
+
+ const auto t_enc_start = ggml_time_us();
+
+ llama_decode(ctx, llama_batch_get_one( inp.data(), n_input - 1));
+ llama_decode(ctx, llama_batch_get_one(&inp.back(), 1));
+
+ const auto t_enc_end = ggml_time_us();
+
+ int n_predict = 0;
+ int n_drafted = 0;
+ int n_accept = 0;
+
+ int n_past = inp.size();
+
+ bool has_eos = false;
+
+ struct common_sampler * smpl = common_sampler_init(model, params.sampling);
+
+ std::vector<llama_token> draft;
+
+ llama_batch batch_tgt = llama_batch_init(llama_n_ctx(ctx), 0, 1);
+
+ const auto t_dec_start = ggml_time_us();
+
+ while (true) {
+ // print current draft sequence
+ LOG_DBG("drafted %s\n", string_from(ctx, draft).c_str());
+
+ int i_dft = 0;
+ while (true) {
+ // sample from the target model
+ llama_token id = common_sampler_sample(smpl, ctx, i_dft);
+
+ common_sampler_accept(smpl, id, true);
+
+ const std::string token_str = common_token_to_piece(ctx, id);
+
+ if (!params.use_color) {
+ LOG("%s", token_str.c_str());
+ }
+
+ if (llama_vocab_is_eog(vocab, id)) {
+ has_eos = true;
+ }
+
+ ++n_predict;
+
+ // check if the target token matches the draft
+ if (i_dft < (int) draft.size() && id == draft[i_dft]) {
+ LOG_DBG("the sampled target token matches the %dth drafted token (%d, '%s') - accepted\n", i_dft, id, token_str.c_str());
+ ++n_accept;
+ ++n_past;
+ ++i_dft;
+ inp.push_back(id);
+ {
+ // Update context ngram cache with the newly accepted token:
+ const int64_t t_start_draft_us = ggml_time_us();
+ common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
+ t_draft_us += ggml_time_us() - t_start_draft_us;
+ }
+
+ if (params.use_color) {
+ // color accepted draft token
+ LOG("\033[34m%s\033[0m", token_str.c_str());
+ fflush(stdout);
+ }
+ continue;
+ }
+
+ if (params.use_color) {
+ LOG("%s", token_str.c_str());
+ }
+ fflush(stdout);
+
+
+ LOG_DBG("the sampled target token (%d, '%s') did not match, or we ran out of drafted tokens\n", id, token_str.c_str());
+
+ draft.clear();
+ draft.push_back(id);
+ inp.push_back(id);
+ {
+ // Update context ngram cache with the newly accepted token:
+ const int64_t t_start_draft_us = ggml_time_us();
+ common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, inp, 1, false);
+ t_draft_us += ggml_time_us() - t_start_draft_us;
+ }
+ break;
+ }
+
+ if ((params.n_predict > 0 && n_predict > params.n_predict) || has_eos) {
+ break;
+ }
+
+ // KV cache management
+ // clean the cache of draft tokens that weren't accepted
+ llama_memory_seq_rm(llama_get_memory(ctx), 0, n_past, -1);
+
+ common_batch_clear(batch_tgt);
+ common_batch_add(batch_tgt, draft[0], n_past, { 0 }, true);
+
+ // Draft already contains a single token sampled from the model:
+ GGML_ASSERT(draft.size() == 1);
+ GGML_ASSERT(draft[0] == inp.back());
+ const int64_t t_start_draft_us = ggml_time_us();
+
+ common_ngram_cache_draft(inp, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
+
+ for (size_t i = 1; i < draft.size(); ++i) {
+ common_batch_add(batch_tgt, draft[i], n_past + i, { 0 }, true);
+ }
+
+ t_draft_us += ggml_time_us() - t_start_draft_us;
+ n_drafted += draft.size() - 1;
+
+ llama_decode(ctx, batch_tgt);
+ ++n_past;
+
+ draft.erase(draft.begin());
+ }
+
+ auto t_dec_end = ggml_time_us();
+
+ // Update dynamic ngram cache with context ngram cache and save it to disk:
+ common_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
+ common_ngram_cache_save(ngram_cache_dynamic, params.speculative.lookup_cache_dynamic);
+
+ LOG("\n\n");
+
+ LOG_INF("encoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_input, (t_enc_end - t_enc_start) / 1e6f, inp.size() / ((t_enc_end - t_enc_start) / 1e6f));
+ LOG_INF("decoded %4d tokens in %8.3f seconds, speed: %8.3f t/s\n", n_predict, (t_dec_end - t_dec_start) / 1e6f, n_predict / ((t_dec_end - t_dec_start) / 1e6f));
+
+ LOG_INF("\n");
+ LOG_INF("n_draft = %d\n", n_draft);
+ LOG_INF("n_predict = %d\n", n_predict);
+ LOG_INF("n_drafted = %d\n", n_drafted);
+ LOG_INF("t_draft_flat = %.2f ms\n", t_draft_flat_us*1e-3);
+ LOG_INF("t_draft = %.2f ms, %.2f us per token, %.2f tokens per second\n",
+ t_draft_us*1e-3, 1.0f*t_draft_us/n_drafted, n_drafted/(1e-6*t_draft_us));
+ LOG_INF("n_accept = %d\n", n_accept);
+ LOG_INF("accept = %.3f%%\n", 100.0f * n_accept / n_drafted);
+
+ LOG_INF("\ntarget:\n\n");
+ common_perf_print(ctx, smpl);
+
+ common_sampler_free(smpl);
+
+ llama_batch_free(batch_tgt);
+
+ llama_backend_free();
+
+ LOG("\n\n");
+
+ return 0;
+}