#include "arg.h"
#include "common.h"
#include "log.h"
#include "ngram-cache.h"
#include "llama.h"
#include "ggml.h"

#include <cstdint>
#include <cstdio>
#include <cinttypes>
#include <fstream>
#include <string>
#include <vector>

int main(int argc, char ** argv){
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_LOOKUP)) {
        return 1;
    }

    common_init();

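    // maximum number of tokens to draft in a single lookup step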
    const int n_draft = params.speculative.n_max;

    // init llama.cpp
    llama_backend_init();
    llama_numa_init(params.numa);

    // load the model
    auto llama_init = common_init_from_params(params);

    llama_context * ctx = llama_init->context();

    // tokenize the prompt
    std::vector<llama_token> inp;
    inp = common_tokenize(ctx, params.prompt, true, true);

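    // three n-gram caches: one built from the current chunk (context), one accumulated
    // dynamically across chunks, and one loaded from a static lookup file that is never updated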
    common_ngram_cache ngram_cache_context;
    common_ngram_cache ngram_cache_dynamic;
    common_ngram_cache ngram_cache_static;

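    // t_draft_flat_us accumulates the one-time cost of loading the lookup caches,
    // t_draft_us accumulates the per-token drafting and cache-update time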
    int64_t t_draft_flat_us = 0;
    int64_t t_draft_us = 0;

    {
        const int64_t t_start_draft_us = ggml_time_us();

        if (!params.speculative.lookup_cache_static.empty()) {
            try {
                ngram_cache_static = common_ngram_cache_load(params.speculative.lookup_cache_static);
            } catch (std::ifstream::failure const &) {
                LOG_ERR("failed to open static lookup cache: %s\n", params.speculative.lookup_cache_static.c_str());
                exit(1);
            }
        }

        if (!params.speculative.lookup_cache_dynamic.empty()) {
            try {
                ngram_cache_dynamic = common_ngram_cache_load(params.speculative.lookup_cache_dynamic);
            } catch (std::ifstream::failure const &) {} // if the file does not exist it will simply be created at the end of the program
        }

        t_draft_flat_us += ggml_time_us() - t_start_draft_us;
    }

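    // number of input tokens and the context size that determines the chunking below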
    const int n_input = inp.size();
    const int n_ctx = llama_n_ctx(ctx);

    int n_drafted = 0;
    int n_accept  = 0;

    const int64_t t_start_ms = ggml_time_ms();

    // Iterate over the input tokens in chunks of size n_ctx.
    // Each chunk is treated as if it were a sequential generation, but with predetermined tokens to ensure reproducibility.
    for (int i_start = 0; i_start + n_ctx < n_input; i_start += n_ctx) {
        const std::vector<llama_token> inp_slice(inp.begin() + i_start, inp.begin() + i_start + n_ctx);
        std::vector<llama_token> pseudo_output;
        pseudo_output.push_back(inp_slice[0]);

        while ((int) pseudo_output.size() < n_ctx) {
            // Simulate drafting and decoding from draft:
            std::vector<llama_token> draft;
            draft.push_back(pseudo_output.back());

            {
                const int64_t t_start_draft_us = ggml_time_us();
                common_ngram_cache_draft(pseudo_output, draft, n_draft, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, ngram_cache_context, ngram_cache_dynamic, ngram_cache_static);
                t_draft_us += ggml_time_us() - t_start_draft_us;
            }

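            // draft[0] is the last token of pseudo_output rather than a newly drafted token, so it is excluded from the count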
            n_drafted += draft.size() - 1;

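            // accept drafted tokens for as long as they match the ground-truth input; stop at the first mismatch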
            for (size_t j = 1; j < draft.size() && (int) pseudo_output.size() < n_ctx; ++j) {
                const llama_token ground_truth = inp_slice[pseudo_output.size()];
                const llama_token drafted = draft[j];

                if (ground_truth != drafted) {
                    break;
                }

                ++n_accept;
                pseudo_output.push_back(ground_truth);

                {
                    const int64_t t_start_draft_us = ggml_time_us();
                    common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
                    t_draft_us += ggml_time_us() - t_start_draft_us;
                }
            }

            // After each simulated batch decoding, simulate the sampling of a single token:
            if ((int) pseudo_output.size() < n_ctx) {
                pseudo_output.push_back(inp_slice[pseudo_output.size()]);
                {
                    const int64_t t_start_draft_us = ggml_time_us();
                    common_ngram_cache_update(ngram_cache_context, LLAMA_NGRAM_MIN, LLAMA_NGRAM_MAX, pseudo_output, 1, false);
                    t_draft_us += ggml_time_us() - t_start_draft_us;
                }
            }

            draft.erase(draft.begin());

        }
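        // log progress and an ETA estimate roughly once every 100000 input tokens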
        if (i_start > 0 && i_start / 100000 != (i_start - n_ctx) / 100000) {
            const int64_t t_now_ms = ggml_time_ms();
            const int64_t eta_ms   = (n_input - i_start) * (t_now_ms - t_start_ms) / i_start;
            const int64_t eta_min  = eta_ms / (60*1000);
            const int64_t eta_s    = (eta_ms - 60*1000*eta_min) / 1000;

            LOG_INF("lookup-stats: %d/%d done, ETA: %02" PRId64 ":%02" PRId64 "\n", i_start, n_input, eta_min, eta_s);
        }

        // After each chunk, update the dynamic ngram cache with the context ngram cache:
        common_ngram_cache_merge(ngram_cache_dynamic, ngram_cache_context);
        ngram_cache_context.clear();
    }

    LOG("\n");

    LOG_INF("\n");
    LOG_INF("n_draft      = %d\n", n_draft);
    LOG_INF("n_predict    = %d\n", n_input - n_input % n_ctx);
    LOG_INF("n_drafted    = %d\n", n_drafted);
    LOG_INF("t_draft_flat = %.2f ms\n", t_draft_flat_us*1e-3);
    LOG_INF("t_draft      = %.2f ms, %.2f us per token, %.2f tokens per second\n",
            t_draft_us*1e-3, 1.0f*t_draft_us/n_drafted, n_drafted/(1e-6*t_draft_us));
    LOG_INF("n_accept     = %d\n", n_accept);
    LOG_INF("accept       = %.3f%%\n", 100.0f * n_accept / n_drafted);

    llama_backend_free();

    LOG("\n\n");

    return 0;
}