#include "arg.h"
#include "common.h"
#include "debug.h"
#include "log.h"
#include "llama.h"
#include "llama-cpp.h"

#include <string>
#include <vector>

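// tokenize the prompt and evaluate it in a single batch; the eval callback
// registered in main() fires for each graph node during the decode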
static bool run(llama_context * ctx, const common_params & params) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

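    // check whether this vocab expects a BOS token prepended to the prompt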
    const bool add_bos = llama_vocab_get_add_bos(vocab);

    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);

    if (tokens.empty()) {
        LOG_ERR("%s : there are no input tokens to process - try to provide a prompt with '-p'\n", __func__);
        return false;
    }

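    // evaluate the entire prompt as a single batch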
    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
        LOG_ERR("%s : failed to eval\n", __func__);
        return false;
    }

    return true;
}

int main(int argc, char ** argv) {
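    // state handed to the eval callback through cb_eval_user_data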
    base_callback_data cb_data;

    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMMON)) {
        return 1;
    }

    common_init();

    llama_backend_init();
    llama_numa_init(params.numa);

    // pass the callback to the backend scheduler
    // it will be executed for each node during the graph computation
    params.cb_eval = common_debug_cb_eval<false>;
    params.cb_eval_user_data = &cb_data;
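    // disable the warmup run (it would also trigger the eval callback)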
    params.warmup = false;

    // load the model and create the context from the parsed parameters
    auto llama_init = common_init_from_params(params);

    auto * model = llama_init->model();
    auto * ctx = llama_init->context();

    if (model == nullptr || ctx == nullptr) {
        LOG_ERR("%s : failed to init\n", __func__);
        return 1;
    }

    // print system information
    {
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
        LOG_INF("\n");
    }

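    // run the prompt through the model - the eval callback is invoked while the graph executes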
    bool OK = run(ctx, params);
    if (!OK) {
        return 1;
    }

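    // print timing statistics for the context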
    LOG("\n");
    llama_perf_context_print(ctx);

    llama_backend_free();

    return 0;
}