#include "debug.h"
#include "arg.h"
#include "common.h"
#include "log.h"
#include "llama.h"

#include <cstdlib>
#include <string>
#include <vector>
#include <filesystem>
#include <fstream>
#include <regex>
#include <stdexcept>

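// Print the example usage text, substituting the program name for the {prog} placeholder.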
static void print_usage(int /*argc*/, char ** argv) {
    const std::string usage_template = R"(
        example usage:

        Print tensors:

        {prog} -m model.gguf -p "Hello my name is" --verbose

        The tensors to be printed can be filtered with the --tensor-filter option.

        Save logits/embeddings:

        {prog} -m model.gguf -p "Hello my name is" --save-logits

        Add --embedding to save embeddings)" "\n";

    // Fix the source code indentation above that is introduced by the raw string literal.
    std::string usage = std::regex_replace(usage_template, std::regex("\\n {8}"), "\n");
    usage = std::regex_replace(usage, std::regex("\\{prog\\}"), argv[0]);
    LOG("%s\n", usage.c_str());
}

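// Returns true if the context was created with a pooling type, i.e. the embeddings
// are aggregated per sequence instead of being available per token.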
static bool has_pooling(llama_context * ctx) {
    switch (llama_pooling_type(ctx)) {
        case LLAMA_POOLING_TYPE_NONE:
        case LLAMA_POOLING_TYPE_UNSPECIFIED:
            return false;
        default:
            return true;
    }
}

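// Collects the data to be saved for the current prompt: either the logits of the
// last token or the (optionally normalized) embeddings, together with the prompt
// text and its token ids.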
struct output_data {
    float * data_ptr = nullptr;
    int data_size = 0;
    std::string type_suffix;
    std::vector<float> embd_norm;
    std::string prompt;
    std::vector<llama_token> tokens;

    output_data(llama_context * ctx, const llama_model * model, const common_params & params) {
        const llama_vocab * vocab = llama_model_get_vocab(model);
        const bool add_bos = llama_vocab_get_add_bos(vocab);

        tokens = common_tokenize(ctx, params.prompt, add_bos);
        prompt = params.prompt;

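        // For embedding models save the embeddings: one pooled vector for the whole
        // sequence, or one vector per input token when no pooling is used.
        // Otherwise save the logits of the last token.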
        if (params.embedding) {
            const int n_embd = llama_model_n_embd_out(model);
            const bool pooling = has_pooling(ctx);
            const int n_embd_count = pooling ? 1 : tokens.size();
            const int n_floats = n_embd * n_embd_count;

            float * embd_raw = pooling ? llama_get_embeddings_seq(ctx, 0) : llama_get_embeddings(ctx);
            if (embd_raw == nullptr) {
                throw std::runtime_error("failed to get embeddings from the model");
            }

            LOG_DBG("pooling_enabled: %s\n", pooling ? "true" : "false");
            LOG_DBG("n_embd: %d\n", n_embd);
            LOG_DBG("n_floats: %d\n", n_floats);
            LOG_DBG("n_embd_count: %d\n", n_embd_count);

            data_ptr = embd_raw;
            data_size = n_floats;
            type_suffix = "-embeddings";

            if (params.embd_normalize >= 0) {
                embd_norm.resize(n_floats);
                for (int i = 0; i < n_embd_count; i++) {
                    common_embd_normalize(embd_raw + i*n_embd, embd_norm.data() + i*n_embd, n_embd, params.embd_normalize);
                }
                data_ptr = embd_norm.data();
            }
        } else {
            const float * logits = llama_get_logits_ith(ctx, tokens.size() - 1);
            const int n_logits = llama_vocab_n_tokens(vocab);

            data_ptr = const_cast<float*>(logits);
            data_size = n_logits;
            type_suffix = "";
        }
    }
};

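// Writes the collected logits/embeddings, the prompt and the token ids to
// output_dir, both as raw binary files and as human-readable text files.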
static void save_output_data(const output_data & output, const std::string & model_name, const std::string & output_dir) {
    std::filesystem::create_directory(output_dir);
    auto base_path = std::filesystem::path{output_dir} / ("llamacpp-" + model_name + output.type_suffix);

    // Save logits/embeddings to binary file.
    {
        std::filesystem::path filepath{base_path.string() + ".bin"};
        std::ofstream file{filepath, std::ios::binary};
        if (!file) {
            throw std::runtime_error("failed to open binary output file: " + filepath.string());
        }
        file.write(reinterpret_cast<const char*>(output.data_ptr), output.data_size * sizeof(float));
        LOG("Data saved to %s\n", filepath.c_str());
    }

    // Save logits/embeddings to text file.
    {
        std::filesystem::path filepath{base_path.string() + ".txt"};
        std::ofstream file{filepath};
        if (!file) {
            throw std::runtime_error("failed to open text output file: " + filepath.string());
        }
        for (int i = 0; i < output.data_size; i++) {
            file << i << ": " << output.data_ptr[i] << '\n';
        }
        LOG("Data saved to %s\n", filepath.c_str());
    }

    // Save prompt and tokens to text file.
    {
        std::filesystem::path filepath{base_path.string() + "-prompt.txt"};
        std::ofstream file{filepath};
        if (!file) {
            throw std::runtime_error("failed to open prompt output file: " + filepath.string());
        }

        file << "prompt: " << output.prompt << '\n';
        file << "n_tokens: " << output.tokens.size() << '\n';

        file << "token ids: ";
        for (size_t i = 0; i < output.tokens.size(); i++) {
            file << output.tokens[i];
            if (i + 1 < output.tokens.size()) {
                file << ", ";
            }
        }
        file << '\n';
        LOG("Prompt saved to %s\n", filepath.c_str());
    }

    // Save token ids to binary file.
    {
        std::filesystem::path filepath{base_path.string() + "-tokens.bin"};
        std::ofstream file{filepath, std::ios::binary};
        if (!file) {
            throw std::runtime_error("failed to open tokens binary file: " + filepath.string());
        }
        file.write(reinterpret_cast<const char*>(output.tokens.data()), output.tokens.size() * sizeof(llama_token));
        LOG("Tokens saved to %s\n", filepath.c_str());
    }
}

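// Logs the input prompt together with each token id and its detokenized piece.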
static void print_tokenized_prompt(llama_context * ctx, const std::vector<llama_token> & tokens, const std::string & prompt) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    LOG("Model add_bos: %s\n", llama_vocab_get_add_bos(vocab) ? "true" : "false");
    LOG("Input prompt: \"%s\"\n", prompt.c_str());
    LOG("Token ids (%zu):\n", tokens.size());

    for (auto id : tokens) {
        std::string piece(128, '\0');
        int n = llama_token_to_piece(vocab, id, piece.data(), piece.size(), 0, true);
        if (n < 0) {
            LOG_ERR("failed to convert token %d to piece\n", id);
            continue;
        }
        piece.resize(n);
        LOG("%s(%d) ", piece.c_str(), id);
    }
    LOG("\n");
}

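// Tokenizes the prompt, runs a single decode over it and, when --save-logits is
// given, stores the resulting logits/embeddings and tokens on disk.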
static bool run(llama_context * ctx, const common_params & params) {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    const bool add_bos = llama_vocab_get_add_bos(vocab);

    std::vector<llama_token> tokens = common_tokenize(ctx, params.prompt, add_bos);

    if (tokens.empty()) {
        LOG_ERR("%s : there are no input tokens to process (try providing a prompt with '-p')\n", __func__);
        return false;
    }

    if (llama_decode(ctx, llama_batch_get_one(tokens.data(), tokens.size()))) {
        LOG_ERR("%s : failed to eval\n", __func__);
        return false;
    }

    print_tokenized_prompt(ctx, tokens, params.prompt);

    if (params.save_logits) {
        output_data output {ctx, model, params};
        std::filesystem::path model_path{params.model.path};
        std::string model_name{model_path.stem().string()};
        save_output_data(output, model_name, params.logits_output_dir);
    }

    return true;
}

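// Entry point: parses the common parameters, loads the model and context, runs the
// prompt once and prints performance statistics.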
int main(int argc, char ** argv) {
    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_DEBUG, print_usage)) {
        return 1;
    }

    common_init();

    llama_backend_init();
    llama_numa_init(params.numa);

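    // Callback data for debug tensor printing; the tensors that get printed can be
    // filtered with the --tensor-filter option.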
    base_callback_data cb_data(params, params.tensor_filter);

    auto llama_init = common_init_from_params(params);

    auto * model = llama_init->model();
    auto * ctx = llama_init->context();

    if (model == nullptr || ctx == nullptr) {
        LOG_ERR("%s : failed to init\n", __func__);
        return 1;
    }

    {
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
        LOG_INF("\n");
    }

    if (!run(ctx, params)) {
        return 1;
    }

    LOG("\n");
    llama_perf_context_print(ctx);

    llama_backend_free();

    return 0;
}