#include "arg.h"
#include "common.h"
#include "console.h"
#include "log.h"
#include "sampling.h"
#include "llama.h"
#include "chat.h"

#include <cstdio>
#include <cstring>
#include <ctime>
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

#if defined(_MSC_VER)
#pragma warning(disable: 4244 4267) // possible loss of data
#endif

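// globals shared with the asynchronous SIGINT handler below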
static llama_context ** g_ctx;
static llama_model ** g_model;
static common_sampler ** g_smpl;
static common_params * g_params;
static std::vector<llama_token> * g_input_tokens;
static std::ostringstream * g_output_ss;
static std::vector<llama_token> * g_output_tokens;
static bool is_interacting = false;
static bool need_insert_eot = false;

static void print_usage(int argc, char ** argv) {
    (void) argc;

    LOG("\nexample usage:\n");
    LOG("\n  text generation:     %s -m your_model.gguf -p \"I believe the meaning of life is\" -n 128 -no-cnv\n", argv[0]);
    LOG("\n  chat (conversation): %s -m your_model.gguf -sys \"You are a helpful assistant\"\n", argv[0]);
    LOG("\n");
}

static bool file_exists(const std::string & path) {
    std::ifstream f(path.c_str());
    return f.good();
}

static bool file_is_empty(const std::string & path) {
    std::ifstream f;
    f.exceptions(std::ifstream::failbit | std::ifstream::badbit);
    f.open(path.c_str(), std::ios::in | std::ios::binary | std::ios::ate);
    return f.tellg() == 0;
}

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
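        // a first Ctrl+C during interactive generation only interrupts and returns control
        // to the user; a second Ctrl+C (or any Ctrl+C outside interactive mode) prints the
        // timings and exits with the conventional 128 + SIGINT = 130 status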
        if (!is_interacting && g_params->interactive) {
            is_interacting = true;
            need_insert_eot = true;
        } else {
            console::cleanup();
            LOG("\n");
            common_perf_print(*g_ctx, *g_smpl);

            // make sure all logs are flushed
            LOG("Interrupted by user\n");
            common_log_pause(common_log_main());

            _exit(130);
        }
    }
}
#endif

int main(int argc, char ** argv) {
    common_params params;
    g_params = &params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_COMPLETION, print_usage)) {
        return 1;
    }

    common_init();

    auto & sparams = params.sampling;

    // save choice to use color for later
    // (note for later: this is a slightly awkward choice)
    console::init(params.simple_io, params.use_color);
    atexit([]() { console::cleanup(); });

    if (params.embedding) {
        LOG_ERR("************\n");
        LOG_ERR("%s: please use the 'embedding' tool for embedding calculations\n", __func__);
        LOG_ERR("************\n\n");

        return 0;
    }

    if (params.n_ctx != 0 && params.n_ctx < 8) {
        LOG_WRN("%s: warning: minimum context size is 8, using minimum size.\n", __func__);
        params.n_ctx = 8;
    }

    if (params.rope_freq_base != 0.0) {
        LOG_WRN("%s: warning: changing RoPE frequency base to %g.\n", __func__, params.rope_freq_base);
    }

    if (params.rope_freq_scale != 0.0) {
        LOG_WRN("%s: warning: scaling RoPE frequency by %g.\n", __func__, params.rope_freq_scale);
    }

    LOG_INF("%s: llama backend init\n", __func__);

    llama_backend_init();
    llama_numa_init(params.numa);

    llama_model * model = nullptr;
    llama_context * ctx = nullptr;
    common_sampler * smpl = nullptr;

    g_model = &model;
    g_ctx = &ctx;
    g_smpl = &smpl;

    std::vector<common_chat_msg> chat_msgs;

    // load the model and apply lora adapter, if any
    LOG_INF("%s: load the model and apply lora adapter, if any\n", __func__);

    auto llama_init = common_init_from_params(params);

    ctx = llama_init->context();
    model = llama_init->model();
    smpl = llama_init->sampler(0);

    if (ctx == NULL) {
        LOG_ERR("%s: error: unable to create context\n", __func__);
        return 1;
    }

    llama_memory_t mem = llama_get_memory(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);

    // note: the time for chat template initialization is not negligible:
    auto chat_templates = common_chat_templates_init(model, params.chat_template);

    // start measuring performance timings from here
    llama_perf_context_reset(ctx);

    LOG_INF("%s: llama threadpool init, n_threads = %d\n", __func__, (int) params.cpuparams.n_threads);

    auto * cpu_dev = ggml_backend_dev_by_type(GGML_BACKEND_DEVICE_TYPE_CPU);
    if (!cpu_dev) {
        LOG_ERR("%s: no CPU backend found\n", __func__);
        return 1;
    }
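    // the threadpool API is provided by the CPU backend, so resolve the functions
    // dynamically through the backend registry rather than linking them directly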
    auto * reg = ggml_backend_dev_backend_reg(cpu_dev);
    auto * ggml_threadpool_new_fn = (decltype(ggml_threadpool_new) *) ggml_backend_reg_get_proc_address(reg, "ggml_threadpool_new");
    auto * ggml_threadpool_free_fn = (decltype(ggml_threadpool_free) *) ggml_backend_reg_get_proc_address(reg, "ggml_threadpool_free");

    struct ggml_threadpool_params tpp_batch =
            ggml_threadpool_params_from_cpu_params(params.cpuparams_batch);
    struct ggml_threadpool_params tpp =
            ggml_threadpool_params_from_cpu_params(params.cpuparams);

    if (!set_process_priority(params.cpuparams.priority)) {
        LOG_ERR("%s: error: failed to set process priority\n", __func__);
        return 1;
    }

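    // if the batch (prompt processing) and single-token (generation) CPU parameters differ,
    // use a dedicated batch threadpool; the generation threadpool is then created paused so
    // that only one pool is active at a time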
    struct ggml_threadpool * threadpool_batch = NULL;
    if (!ggml_threadpool_params_match(&tpp, &tpp_batch)) {
        threadpool_batch = ggml_threadpool_new_fn(&tpp_batch);
        if (!threadpool_batch) {
            LOG_ERR("%s: batch threadpool create failed : n_threads %d\n", __func__, tpp_batch.n_threads);
            return 1;
        }

        // start the non-batch threadpool in the paused state
        tpp.paused = true;
    }

    struct ggml_threadpool * threadpool = ggml_threadpool_new_fn(&tpp);
    if (!threadpool) {
        LOG_ERR("%s: threadpool create failed : n_threads %d\n", __func__, tpp.n_threads);
        return 1;
    }

    llama_attach_threadpool(ctx, threadpool, threadpool_batch);

    const int n_ctx_train = llama_model_n_ctx_train(model);
    const int n_ctx = llama_n_ctx(ctx);

    if (n_ctx > n_ctx_train) {
        LOG_WRN("%s: model was trained on only %d context tokens (%d specified)\n", __func__, n_ctx_train, n_ctx);
    }

    // auto enable conversation mode if chat template is available
    const bool has_chat_template = common_chat_templates_was_explicit(chat_templates.get());
    if (params.conversation_mode == COMMON_CONVERSATION_MODE_AUTO) {
        if (has_chat_template) {
            LOG_INF("%s: chat template is available, enabling conversation mode (disable it with -no-cnv)\n", __func__);
            params.conversation_mode = COMMON_CONVERSATION_MODE_ENABLED;
        } else {
            params.conversation_mode = COMMON_CONVERSATION_MODE_DISABLED;
        }
    }

    // if the user force-activates conversation mode (via -cnv) without a proper chat template, show a warning
    if (params.conversation_mode && !has_chat_template) {
        LOG_WRN("%s: chat template is not available or is not supported. This may cause the model to output suboptimal responses\n", __func__);
    }

    // print chat template example in conversation mode
    if (params.conversation_mode) {
        if (params.enable_chat_template) {
            if (!params.prompt.empty() && params.system_prompt.empty()) {
                LOG_WRN("*** User-specified prompt will pre-start conversation, did you mean to set --system-prompt (-sys) instead?\n");
            }

            LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(chat_templates.get(), params.use_jinja, params.default_template_kwargs).c_str());
        } else {
            LOG_INF("%s: in-suffix/prefix is specified, chat template will be disabled\n", __func__);
        }
    }

    // print system information
    {
        LOG_INF("\n");
        LOG_INF("%s\n", common_params_get_system_info(params).c_str());
        LOG_INF("\n");
    }

    std::string path_session = params.path_prompt_cache;
    std::vector<llama_token> session_tokens;

    if (!path_session.empty()) {
        LOG_INF("%s: attempting to load saved session from '%s'\n", __func__, path_session.c_str());
        if (!file_exists(path_session)) {
            LOG_INF("%s: session file does not exist, will create.\n", __func__);
        } else if (file_is_empty(path_session)) {
            LOG_INF("%s: The session file is empty. A new session will be initialized.\n", __func__);
        } else {
            // The file exists and is not empty
            session_tokens.resize(n_ctx);
            size_t n_token_count_out = 0;
            if (!llama_state_load_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.capacity(), &n_token_count_out)) {
                LOG_ERR("%s: failed to load session file '%s'\n", __func__, path_session.c_str());
                return 1;
            }
            session_tokens.resize(n_token_count_out);
            LOG_INF("%s: loaded a session with prompt size of %d tokens\n", __func__, (int)session_tokens.size());
        }
    }

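    // add BOS only when the vocab requests it and no Jinja template is in use
    // (with Jinja, the chat template itself typically supplies BOS)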
    const bool add_bos = llama_vocab_get_add_bos(vocab) && !params.use_jinja;
    if (!llama_model_has_encoder(model)) {
        GGML_ASSERT(!llama_vocab_get_add_eos(vocab));
    }

    LOG_DBG("n_ctx: %d, add_bos: %d\n", n_ctx, add_bos);

    std::vector<llama_token> embd_inp;

    bool waiting_for_first_input = false;
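    // helper: format a single message through the active chat template and record it in
    // chat_msgs; returns only the newly formatted piece for this message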
    auto chat_add_and_format = [&chat_msgs, &chat_templates](const std::string & role, const std::string & content) {
        common_chat_msg new_msg;
        new_msg.role = role;
        new_msg.content = content;
        auto formatted = common_chat_format_single(chat_templates.get(), chat_msgs, new_msg, role == "user", g_params->use_jinja);
        chat_msgs.push_back(new_msg);
        LOG_DBG("formatted: '%s'\n", formatted.c_str());
        return formatted;
    };

    std::string prompt;
    {
        if (params.conversation_mode && params.enable_chat_template) {
            if (!params.system_prompt.empty()) {
                // format the system prompt (will use template default if empty)
                chat_add_and_format("system", params.system_prompt);
            }

            if (!params.prompt.empty()) {
                // format and append the user prompt
                chat_add_and_format("user", params.prompt);
            } else {
                waiting_for_first_input = true;
            }

            if (!params.system_prompt.empty() || !params.prompt.empty()) {
                common_chat_templates_inputs inputs;
                inputs.use_jinja = g_params->use_jinja;
                inputs.messages = chat_msgs;
                inputs.add_generation_prompt = !params.prompt.empty();

                prompt = common_chat_templates_apply(chat_templates.get(), inputs).prompt;
            }
        } else {
            // otherwise use the prompt as is
            prompt = params.prompt;
        }

        if (params.interactive_first || !prompt.empty() || session_tokens.empty()) {
            LOG_DBG("tokenize the prompt\n");
            embd_inp = common_tokenize(ctx, prompt, true, true);
        } else {
            LOG_DBG("use session tokens\n");
            embd_inp = session_tokens;
        }

        LOG_DBG("prompt: \"%s\"\n", prompt.c_str());
        LOG_DBG("tokens: %s\n", string_from(ctx, embd_inp).c_str());
    }

    // Should not run without any tokens
    if (!waiting_for_first_input && embd_inp.empty()) {
        if (add_bos) {
            embd_inp.push_back(llama_vocab_bos(vocab));
            LOG_WRN("embd_inp was considered empty and bos was added: %s\n", string_from(ctx, embd_inp).c_str());
        } else {
            LOG_ERR("input is empty\n");
            return -1;
        }
    }

    // make sure the prompt fits within the context window
    if ((int) embd_inp.size() > n_ctx - 4) {
        LOG_ERR("%s: prompt is too long (%d tokens, max %d)\n", __func__, (int) embd_inp.size(), n_ctx - 4);
        return 1;
    }

    bool session_do_save = false;

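    // compare the current prompt with the tokens restored from the session file and find
    // the longest common prefix - that part can be reused without re-evaluation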
    {
        size_t n_match = 0;

        if (!session_tokens.empty()) {
            for (llama_token id : session_tokens) {
                if (n_match >= embd_inp.size() || id != embd_inp[n_match]) {
                    break;
                }
                n_match++;
            }
            if (params.prompt.empty() && n_match == embd_inp.size()) {
                LOG_INF("%s: using full prompt from session file\n", __func__);
            } else if (n_match >= embd_inp.size()) {
                LOG_INF("%s: session file has exact match for prompt!\n", __func__);
            } else if (n_match < (embd_inp.size() / 2)) {
                LOG_WRN("%s: session file has low similarity to prompt (%zu / %zu tokens); will mostly be reevaluated\n",
                        __func__, n_match, embd_inp.size());
            } else {
                LOG_INF("%s: session file matches %zu / %zu tokens of prompt\n",
                        __func__, n_match, embd_inp.size());
            }

            if (session_tokens.size() == n_match) {
                // [TAG_CONTEXT_STATE_LOGITS]
                // in this case, we are going to reuse the logits from the session
                // if we ever decide to remove the logits from the session, we need to handle this somehow
                // ref: https://github.com/ggml-org/llama.cpp/pull/18862#issuecomment-3756330941
            }

            // remove any "future" tokens that we might have inherited from the previous session
            if (session_tokens.size() > n_match) {
                if (!llama_memory_seq_rm(mem, -1, n_match, -1)) {
                    LOG_WRN("%s: unable to reuse common prefix (for example, when the memory is recurrent)\n", __func__);
                    llama_memory_clear(mem, true);
                    session_tokens.clear();
                    n_match = 0;
                } else {
                    session_tokens.resize(n_match);
                }
            }
        }

        session_do_save = !path_session.empty() && n_match < embd_inp.size() && !params.prompt_cache_ro;
    }

    // number of tokens to keep when resetting context
    if (params.n_keep < 0 || params.n_keep > (int) embd_inp.size()) {
        params.n_keep = (int)embd_inp.size();
    } else {
        params.n_keep += add_bos; // always keep the BOS token
    }

    if (params.conversation_mode) {
        if (params.single_turn && !params.prompt.empty()) {
            params.interactive = false;
            params.interactive_first = false;
        } else {
            params.interactive_first = true;
        }
    }

    // enable interactive mode if interactive start is specified
    if (params.interactive_first) {
        params.interactive = true;
    }

    if (params.verbose_prompt) {
        LOG_INF("%s: prompt: '%s'\n", __func__, params.prompt.c_str());
        LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size());
        for (int i = 0; i < (int) embd_inp.size(); i++) {
            LOG_INF("%6d -> '%s'\n", embd_inp[i], common_token_to_piece(ctx, embd_inp[i]).c_str());
        }

        if (params.n_keep > add_bos) {
            LOG_INF("%s: static prompt based on n_keep: '", __func__);
            for (int i = 0; i < params.n_keep; i++) {
                LOG_CNT("%s", common_token_to_piece(ctx, embd_inp[i]).c_str());
            }
            LOG_CNT("'\n");
        }
        LOG_INF("\n");
    }

    // ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }

    if (params.interactive) {
        LOG_INF("%s: interactive mode on.\n", __func__);

        if (!params.antiprompt.empty()) {
            for (const auto & antiprompt : params.antiprompt) {
                LOG_INF("Reverse prompt: '%s'\n", antiprompt.c_str());
                if (params.verbose_prompt) {
                    auto tmp = common_tokenize(ctx, antiprompt, false, true);
                    for (int i = 0; i < (int) tmp.size(); i++) {
                        LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
                    }
                }
            }
        }

        if (params.input_prefix_bos) {
            LOG_INF("Input prefix with BOS\n");
        }

        if (!params.input_prefix.empty()) {
            LOG_INF("Input prefix: '%s'\n", params.input_prefix.c_str());
            if (params.verbose_prompt) {
                auto tmp = common_tokenize(ctx, params.input_prefix, true, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }

        if (!params.input_suffix.empty()) {
            LOG_INF("Input suffix: '%s'\n", params.input_suffix.c_str());
            if (params.verbose_prompt) {
                auto tmp = common_tokenize(ctx, params.input_suffix, false, true);
                for (int i = 0; i < (int) tmp.size(); i++) {
                    LOG_INF("%6d -> '%s'\n", tmp[i], common_token_to_piece(ctx, tmp[i]).c_str());
                }
            }
        }
    }

    LOG_INF("sampler seed: %u\n", common_sampler_get_seed(smpl));
    LOG_INF("sampler params: \n%s\n", sparams.print().c_str());
    LOG_INF("sampler chain: %s\n", common_sampler_print(smpl).c_str());

    LOG_INF("generate: n_ctx = %d, n_batch = %d, n_predict = %d, n_keep = %d\n", n_ctx, params.n_batch, params.n_predict, params.n_keep);

    // group-attention state
    // number of grouped KV tokens so far (used only if params.grp_attn_n > 1)
    int ga_i = 0;

    const int ga_n = params.grp_attn_n;
    const int ga_w = params.grp_attn_w;

    if (ga_n != 1) {
        GGML_ASSERT(ga_n > 0 && "grp_attn_n must be positive"); // NOLINT
        GGML_ASSERT(ga_w % ga_n == 0 && "grp_attn_w must be a multiple of grp_attn_n"); // NOLINT
        //GGML_ASSERT(n_ctx_train % ga_w == 0 && "n_ctx_train must be a multiple of grp_attn_w"); // NOLINT
        //GGML_ASSERT(n_ctx >= n_ctx_train * ga_n && "n_ctx must be at least n_ctx_train * grp_attn_n"); // NOLINT
        LOG_INF("self-extend: n_ctx_train = %d, grp_attn_n = %d, grp_attn_w = %d\n", n_ctx_train, ga_n, ga_w);
    }
    LOG_INF("\n");

    if (params.interactive) {
        const char * control_message;
        if (params.multiline_input) {
            control_message = " - To return control to the AI, end your input with '\\'.\n"
                              " - To return control without starting a new line, end your input with '/'.\n";
        } else {
            control_message = " - Press Return to return control to the AI.\n"
                              " - To return control without starting a new line, end your input with '/'.\n"
                              " - If you want to submit another line, end your input with '\\'.\n";
        }
        LOG_INF("== Running in interactive mode. ==\n");
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
        LOG_INF( " - Press Ctrl+C to interject at any time.\n");
#endif
        LOG_INF( "%s", control_message);
        if (params.conversation_mode && params.enable_chat_template && params.system_prompt.empty()) {
            LOG_INF( " - Not using system message. To change it, set a different value via -sys PROMPT\n");
        }
        LOG_INF("\n");

        is_interacting = params.interactive_first;
    }

    bool is_antiprompt = false;
    bool input_echo = true;
    bool display = true;

    int n_past = 0;
    int n_remain = params.n_predict;
    int n_consumed = 0;
    int n_session_consumed = 0;

    std::vector<int> input_tokens; g_input_tokens = &input_tokens;
    std::vector<int> output_tokens; g_output_tokens = &output_tokens;
    std::ostringstream output_ss; g_output_ss = &output_ss;
    std::ostringstream assistant_ss; // for storing current assistant message, used in conversation mode

    // the first thing we will do is output the prompt, so set the display color accordingly
    console::set_display(DISPLAY_TYPE_PROMPT);
    display = params.display_prompt;

    std::vector<llama_token> embd;

    // single-token antiprompts
    std::vector<llama_token> antiprompt_token;

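    // antiprompts that tokenize to a single token can later be compared against the last
    // sampled token directly, which is cheaper than scanning the output string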
    for (const std::string & antiprompt : params.antiprompt) {
        auto ids = ::common_tokenize(ctx, antiprompt, false, true);
        if (ids.size() == 1) {
            antiprompt_token.push_back(ids[0]);
        }
    }

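    // encoder-decoder models (e.g. T5) first run the encoder over the prompt;
    // decoding then starts from the model's designated decoder start token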
    if (llama_model_has_encoder(model)) {
        int enc_input_size = embd_inp.size();
        llama_token * enc_input_buf = embd_inp.data();

        if (llama_encode(ctx, llama_batch_get_one(enc_input_buf, enc_input_size))) {
            LOG_ERR("%s : failed to eval\n", __func__);
            return 1;
        }

        llama_token decoder_start_token_id = llama_model_decoder_start_token(model);
        if (decoder_start_token_id == LLAMA_TOKEN_NULL) {
            decoder_start_token_id = llama_vocab_bos(vocab);
        }

        embd_inp.clear();
        embd_inp.push_back(decoder_start_token_id);
    }

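    // main loop: alternate between evaluating pending tokens (embd) and sampling new ones
    // until the generation budget runs out, an antiprompt/EOG fires, or the user exits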
    while ((n_remain != 0 && !is_antiprompt) || params.interactive) {
        // predict
        if (!embd.empty()) {
            // Note: (n_ctx - 4) here is to match the logic for commandline prompt handling via
            // --prompt or --file which uses the same value.
            int max_embd_size = n_ctx - 4;

            // Ensure the input doesn't exceed the context size by truncating embd if necessary.
            if ((int) embd.size() > max_embd_size) {
                const int skipped_tokens = (int) embd.size() - max_embd_size;
                embd.resize(max_embd_size);

                console::set_display(DISPLAY_TYPE_ERROR);
                LOG_WRN("<<input too long: skipped %d token%s>>", skipped_tokens, skipped_tokens != 1 ? "s" : "");
                console::set_display(DISPLAY_TYPE_RESET);
            }

            if (ga_n == 1) {
                // infinite text generation via context shifting
                // if we run out of context:
                // - take the n_keep first tokens from the original prompt (via n_past)
                // - take half of the last (n_ctx - n_keep) tokens and recompute the logits in batches

                if (n_past + (int) embd.size() >= n_ctx) {
                    if (!params.ctx_shift) {
                        LOG_WRN("\n\n%s: context full and context shift is disabled => stopping\n", __func__);
                        break;
                    }

                    if (params.n_predict == -2) {
                        LOG_WRN("\n\n%s: context full and n_predict == %d => stopping\n", __func__, params.n_predict);
                        break;
                    }

                    const int n_left = n_past - params.n_keep;
                    const int n_discard = n_left/2;
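                    // example: suppose n_ctx = 4096, n_keep = 256 and the cache fills at
                    // n_past = 4096: n_left = 3840 and n_discard = 1920, so positions
                    // [256, 2176) are removed and the tokens at [2176, 4096) shift down by 1920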

                    LOG_DBG("context full, swapping: n_past = %d, n_left = %d, n_ctx = %d, n_keep = %d, n_discard = %d\n",
                            n_past, n_left, n_ctx, params.n_keep, n_discard);

                    llama_memory_seq_rm (mem, 0, params.n_keep, params.n_keep + n_discard);
                    llama_memory_seq_add(mem, 0, params.n_keep + n_discard, n_past, -n_discard);

                    n_past -= n_discard;

                    LOG_DBG("after swap: n_past = %d\n", n_past);

                    LOG_DBG("embd: %s\n", string_from(ctx, embd).c_str());

                    LOG_DBG("clear session path\n");
                    path_session.clear();
                }
            } else {
                // context extension via Self-Extend
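                // roughly: positions inside the current window [ga_i, ga_i + ga_w) are divided
                // by ga_n, so ga_w cache positions collapse into ga_w/ga_n effective positions,
                // while the tokens past the window are shifted to keep positions contiguous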
                while (n_past >= ga_i + ga_w) {
                    const int ib = (ga_n*ga_i)/ga_w;
                    const int bd = (ga_w/ga_n)*(ga_n - 1);
                    const int dd = (ga_w/ga_n) - ib*bd - ga_w;

                    LOG_DBG("\n");
                    LOG_DBG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i, n_past, ib*bd, ga_i + ib*bd, n_past + ib*bd);
                    LOG_DBG("div:   [%6d, %6d] / %6d -> [%6d, %6d]\n", ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n, (ga_i + ib*bd)/ga_n, (ga_i + ib*bd + ga_w)/ga_n);
                    LOG_DBG("shift: [%6d, %6d] + %6d -> [%6d, %6d]\n", ga_i + ib*bd + ga_w, n_past + ib*bd, dd, ga_i + ib*bd + ga_w + dd, n_past + ib*bd + dd);

                    llama_memory_seq_add(mem, 0, ga_i, n_past, ib*bd);
                    llama_memory_seq_div(mem, 0, ga_i + ib*bd, ga_i + ib*bd + ga_w, ga_n);
                    llama_memory_seq_add(mem, 0, ga_i + ib*bd + ga_w, n_past + ib*bd, dd);

                    n_past -= bd;

                    ga_i += ga_w/ga_n;

                    LOG_DBG("\nn_past_old = %d, n_past = %d, ga_i = %d\n\n", n_past + bd, n_past, ga_i);
                }
            }

            // try to reuse a matching prefix from the loaded session instead of re-eval (via n_past)
            if (n_session_consumed < (int) session_tokens.size()) {
                size_t i = 0;
                for ( ; i < embd.size(); i++) {
                    if (embd[i] != session_tokens[n_session_consumed]) {
                        session_tokens.resize(n_session_consumed);
                        break;
                    }

                    n_past++;
                    n_session_consumed++;

                    if (n_session_consumed >= (int) session_tokens.size()) {
                        ++i;
                        break;
                    }
                }
                if (i > 0) {
                    embd.erase(embd.begin(), embd.begin() + i);
                }
            }

            if (!embd.empty()) {
                int n_eval = (int) embd.size();
                LOG_DBG("eval: %s\n", string_from(ctx, embd).c_str());

                GGML_ASSERT(n_eval <= params.n_batch);
                if (llama_decode(ctx, llama_batch_get_one(embd.data(), n_eval))) {
                    LOG_ERR("%s : failed to eval\n", __func__);
                    return 1;
                }

                n_past += n_eval;

                LOG_DBG("n_past = %d\n", n_past);
                // Display total tokens alongside total time
                if (params.n_print > 0 && n_past % params.n_print == 0) {
                    LOG_DBG("\n\033[31mTokens consumed so far = %d / %d \033[0m\n", n_past, n_ctx);
                }
            }

            if (!embd.empty() && !path_session.empty()) {
                session_tokens.insert(session_tokens.end(), embd.begin(), embd.end());
                n_session_consumed = session_tokens.size();
            }
        }

        embd.clear();

        if ((int) embd_inp.size() <= n_consumed && !is_interacting) {
            // optionally save the session on first sample (for faster prompt loading next time)
            if (session_do_save) {
                session_do_save = false;
                llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());

                LOG_DBG("saved session to %s\n", path_session.c_str());
            }

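            // sample the next token using the logits of the last evaluated position (idx -1)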
            const llama_token id = common_sampler_sample(smpl, ctx, -1);

            common_sampler_accept(smpl, id, /* accept_grammar= */ true);

            // LOG_DBG("last: %s\n", string_from(ctx, smpl->prev.to_vector()).c_str());

            embd.push_back(id);

            if (params.conversation_mode && !waiting_for_first_input && !llama_vocab_is_eog(vocab, id)) {
                assistant_ss << common_token_to_piece(ctx, id, false);
            }

            // echo this to console
            input_echo = true;

            // decrement remaining sampling budget
            --n_remain;

            LOG_DBG("n_remain: %d\n", n_remain);
        } else {
            // some user input remains from prompt or interaction, forward it to processing
            LOG_DBG("embd_inp.size(): %d, n_consumed: %d\n", (int) embd_inp.size(), n_consumed);
            while ((int) embd_inp.size() > n_consumed) {
                embd.push_back(embd_inp[n_consumed]);

                // push the prompt in the sampling context in order to apply repetition penalties later
                // for the prompt, we don't apply grammar rules
                common_sampler_accept(smpl, embd_inp[n_consumed], /* accept_grammar= */ false);

                ++n_consumed;
                if ((int) embd.size() == params.n_batch) {
                    break;
                }
            }
        }

        // display text
        if (input_echo && display) {
            for (auto id : embd) {
                const std::string token_str = common_token_to_piece(ctx, id, params.special);

                // Console/Stream Output
                LOG("%s", token_str.c_str());

                // Record Displayed Tokens To Log
                // Note: generated tokens are sampled one at a time, so embd.size() > 1 means prompt input
                if (embd.size() > 1) {
                    // Incoming Requested Tokens
                    input_tokens.push_back(id);
                } else {
                    // Outgoing Generated Tokens
                    output_tokens.push_back(id);
                    output_ss << token_str;
                }
            }
        }

        // reset color to default if there is no pending user input
        if (input_echo && (int) embd_inp.size() == n_consumed) {
            console::set_display(DISPLAY_TYPE_RESET);
            display = true;
        }

        // if not currently processing queued inputs
        if ((int) embd_inp.size() <= n_consumed) {
            // check for reverse prompt in the last n_prev tokens
            if (!params.antiprompt.empty()) {
                const int n_prev = 32;
                const std::string last_output = common_sampler_prev_str(smpl, ctx, n_prev);

                is_antiprompt = false;
                // Check if each of the reverse prompts appears at the end of the output.
                // If we're not running interactively, the reverse prompt might be tokenized with some following characters
                // so we'll compensate for that by widening the search window a bit.
                for (std::string & antiprompt : params.antiprompt) {
                    size_t extra_padding = params.interactive ? 0 : 2;
                    size_t search_start_pos = last_output.length() > static_cast<size_t>(antiprompt.length() + extra_padding)
                        ? last_output.length() - static_cast<size_t>(antiprompt.length() + extra_padding)
                        : 0;

                    if (last_output.find(antiprompt, search_start_pos) != std::string::npos) {
                        if (params.interactive) {
                            is_interacting = true;
                        }
                        is_antiprompt = true;
                        break;
                    }
                }

                // check for reverse prompt using special tokens
                // avoid calling common_sampler_last() if last_output is empty
                if (!last_output.empty()) {
                    llama_token last_token = common_sampler_last(smpl);
                    for (auto token : antiprompt_token) {
                        if (token == last_token) {
                            if (params.interactive) {
                                is_interacting = true;
                            }
                            is_antiprompt = true;
                            break;
                        }
                    }
                }

                if (is_antiprompt) {
                    LOG_DBG("found antiprompt: %s\n", last_output.c_str());
                }
            }

            // deal with end of generation tokens in interactive mode
            if (!waiting_for_first_input && llama_vocab_is_eog(vocab, common_sampler_last(smpl))) {
                LOG_DBG("found an EOG token\n");

                if (params.interactive) {
                    if (!params.antiprompt.empty()) {
                        // tokenize and inject first reverse prompt
                        const auto first_antiprompt = common_tokenize(ctx, params.antiprompt.front(), false, true);
                        embd_inp.insert(embd_inp.end(), first_antiprompt.begin(), first_antiprompt.end());
                        is_antiprompt = true;
                    }

                    if (params.enable_chat_template) {
                        chat_add_and_format("assistant", assistant_ss.str());
                    }
                    is_interacting = true;
                    LOG("\n");
                }
            }

            if (params.conversation_mode && !waiting_for_first_input) {
                if (!prompt.empty()) {
                    prompt.clear();
                    is_interacting = false;
                }
            }

            if ((n_past > 0 || waiting_for_first_input) && is_interacting) {
                LOG_DBG("waiting for user input\n");

                if (params.conversation_mode) {
                    LOG("\n> ");
                }

                if (params.input_prefix_bos) {
                    LOG_DBG("adding input prefix BOS token\n");
                    embd_inp.push_back(llama_vocab_bos(vocab));
                }

                std::string buffer;
                if (!params.input_prefix.empty() && !params.conversation_mode) {
                    LOG_DBG("appending input prefix: '%s'\n", params.input_prefix.c_str());
                    LOG("%s", params.input_prefix.c_str());
                }

                // color user input only
                console::set_display(DISPLAY_TYPE_USER_INPUT);
                display = params.display_prompt;

                std::string line;
                bool another_line = true;
                do {
                    another_line = console::readline(line, params.multiline_input);
                    buffer += line;
                } while (another_line);

                // done taking input, reset color
                console::set_display(DISPLAY_TYPE_RESET);
                display = true;

                if (buffer.empty()) { // Ctrl+D on empty line exits
                    LOG("EOF by user\n");
                    break;
                }

                if (buffer.back() == '\n') {
                    // Implement #587:
                    // If the user wants the text to end in a newline,
                    // this should be accomplished by explicitly adding a newline by using \ followed by return,
                    // then returning control by pressing return again.
                    buffer.pop_back();
                }

                if (buffer.empty()) { // Enter key on empty line lets the user pass control back
                    LOG_DBG("empty line, passing control back\n");
                } else { // Add tokens to embd only if the input buffer is non-empty
                    // append input suffix if any
                    if (!params.input_suffix.empty() && !params.conversation_mode) {
                        LOG_DBG("appending input suffix: '%s'\n", params.input_suffix.c_str());
                        LOG("%s", params.input_suffix.c_str());
                    }

                    LOG_DBG("buffer: '%s'\n", buffer.c_str());

                    const size_t original_size = embd_inp.size();

                    if (params.escape) {
                        string_process_escapes(buffer);
                    }

                    bool format_chat = params.conversation_mode && params.enable_chat_template;
                    std::string user_inp = format_chat
                        ? chat_add_and_format("user", std::move(buffer))
                        : std::move(buffer);
                    // TODO: one inconvenience of the current chat template implementation is that we can't distinguish between user input and special tokens (prefix/postfix)
                    const auto line_pfx = common_tokenize(ctx, params.input_prefix, false, true);
                    const auto line_inp = common_tokenize(ctx, user_inp, false, format_chat);
                    const auto line_sfx = common_tokenize(ctx, params.input_suffix, false, true);

                    LOG_DBG("input tokens: %s\n", string_from(ctx, line_inp).c_str());

                    // if the user stopped generation mid-way, add an EOT token to close the model's previous response
                    if (need_insert_eot && format_chat) {
                        llama_token eot = llama_vocab_eot(vocab);
                        embd_inp.push_back(eot == LLAMA_TOKEN_NULL ? llama_vocab_eos(vocab) : eot);
                        need_insert_eot = false;
                    }

                    embd_inp.insert(embd_inp.end(), line_pfx.begin(), line_pfx.end());
                    embd_inp.insert(embd_inp.end(), line_inp.begin(), line_inp.end());
                    embd_inp.insert(embd_inp.end(), line_sfx.begin(), line_sfx.end());

                    if (params.verbose_prompt) {
                        LOG_INF("%s: number of tokens in prompt = %zu\n", __func__, embd_inp.size() - original_size);
                    }

                    for (size_t i = original_size; i < embd_inp.size(); ++i) {
                        const llama_token token = embd_inp[i];
                        const std::string token_str = common_token_to_piece(ctx, token);
                        output_tokens.push_back(token);
                        output_ss << token_str;

                        if (params.verbose_prompt) {
                            LOG_INF("%6d -> '%s'\n", token, token_str.c_str());
                        }
                    }

                    // reset assistant message
                    assistant_ss.str("");

                    n_remain -= line_inp.size();
                    LOG_DBG("n_remain: %d\n", n_remain);
                }

                input_echo = false; // do not echo this again
            }

            if (n_past > 0 || waiting_for_first_input) {
                if (is_interacting) {
                    common_sampler_reset(smpl);
                }
                is_interacting = false;

                if (waiting_for_first_input && params.single_turn) {
                    params.interactive = false;
                    params.interactive_first = false;
                }
                waiting_for_first_input = false;
            }
        }

        // end of generation
        if (!embd.empty() && llama_vocab_is_eog(vocab, embd.back()) && !(params.interactive)) {
            LOG(" [end of text]\n");
            break;
        }

        // In interactive mode, respect the maximum number of tokens and drop back to user input when reached.
        // We skip this logic when n_predict == -1 (infinite) or -2 (stop at context size).
        if (params.interactive && n_remain <= 0 && params.n_predict >= 0) {
            n_remain = params.n_predict;
            is_interacting = true;
        }
    }

    if (!path_session.empty() && params.prompt_cache_all && !params.prompt_cache_ro) {
        LOG("\n%s: saving final output to session file '%s'\n", __func__, path_session.c_str());
        llama_state_save_file(ctx, path_session.c_str(), session_tokens.data(), session_tokens.size());
    }

    LOG("\n\n");
    common_perf_print(ctx, smpl);

    llama_backend_free();

    ggml_threadpool_free_fn(threadpool);
    ggml_threadpool_free_fn(threadpool_batch);

    return 0;
}