#include "arg.h"
#include "debug.h"
#include "log.h"
#include "common.h"
#include "sampling.h"
#include "llama.h"
#include "ggml.h"
#include "console.h"
#include "chat.h"
#include "mtmd.h"
#include "mtmd-helper.h"

#include <vector>
#include <limits.h>
#include <cinttypes>

#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
#include <signal.h>
#include <unistd.h>
#elif defined (_WIN32)
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#define NOMINMAX
#endif
#include <windows.h>
#include <signal.h>
#endif

// volatile, because these flags are modified from a signal handler
static volatile bool g_is_generating = false;
static volatile bool g_is_interrupted = false;

/**
 * Please note that this is NOT production-ready code.
 * It is a playground for trying multimodal support in llama.cpp.
 * For contributors: please keep this code simple and easy to understand.
 */

static void show_additional_info(int /*argc*/, char ** argv) {
    LOG(
        "Experimental CLI for multimodal\n\n"
        "Usage: %s [options] -m <model> --mmproj <mmproj> --image <image> --audio <audio> -p <prompt>\n\n"
        "  -m and --mmproj are required\n"
        "  -hf user/repo can replace both -m and --mmproj in most cases\n"
        "  --image, --audio and -p are optional; if NOT provided, the CLI will run in chat mode\n"
        "  to disable GPU offload for the mmproj model, add --no-mmproj-offload\n",
        argv[0]
    );
}

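// Ctrl+C during generation stops the current generation; otherwise it requests
// program exit, and a second press force-exits immediately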
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__)) || defined (_WIN32)
static void sigint_handler(int signo) {
    if (signo == SIGINT) {
        if (g_is_generating) {
            g_is_generating = false;
        } else {
            console::cleanup();
            if (g_is_interrupted) {
                _exit(1);
            }
            g_is_interrupted = true;
        }
    }
}
#endif

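// all state for one CLI session: llama model/context, sampler, chat template,
// the mtmd (vision/audio) context and any media queued for the next message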
struct mtmd_cli_context {
    mtmd::context_ptr ctx_vision;
    common_init_result_ptr llama_init;

    llama_model       * model;
    llama_context     * lctx;
    const llama_vocab * vocab;
    common_sampler    * smpl;
    llama_batch         batch;
    int                 n_batch;

    mtmd::bitmaps bitmaps;

    // chat template
    common_chat_templates_ptr tmpls;
    std::vector<common_chat_msg> chat_history;
    bool use_jinja = false;
    // TODO: support for --system-prompt with /clear command

    // support for legacy templates (models that do not have an EOT token)
    llama_tokens antiprompt_tokens;

    int n_threads    = 1;
    llama_pos n_past = 0;

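    // user data for the optional eval-graph debug callback (enabled via the MTMD_DEBUG_GRAPH env var)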
    base_callback_data cb_data;

    mtmd_cli_context(common_params & params) : llama_init(common_init_from_params(params)) {
        model = llama_init->model();
        lctx = llama_init->context();
        if (!model || !lctx) {
            exit(1); // fail early, before the model is dereferenced below
        }
        vocab = llama_model_get_vocab(model);
        smpl = common_sampler_init(model, params.sampling);
        n_threads = params.cpuparams.n_threads;
        batch = llama_batch_init(1, 0, 1); // batch for next token generation
        n_batch = params.n_batch;

        if (!llama_model_chat_template(model, nullptr) && params.chat_template.empty()) {
            LOG_ERR("Model does not have chat template.\n");
            LOG_ERR("  For old llava models, you may need to use '--chat-template vicuna'\n");
            LOG_ERR("  For MobileVLM models, use '--chat-template deepseek'\n");
            LOG_ERR("  For Mistral Small 3.1, use '--chat-template mistral-v7'\n");
            exit(1);
        }

        tmpls = common_chat_templates_init(model, params.chat_template);
        use_jinja = params.use_jinja;
        chat_history.clear();
        LOG_INF("%s: chat template example:\n%s\n", __func__, common_chat_format_example(tmpls.get(), params.use_jinja, params.default_template_kwargs).c_str());

        init_vision_context(params);

        // load antiprompt tokens for legacy templates
        if (params.chat_template == "vicuna") {
            antiprompt_tokens = common_tokenize(lctx, "ASSISTANT:", false, true);
        } else if (params.chat_template == "deepseek") {
            antiprompt_tokens = common_tokenize(lctx, "###", false, true);
        }
    }

    ~mtmd_cli_context() {
        llama_batch_free(batch);
        common_sampler_free(smpl);
    }

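    // load the multimodal projector (mmproj) and create the mtmd context used to encode images/audio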
    void init_vision_context(common_params & params) {
        const char * clip_path = params.mmproj.path.c_str();
        mtmd_context_params mparams = mtmd_context_params_default();
        mparams.use_gpu          = params.mmproj_use_gpu;
        mparams.print_timings    = true;
        mparams.n_threads        = params.cpuparams.n_threads;
        mparams.flash_attn_type  = params.flash_attn_type;
        mparams.warmup           = params.warmup;
        mparams.image_min_tokens = params.image_min_tokens;
        mparams.image_max_tokens = params.image_max_tokens;
        if (std::getenv("MTMD_DEBUG_GRAPH") != nullptr) {
            mparams.cb_eval_user_data = &cb_data;
            mparams.cb_eval = common_debug_cb_eval<false>;
        }
        ctx_vision.reset(mtmd_init_from_file(clip_path, model, mparams));
        if (!ctx_vision.get()) {
            LOG_ERR("Failed to load vision model from %s\n", clip_path);
            exit(1);
        }
    }

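    // for legacy templates without an EOT token: stop when the last generated tokens match the antiprompt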
    bool check_antiprompt(const llama_tokens & generated_tokens) {
        if (antiprompt_tokens.empty() || generated_tokens.size() < antiprompt_tokens.size()) {
            return false;
        }
        return std::equal(
            generated_tokens.end() - antiprompt_tokens.size(),
            generated_tokens.end(),
            antiprompt_tokens.begin()
        );
    }

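    // decode an image or audio file into a bitmap and queue it for the next eval_message()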
    bool load_media(const std::string & fname) {
        mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_file(ctx_vision.get(), fname.c_str()));
        if (!bmp.ptr) {
            return false;
        }
        bitmaps.entries.push_back(std::move(bmp));
        return true;
    }
};

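// sample and print up to n_predict tokens; stops on EOG, antiprompt or Ctrl+C,
// then appends the generated text to the chat history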
static int generate_response(mtmd_cli_context & ctx, int n_predict) {
    llama_tokens generated_tokens;
    for (int i = 0; i < n_predict; i++) {
        if (i > n_predict || !g_is_generating || g_is_interrupted) {
            LOG("\n");
            break;
        }

        llama_token token_id = common_sampler_sample(ctx.smpl, ctx.lctx, -1);
        generated_tokens.push_back(token_id);
        common_sampler_accept(ctx.smpl, token_id, true);

        if (llama_vocab_is_eog(ctx.vocab, token_id) || ctx.check_antiprompt(generated_tokens)) {
            LOG("\n");
            break; // end of generation
        }

        LOG("%s", common_token_to_piece(ctx.lctx, token_id).c_str());
        fflush(stdout);

        if (g_is_interrupted) {
            LOG("\n");
            break;
        }

        // eval the token
        common_batch_clear(ctx.batch);
        common_batch_add(ctx.batch, token_id, ctx.n_past++, {0}, true);
        if (llama_decode(ctx.lctx, ctx.batch)) {
            LOG_ERR("failed to decode token\n");
            return 1;
        }
    }

    std::string generated_text = common_detokenize(ctx.lctx, generated_tokens);
    common_chat_msg msg;
    msg.role    = "assistant";
    msg.content = generated_text;
    ctx.chat_history.push_back(std::move(msg));

    return 0;
}

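// add a message to the chat history and return only the newly formatted suffix (delta) to tokenize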
static std::string chat_add_and_format(mtmd_cli_context & ctx, common_chat_msg & new_msg) {
    LOG_DBG("chat_add_and_format: new_msg.role='%s', new_msg.content='%s'\n",
        new_msg.role.c_str(), new_msg.content.c_str());
    auto formatted = common_chat_format_single(ctx.tmpls.get(), ctx.chat_history,
        new_msg, new_msg.role == "user",
        ctx.use_jinja);
    ctx.chat_history.push_back(new_msg);
    return formatted;
}

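// format, tokenize (text + queued media) and evaluate one chat message, advancing n_past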
static int eval_message(mtmd_cli_context & ctx, common_chat_msg & msg) {
    bool add_bos = ctx.chat_history.empty();
    auto formatted_chat = chat_add_and_format(ctx, msg);
    LOG_DBG("formatted_chat.prompt: %s\n", formatted_chat.c_str());

    mtmd_input_text text;
    text.text          = formatted_chat.c_str();
    text.add_special   = add_bos;
    text.parse_special = true;

    if (g_is_interrupted) return 0;

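    // mtmd_tokenize splits the text at media markers and pairs each marker with a queued bitmap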
    mtmd::input_chunks chunks(mtmd_input_chunks_init());
    auto bitmaps_c_ptr = ctx.bitmaps.c_ptr();
    int32_t res = mtmd_tokenize(ctx.ctx_vision.get(),
                        chunks.ptr.get(), // output
                        &text, // text
                        bitmaps_c_ptr.data(),
                        bitmaps_c_ptr.size());
    if (res != 0) {
        LOG_ERR("Unable to tokenize prompt, res = %d\n", res);
        return 1;
    }

    ctx.bitmaps.entries.clear();

    llama_pos new_n_past;
    if (mtmd_helper_eval_chunks(ctx.ctx_vision.get(),
                ctx.lctx, // lctx
                chunks.ptr.get(), // chunks
                ctx.n_past, // n_past
                0, // seq_id
                ctx.n_batch, // n_batch
                true, // logits_last
                &new_n_past)) {
        LOG_ERR("Unable to eval prompt\n");
        return 1;
    }

    ctx.n_past = new_n_past;

    LOG("\n");

    return 0;
}

int main(int argc, char ** argv) {
    ggml_time_init();

    common_params params;

    if (!common_params_parse(argc, argv, params, LLAMA_EXAMPLE_MTMD, show_additional_info)) {
        return 1;
    }

    common_init();
    mtmd_helper_log_set(common_log_default_callback, nullptr);

    if (params.mmproj.path.empty()) {
        show_additional_info(argc, argv);
        LOG_ERR("ERR: Missing --mmproj argument\n");
        return 1;
    }

    LOG_INF("%s: loading model: %s\n", __func__, params.model.path.c_str());
    mtmd_cli_context ctx(params);

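    // single-turn mode: a prompt and at least one image/audio file were given on the command line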
    bool is_single_turn = !params.prompt.empty() && !params.image.empty();

    int n_predict = params.n_predict < 0 ? INT_MAX : params.n_predict;

    // Ctrl+C handling
    {
#if defined (__unix__) || (defined (__APPLE__) && defined (__MACH__))
        struct sigaction sigint_action;
        sigint_action.sa_handler = sigint_handler;
        sigemptyset (&sigint_action.sa_mask);
        sigint_action.sa_flags = 0;
        sigaction(SIGINT, &sigint_action, NULL);
#elif defined (_WIN32)
        auto console_ctrl_handler = +[](DWORD ctrl_type) -> BOOL {
            return (ctrl_type == CTRL_C_EVENT) ? (sigint_handler(SIGINT), true) : false;
        };
        SetConsoleCtrlHandler(reinterpret_cast<PHANDLER_ROUTINE>(console_ctrl_handler), true);
#endif
    }

    if (g_is_interrupted) return 130;

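    // the system prompt (if any) is evaluated once at startup and again after /clear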
    auto eval_system_prompt_if_present = [&] {
        if (params.system_prompt.empty()) {
            return 0;
        }

        common_chat_msg msg;
        msg.role = "system";
        msg.content = params.system_prompt;
        return eval_message(ctx, msg);
    };

    LOG_WRN("WARN: This is an experimental CLI for testing multimodal capability.\n");
    LOG_WRN("      For normal use cases, please use the standard llama-cli\n");

    if (eval_system_prompt_if_present()) {
        return 1;
    }

    if (is_single_turn) {
        g_is_generating = true;
        if (params.prompt.find(mtmd_default_marker()) == std::string::npos) {
            for (size_t i = 0; i < params.image.size(); i++) {
                // most models require the marker before each image
                // ref: https://github.com/ggml-org/llama.cpp/pull/17616
                params.prompt = mtmd_default_marker() + params.prompt;
            }
        }

        common_chat_msg msg;
        msg.role = "user";
        msg.content = params.prompt;
        for (const auto & image : params.image) {
            if (!ctx.load_media(image)) {
                return 1; // error is already printed by libmtmd
            }
        }
        if (eval_message(ctx, msg)) {
            return 1;
        }
        if (!g_is_interrupted && generate_response(ctx, n_predict)) {
            return 1;
        }

    } else {
        LOG("\n Running in chat mode, available commands:");
        if (mtmd_support_vision(ctx.ctx_vision.get())) {
            LOG("\n   /image <path>    load an image");
        }
        if (mtmd_support_audio(ctx.ctx_vision.get())) {
            LOG("\n   /audio <path>    load an audio file");
        }
        LOG("\n   /clear           clear the chat history");
        LOG("\n   /quit or /exit   exit the program");
        LOG("\n");

        std::string content;

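        // interactive loop: read a line, handle slash commands, otherwise evaluate the
        // accumulated text (plus any queued media) and generate a response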
        while (!g_is_interrupted) {
            g_is_generating = false;
            LOG("\n> ");
            console::set_display(DISPLAY_TYPE_USER_INPUT);
            std::string line;
            console::readline(line, false);
            if (g_is_interrupted) break;
            console::set_display(DISPLAY_TYPE_RESET);
            line = string_strip(line);
            if (line.empty()) {
                continue;
            }
            if (line == "/quit" || line == "/exit") {
                break;
            }
            if (line == "/clear") {
                ctx.n_past = 0;
                ctx.chat_history.clear();
                llama_memory_clear(llama_get_memory(ctx.lctx), true);
                if (eval_system_prompt_if_present()) {
                    return 1;
                }
                LOG("Chat history cleared\n\n");
                continue;
            }
            g_is_generating = true;
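            // media commands queue the file and append a marker to the pending text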
            bool is_image = line == "/image" || line.find("/image ") == 0;
            bool is_audio = line == "/audio" || line.find("/audio ") == 0;
            if (is_image || is_audio) {
                if (line.size() < 8) {
                    LOG_ERR("ERR: Missing media filename\n");
                    continue;
                }
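                // both "/image " and "/audio " prefixes are 7 characters long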
                std::string media_path = line.substr(7);
                if (ctx.load_media(media_path)) {
                    LOG("%s %s loaded\n", media_path.c_str(), is_image ? "image" : "audio");
                    content += mtmd_default_marker();
                }
                // else, error is already printed by libmtmd
                continue;
            } else {
                content += line;
            }
            common_chat_msg msg;
            msg.role = "user";
            msg.content = content;
            int ret = eval_message(ctx, msg);
            if (ret) {
                return 1;
            }
            if (g_is_interrupted) break;
            if (generate_response(ctx, n_predict)) {
                return 1;
            }
            content.clear();
        }
    }
    if (g_is_interrupted) LOG("\nInterrupted by user\n");
    LOG("\n\n");
    llama_perf_context_print(ctx.lctx);
    return g_is_interrupted ? 130 : 0;
}