#include "common.h"
#include "download.h"
#include "log.h"
#include "llama.h"
#include "mtmd.h"
#include "mtmd-helper.h"
#include "chat.h"
#include "base64.hpp"

#include "server-common.h"

#include <random>
#include <sstream>
#include <fstream>

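// map an error_type to an OAI-style error object with an HTTP status code, e.g.
//   format_error_response("model not found", ERROR_TYPE_NOT_FOUND)
//   -> {"code": 404, "message": "model not found", "type": "not_found_error"}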
json format_error_response(const std::string & message, const enum error_type type) {
    std::string type_str;
    int code = 500;
    switch (type) {
        case ERROR_TYPE_INVALID_REQUEST:
            type_str = "invalid_request_error";
            code = 400;
            break;
        case ERROR_TYPE_AUTHENTICATION:
            type_str = "authentication_error";
            code = 401;
            break;
        case ERROR_TYPE_NOT_FOUND:
            type_str = "not_found_error";
            code = 404;
            break;
        case ERROR_TYPE_SERVER:
            type_str = "server_error";
            code = 500;
            break;
        case ERROR_TYPE_PERMISSION:
            type_str = "permission_error";
            code = 403;
            break;
        case ERROR_TYPE_NOT_SUPPORTED:
            type_str = "not_supported_error";
            code = 501;
            break;
        case ERROR_TYPE_UNAVAILABLE:
            type_str = "unavailable_error";
            code = 503;
            break;
        case ERROR_TYPE_EXCEED_CONTEXT_SIZE:
            type_str = "exceed_context_size_error";
            code = 400;
            break;
    }
    return json {
        {"code", code},
        {"message", message},
        {"type", type_str},
    };
}

//
// random string / id
//

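// generate a random 32-character alphanumeric string (used for request / tool-call ids);
// note: `generator() % str.size()` has a slight modulo bias, which is acceptable for
// non-cryptographic identifiers like these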
std::string random_string() {
    static const std::string str("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz");

    std::random_device rd;
    std::mt19937 generator(rd());

    std::string result(32, ' ');

    for (int i = 0; i < 32; ++i) {
        result[i] = str[generator() % str.size()];
    }

    return result;
}

std::string gen_chatcmplid() {
    return "chatcmpl-" + random_string();
}

std::string gen_tool_call_id() {
    return random_string();
}

//
// lora utils
//

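// returns true if every enabled adapter (scale != 0) is an activated LoRA (aLoRA),
// i.e. has a non-zero number of invocation tokens; returns false if no adapter is enabled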
bool lora_all_alora(const std::vector<common_adapter_lora_info> & loras) {
    bool found_alora = false;
    for (const auto & lora : loras) {
        if (lora.scale != 0) {
            if (llama_adapter_get_alora_n_invocation_tokens(lora.ptr) == 0) {
                return false;
            }
            found_alora = true;
        }
    }
    return found_alora;
}

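// decide whether switching from `current` to `next` requires clearing the KV cache:
// the cache can be kept only when `current` has no enabled non-aLoRA adapters and
// `next` consists solely of aLoRA adapters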
bool lora_should_clear_cache(
        const std::vector<common_adapter_lora_info> & current,
        const std::vector<common_adapter_lora_info> & next) {

    // This should always be called after determining that the two sets are
    // _not_ equal. This assert is therefore some slightly wasted work and
    // should be safe to remove as long as this method is called correctly.
    GGML_ASSERT(!are_lora_equal(current, next));

    return (
        !(lora_get_enabled_ids(current).empty() || lora_all_alora(current)) ||
        !lora_all_alora(next));
}

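// parse a JSON array of {"id": <int>, "scale": <float>} objects into an id -> scale map,
// e.g. [{"id": 0, "scale": 0.5}, {"id": 2, "scale": 1.0}] -> {0: 0.5, 2: 1.0}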
std::map<int, float> parse_lora_request(const json & data) {
    std::map<int, float> lora;

    // set value
    for (const auto & entry : data) {
        int id      = json_value(entry, "id", -1);
        float scale = json_value(entry, "scale", 0.0f);
        lora[id] = scale;
    }

    return lora;
}

bool are_lora_equal(
        const std::vector<common_adapter_lora_info> & l1,
        const std::vector<common_adapter_lora_info> & l2) {
    if (l1.size() != l2.size()) {
        return false;
    }
    for (size_t i = 0; i < l1.size(); ++i) {
        // we don't check lora.path to reduce the time complexity
        if (l1[i].scale != l2[i].scale || l1[i].ptr != l2[i].ptr) {
            return false;
        }
    }
    return true;
}

std::vector<size_t> lora_get_enabled_ids(const std::vector<common_adapter_lora_info> & loras) {
    std::vector<size_t> enabled_ids;
    for (size_t i = 0; i < loras.size(); ++i) {
        if (loras[i].scale > 0) {
            enabled_ids.push_back(i);
        }
    }
    return enabled_ids;
}

//
// base64 utils (TODO: use the base64::decode from base64.hpp)
//

static const std::string base64_chars =
             "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
             "abcdefghijklmnopqrstuvwxyz"
             "0123456789+/";

static inline bool is_base64(uint8_t c) {
    return (isalnum(c) || (c == '+') || (c == '/'));
}

static inline raw_buffer base64_decode(const std::string & encoded_string) {
    int i = 0;
    int j = 0;
    int in_ = 0;

    int in_len = encoded_string.size();

    uint8_t char_array_4[4];
    uint8_t char_array_3[3];

    raw_buffer ret;

    while (in_len-- && (encoded_string[in_] != '=') && is_base64(encoded_string[in_])) {
        char_array_4[i++] = encoded_string[in_]; in_++;
        if (i == 4) {
            for (i = 0; i < 4; i++) {
                char_array_4[i] = base64_chars.find(char_array_4[i]);
            }

            char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
            char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
            char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

            for (i = 0; (i < 3); i++) {
                ret.push_back(char_array_3[i]);
            }

            i = 0;
        }
    }

    if (i) {
        for (j = i; j < 4; j++) {
            char_array_4[j] = 0;
        }

        for (j = 0; j < 4; j++) {
            char_array_4[j] = base64_chars.find(char_array_4[j]);
        }

        char_array_3[0] = ((char_array_4[0]      ) << 2) + ((char_array_4[1] & 0x30) >> 4);
        char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2);
        char_array_3[2] = ((char_array_4[2] & 0x3) << 6) +   char_array_4[3];

        for (j = 0; j < i - 1; j++) {
            ret.push_back(char_array_3[j]);
        }
    }

    return ret;
}

//
// server_tokens implementation
//

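// server_tokens stores text tokens directly in `tokens`; each media (image/audio)
// chunk occupies a run of LLAMA_TOKEN_NULL placeholders, and `map_idx_to_media`
// maps the index of the first placeholder to the owning chunk, e.g. for a prompt
// with 2 text tokens followed by a 3-token image:
//   tokens:           [15, 16, NULL, NULL, NULL]
//   map_idx_to_media: { 2 -> <image chunk> }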
server_tokens::server_tokens(mtmd::input_chunks & mtmd_chunks, bool has_mtmd) : has_mtmd(has_mtmd) {
    for (size_t i = 0; i < mtmd_chunks.size(); ++i) {
        push_back(mtmd_chunks[i]);
    }
}

server_tokens::server_tokens(const llama_tokens & tokens, bool has_mtmd) : has_mtmd(has_mtmd), tokens(tokens) {
}

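// next position in the sequence; for media chunks the number of positions can differ
// from the number of tokens (e.g. with M-RoPE), so we add the per-chunk difference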
llama_pos server_tokens::pos_next() const {
    if (!has_mtmd) {
        return tokens.size();
    }

    llama_pos res = tokens.size();

    for (auto it = map_idx_to_media.begin(); it != map_idx_to_media.end(); ++it) {
        const auto & chunk = it->second;
        res += mtmd_input_chunk_get_n_pos(chunk.get()) - mtmd_input_chunk_get_n_tokens(chunk.get());
    }

    return res;
}

std::string server_tokens::str() const {
    std::ostringstream oss;
    oss << "tokens: ";
    for (size_t idx = 0; idx < tokens.size(); ++idx) {
        llama_token t = tokens[idx];
        oss << "idx:" << idx << " ";
        if (t == LLAMA_TOKEN_NULL) {
            oss << "<embd> ";
        } else {
            oss << t << " ";
        }
    }
    oss << "\n";
    oss << "image idx: ";
    for (const auto & it : map_idx_to_media) {
        oss << it.first << ", ";
    }
    return oss.str();
}

const mtmd::input_chunk_ptr & server_tokens::find_chunk(size_t idx) const {
    auto it = map_idx_to_media.find(idx);
    if (it != map_idx_to_media.end()) {
        return it->second;
    }
    throw std::runtime_error("Chunk not found");
}

void server_tokens::push_back(llama_token tok) {
    if (tok == LLAMA_TOKEN_NULL) {
        throw std::runtime_error("Invalid token");
    }
    tokens.emplace_back(tok);
}

void server_tokens::push_back(const mtmd_input_chunk * chunk) {
    auto type = mtmd_input_chunk_get_type(chunk);
    if (type == MTMD_INPUT_CHUNK_TYPE_IMAGE || type == MTMD_INPUT_CHUNK_TYPE_AUDIO) {
        GGML_ASSERT(has_mtmd);
        const size_t n_tokens = mtmd_input_chunk_get_n_tokens(chunk);
        size_t start_idx = tokens.size();
        for (size_t i = 0; i < n_tokens; ++i) {
            tokens.emplace_back(LLAMA_TOKEN_NULL);
        }
        mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk));
        map_idx_to_media[start_idx] = std::move(new_chunk);
    } else if (type == MTMD_INPUT_CHUNK_TYPE_TEXT) {
        size_t n_tokens;
        const auto * text_tokens = mtmd_input_chunk_get_tokens_text(chunk, &n_tokens);
        for (size_t i = 0; i < n_tokens; ++i) {
            push_back(text_tokens[i]);
        }
    } else {
        GGML_ABORT("Invalid chunk type");
    }
}

void server_tokens::push_back(server_tokens & tokens) {
    size_t start_idx = size();
    for (size_t i = 0; i < tokens.size(); i++) {
        push_back(tokens[i]);
    }
    if (tokens.has_mtmd) {
        // Assert if we are copying MTMD chunks to a server_tokens that does not have mtmd.
        // We could also just check, but this will prevent silently dropping MTMD data.
        GGML_ASSERT(has_mtmd);
        for (auto it = tokens.map_idx_to_media.begin(); it != tokens.map_idx_to_media.end(); ++it) {
            auto * chunk = it->second.get();
            mtmd::input_chunk_ptr new_chunk(mtmd_input_chunk_copy(chunk));
            map_idx_to_media[start_idx + it->first] = std::move(new_chunk);
        }
    }
}

void server_tokens::insert(const llama_tokens & inp_tokens) {
    GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
    tokens.insert(tokens.end(), inp_tokens.begin(), inp_tokens.end());
}

const llama_tokens & server_tokens::get_text_tokens() const {
    GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
    return tokens;
}

void server_tokens::set_token(llama_pos pos, llama_token id) {
    GGML_ASSERT(!has_mtmd); // only allow this if mtmd is disabled
    tokens[pos] = id;
}

void server_tokens::keep_first(size_t n) {
    GGML_ASSERT(n <= tokens.size());
    if (has_mtmd) {
        if (n == tokens.size()) {
            return; // nothing to do
        }
        // we throw an error if we try to remove a token in the middle of an image
        // for ex. with input of 5 text tokens and 2 images:
        //    [0] [1] [2] [3] [4] [img0] [img0] [img0] [img1] [img1]
        // n  1   2   3   4   5   6      7      8      9      10
        // allowed to resize      ^                    ^
        // disallowed to resize          ^      ^             ^
        if (n > 0) {
            // make sure we never remove tokens in the middle of an image
            // note that the case where we keep a full image at the end is allowed:
            //   tokens[n - 1] == LLAMA_TOKEN_NULL && tokens[n] != LLAMA_TOKEN_NULL
            if (tokens[n - 1] == LLAMA_TOKEN_NULL && tokens[n] == LLAMA_TOKEN_NULL) {
                find_chunk(n - 1); // will throw an error if the token is not begin-of-chunk
            }
        }
        // remove all image chunks that are not used anymore
        for (auto it = map_idx_to_media.begin(); it != map_idx_to_media.end(); ) {
            size_t idx = it->first;
            if (idx >= n) {
                it = map_idx_to_media.erase(it);
            } else {
                ++it;
            }
        }
    }
    tokens.resize(n);
}

std::string server_tokens::detokenize(const llama_context * ctx, bool special) const {
    llama_tokens text_tokens;
    text_tokens.reserve(tokens.size());
    for (const auto & t : tokens) {
        if (t != LLAMA_TOKEN_NULL) {
            text_tokens.push_back(t);
        }
    }
    return common_detokenize(ctx, text_tokens, special);
}

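// length (in tokens) of the longest common prefix of two server_tokens sequences;
// media chunks are compared by their cached id and token count, and a matching
// chunk is skipped as a whole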
size_t server_tokens::get_common_prefix(const server_tokens & b) const {
    const size_t max_idx = std::min(tokens.size(), b.tokens.size());

    if (!has_mtmd) {
        for (size_t i = 0; i < max_idx; ++i) {
            if (tokens[i] == b.tokens[i]) {
                continue;
            }

            return i;
        }

        return max_idx;
    }

    for (size_t i = 0; i < max_idx; ++i) {
        const llama_token ai =   tokens[i];
        const llama_token bi = b.tokens[i];

        if (ai == LLAMA_TOKEN_NULL && bi == LLAMA_TOKEN_NULL) {
            const auto & a_chunk =   find_chunk(i);
            const auto & b_chunk = b.find_chunk(i);

            GGML_ASSERT(a_chunk && b_chunk);

            const std::string id_ai = mtmd_input_chunk_get_id(a_chunk.get());
            const std::string id_bi = mtmd_input_chunk_get_id(b_chunk.get());

            const size_t n_tok_a = mtmd_input_chunk_get_n_tokens(a_chunk.get());
            const size_t n_tok_b = mtmd_input_chunk_get_n_tokens(b_chunk.get());

            if (id_ai == id_bi && n_tok_a == n_tok_b) {
                GGML_ASSERT(n_tok_a > 0 && "Invalid media chunk"); // should never happen
                i += n_tok_a - 1; // will be +1 by the for loop
                continue;
            }

            return i;
        }

        if (ai == bi) {
            continue;
        }

        return i;
    }

    return max_idx; // all tokens are equal
}

bool server_tokens::validate(const struct llama_context * ctx) const {
    const llama_model * model = llama_get_model(ctx);
    const llama_vocab * vocab = llama_model_get_vocab(model);
    const int32_t n_vocab = llama_vocab_n_tokens(vocab);

    for (size_t i = 0; i < tokens.size(); ++i) {
        const auto & t = tokens[i];
        if (t == LLAMA_TOKEN_NULL) {
            try {
                const auto & chunk = find_chunk(i);
                size_t n_tokens = mtmd_input_chunk_get_n_tokens(chunk.get());
                i += n_tokens - 1; // will be +1 by the for loop
            } catch (const std::exception & e) {
                return false;
            }
        } else if (t < 0 || t >= n_vocab) {
            return false;
        }
    }
    return true;
}

int32_t server_tokens::process_chunk(
            llama_context * ctx,
            mtmd_context * mctx,
            size_t idx,
            llama_pos pos,
            int32_t seq_id,
            size_t & n_tokens_out) const {
    const auto & chunk = find_chunk(idx);
    const char * name = mtmd_input_chunk_get_type(chunk.get()) == MTMD_INPUT_CHUNK_TYPE_IMAGE
                        ? "image" : "audio";
    SRV_INF("processing %s...\n", name);
    int32_t n_batch = llama_n_batch(ctx);
    int64_t t0 = ggml_time_ms();
    llama_pos new_n_past; // unused for now
    int32_t result = mtmd_helper_eval_chunk_single(mctx, ctx,
        chunk.get(),
        pos,
        seq_id,
        n_batch,
        true, // logits last
        &new_n_past);
    SRV_INF("%s processed in %" PRId64 " ms\n", name, ggml_time_ms() - t0);
    if (result != 0) {
        LOG_ERR("mtmd_helper_eval failed with status %d\n", result);
        n_tokens_out = 0;
        return result;
    }
    n_tokens_out = mtmd_input_chunk_get_n_tokens(chunk.get());
    return 0;
}

server_tokens server_tokens::clone() const {
    server_tokens res;
    res.has_mtmd = has_mtmd;
    res.tokens   = tokens;
    for (auto it = map_idx_to_media.begin(); it != map_idx_to_media.end(); ++it) {
        size_t idx = it->first;
        const mtmd::input_chunk_ptr & chunk = it->second;
        res.map_idx_to_media[idx] = mtmd::input_chunk_ptr(mtmd_input_chunk_copy(chunk.get()));
    }
    return res;
}

//
// tokenizer and input processing utils
//

bool json_is_array_of_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (!e.is_number_integer()) {
                return false;
            }
        }
        return true;
    }
    return false;
}

bool json_is_array_of_mixed_numbers_strings(const json & data) {
    bool seen_string = false;
    bool seen_number = false;
    if (data.is_array()) {
        for (const auto & e : data) {
            seen_string |= e.is_string();
            seen_number |= e.is_number_integer();
            if (seen_number && seen_string) {
                return true;
            }
        }
    }
    return false;
}

bool json_is_array_and_contains_numbers(const json & data) {
    if (data.is_array()) {
        for (const auto & e : data) {
            if (e.is_number_integer()) {
                return true;
            }
        }
        return false;
    }
    return false;
}

json json_get_nested_values(const std::vector<std::string> & paths, const json & js) {
    json result = json::object();

    for (const std::string & path : paths) {
        json current = js;
        const auto keys = string_split<std::string>(path, /*separator*/ '/');
        bool valid_path = true;
        for (const std::string & k : keys) {
            if (valid_path && current.is_object() && current.contains(k)) {
                current = current[k];
            } else {
                valid_path = false;
            }
        }
        if (valid_path) {
            result[path] = current;
        }
    }
    return result;
}

llama_tokens tokenize_mixed(const llama_vocab * vocab, const json & json_prompt, bool add_special, bool parse_special) {
    // If `add_special` is true, we only add the special tokens when json_prompt is a string,
    // or when the first element of the json_prompt array is a string.
    llama_tokens prompt_tokens;

    if (json_prompt.is_array()) {
        bool first = true;
        for (const auto & p : json_prompt) {
            if (p.is_string()) {
                auto s = p.template get<std::string>();

                llama_tokens toks;
                if (first) {
                    toks = common_tokenize(vocab, s, add_special, parse_special);
                    first = false;
                } else {
                    toks = common_tokenize(vocab, s, false, parse_special);
                }

                prompt_tokens.insert(prompt_tokens.end(), toks.begin(), toks.end());
            } else {
                if (first) {
                    first = false;
                }

                prompt_tokens.push_back(p.template get<llama_token>());
            }
        }
    } else {
        auto s = json_prompt.template get<std::string>();
        prompt_tokens = common_tokenize(vocab, s, add_special, parse_special);
    }

    return prompt_tokens;
}

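// find a safe truncation point for streaming: returns the number of leading bytes of
// `text` that do not end in the middle of a multi-byte UTF-8 sequence; this only
// inspects the trailing bytes, it does not fully validate the string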
size_t validate_utf8(const std::string & text) {
    size_t len = text.size();
    if (len == 0) return 0;

    // Check the last few bytes to see if a multi-byte character is cut off
    for (size_t i = 1; i <= 4 && i <= len; ++i) {
        unsigned char c = text[len - i];
        // Check for start of a multi-byte sequence from the end
        if ((c & 0xE0) == 0xC0) {
            // 2-byte character start: 110xxxxx
            // Needs at least 2 bytes
            if (i < 2) return len - i;
        } else if ((c & 0xF0) == 0xE0) {
            // 3-byte character start: 1110xxxx
            // Needs at least 3 bytes
            if (i < 3) return len - i;
        } else if ((c & 0xF8) == 0xF0) {
            // 4-byte character start: 11110xxx
            // Needs at least 4 bytes
            if (i < 4) return len - i;
        }
    }

    // If no cut-off multi-byte character is found, return full length
    return len;
}

// Computes FNV-1a hash of the data
static std::string fnv_hash(const uint8_t * data, size_t len) {
    const uint64_t fnv_prime = 0x100000001b3ULL;
    uint64_t hash = 0xcbf29ce484222325ULL;

    for (size_t i = 0; i < len; ++i) {
        hash ^= data[i];
        hash *= fnv_prime;
    }
    return std::to_string(hash);
}

server_tokens process_mtmd_prompt(mtmd_context * mctx, std::string prompt, std::vector<raw_buffer> files) {
    mtmd::bitmaps bitmaps;
    for (auto & file : files) {
        mtmd::bitmap bmp(mtmd_helper_bitmap_init_from_buf(mctx, file.data(), file.size()));
        if (!bmp.ptr) {
            throw std::runtime_error("Failed to load image or audio file");
        }
        // calculate bitmap hash (for KV caching)
        std::string hash = fnv_hash(bmp.data(), bmp.n_bytes());
        bmp.set_id(hash.c_str());
        bitmaps.entries.push_back(std::move(bmp));
    }
    // process prompt
    std::vector<server_tokens> inputs;
    // multimodal
    mtmd_input_text inp_txt = {
        prompt.c_str(),
        /* add_special */   true,
        /* parse_special */ true,
    };
    mtmd::input_chunks chunks(mtmd_input_chunks_init());
    auto bitmaps_c_ptr = bitmaps.c_ptr();
    int32_t tokenized = mtmd_tokenize(mctx,
                                      chunks.ptr.get(),
                                      &inp_txt,
                                      bitmaps_c_ptr.data(),
                                      bitmaps_c_ptr.size());
    if (tokenized != 0) {
        throw std::runtime_error("Failed to tokenize prompt");
    }
    auto result = server_tokens(chunks, true);
    return result;
}

/**
 * break the input "prompt" object into multiple prompts if needed, then tokenize them
 * use tokenize_input_prompts() if the input could be an array.
 * this supports these cases:
 * - "prompt": "string"
 * - "prompt": [12, 34, 56]
 * - "prompt": [12, 34, "string", 56, 78]
 * - "prompt": { "prompt_string": "string", "multimodal_data": [ "base64" ] }
 */
static server_tokens tokenize_input_subprompt(const llama_vocab * vocab, mtmd_context * mctx, const json & json_prompt, bool add_special, bool parse_special) {
    constexpr char JSON_STRING_PROMPT_KEY[] = "prompt_string";
    constexpr char JSON_MTMD_DATA_KEY[] = "multimodal_data";
    const bool has_mtmd = mctx != nullptr;
    if (json_prompt.is_string() || json_is_array_of_mixed_numbers_strings(json_prompt)) {
        // string or mixed
        llama_tokens tmp = tokenize_mixed(vocab, json_prompt, add_special, parse_special);
        return server_tokens(tmp, false);
    } else if (json_is_array_of_numbers(json_prompt)) {
        // array of tokens
        llama_tokens tmp = json_prompt.get<llama_tokens>();
        return server_tokens(tmp, false);
    } else if (json_prompt.contains(JSON_STRING_PROMPT_KEY)) {
        // JSON object with prompt key.
        if (json_prompt.contains(JSON_MTMD_DATA_KEY)) {
            if (!has_mtmd) {
                throw std::runtime_error("Multimodal data provided, but model does not support multimodal requests.");
            }

            // JSON object with prompt and multimodal key.
            std::vector<raw_buffer> files;
            for (const auto & entry : json_prompt.at(JSON_MTMD_DATA_KEY)) {
                files.push_back(base64_decode(entry));
            }
            return process_mtmd_prompt(mctx, json_prompt.at(JSON_STRING_PROMPT_KEY), files);
        } else {
            // Not multimodal, but contains a subobject.
            llama_tokens tmp = tokenize_mixed(vocab, json_prompt.at(JSON_STRING_PROMPT_KEY), add_special, parse_special);
            return server_tokens(tmp, false);
        }
    } else {
        throw std::runtime_error("\"prompt\" elements must be a string, a list of tokens, a JSON object containing a prompt string, or a list of mixed strings & tokens.");
    }
}

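// tokenize one or more prompts: a top-level array that is not itself a token list is
// treated as a batch of independent subprompts, otherwise the whole value is one prompt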
std::vector<server_tokens> tokenize_input_prompts(const llama_vocab * vocab, mtmd_context * mctx, const json & json_prompt, bool add_special, bool parse_special) {
    std::vector<server_tokens> result;
    if (json_prompt.is_array() && !json_is_array_and_contains_numbers(json_prompt)) {
        result.reserve(json_prompt.size());
        for (const auto & p : json_prompt) {
            result.push_back(tokenize_input_subprompt(vocab, mctx, p, add_special, parse_special));
        }
    } else {
        result.push_back(tokenize_input_subprompt(vocab, mctx, json_prompt, add_special, parse_special));
    }
    if (result.empty()) {
        throw std::runtime_error("\"prompt\" must not be empty");
    }
    return result;
}

//
// OAI utils
//

// used by /completions endpoint
json oaicompat_completion_params_parse(const json & body) {
    json llama_params;

    if (!body.contains("prompt")) {
        throw std::runtime_error("\"prompt\" is required");
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    // Handle "echo" field
    if (json_value(body, "echo", false)) {
        throw std::runtime_error("\"echo\" is not supported");
    }

    // Params supported by OAI but unsupported by llama.cpp
    static const std::vector<std::string> unsupported_params { "best_of", "suffix" };
    for (const auto & param : unsupported_params) {
        if (body.contains(param)) {
            throw std::runtime_error("Unsupported param: " + param);
        }
    }

    // Copy remaining properties to llama_params
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

// media_path always ends with '/', see arg.cpp
static void handle_media(
        std::vector<raw_buffer> & out_files,
        json & media_obj,
        const std::string & media_path) {
    std::string url = json_value(media_obj, "url", std::string());
    if (string_starts_with(url, "http")) {
        // download remote image
        // TODO @ngxson : maybe make these params configurable
        common_remote_params params;
        params.max_size = 1024 * 1024 * 10; // 10MB
        params.timeout  = 10; // seconds
        SRV_INF("downloading image from '%s'\n", url.c_str());
        auto res = common_remote_get_content(url, params);
        if (200 <= res.first && res.first < 300) {
            SRV_INF("downloaded %zu bytes\n", res.second.size());
            raw_buffer data;
            data.insert(data.end(), res.second.begin(), res.second.end());
            out_files.push_back(data);
        } else {
            throw std::runtime_error("Failed to download image");
        }

    } else if (string_starts_with(url, "file://")) {
        if (media_path.empty()) {
            throw std::invalid_argument("file:// URLs are not allowed unless --media-path is specified");
        }
        // load local image file
        std::string file_path = url.substr(7); // remove "file://"
        raw_buffer data;
        if (!fs_validate_filename(file_path, true)) {
            throw std::invalid_argument("file path is not allowed: " + file_path);
        }
        SRV_INF("loading image from local file '%s'\n", (media_path + file_path).c_str());
        std::ifstream file(media_path + file_path, std::ios::binary);
        if (!file) {
            throw std::invalid_argument("file does not exist or cannot be opened: " + file_path);
        }
        data.assign((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
        out_files.push_back(data);

    } else {
        // try to decode base64 image
        std::vector<std::string> parts = string_split<std::string>(url, /*separator*/ ',');
        if (parts.size() != 2) {
            throw std::runtime_error("Invalid url value");
        } else if (!string_starts_with(parts[0], "data:image/")) {
            throw std::runtime_error("Invalid url format: " + parts[0]);
        } else if (!string_ends_with(parts[0], "base64")) {
            throw std::runtime_error("url must be base64 encoded");
        } else {
            auto base64_data = parts[1];
            auto decoded_data = base64_decode(base64_data);
            out_files.push_back(decoded_data);
        }
    }
}

// used by /chat/completions endpoint
json oaicompat_chat_params_parse(
    json & body, /* openai api json semantics */
    const server_chat_params & opt,
    std::vector<raw_buffer> & out_files)
{
    json llama_params;

    auto tools = json_value(body, "tools", json());
    auto has_tools = tools.is_array() && !tools.empty();
    auto stream = json_value(body, "stream", false);
    auto tool_choice = json_value(body, "tool_choice", std::string("auto"));

    if (!opt.use_jinja) {
        if (has_tools) {
            throw std::runtime_error("tools param requires --jinja flag");
        }
        if (tool_choice != "auto") {
            throw std::runtime_error("tool_choice param requires --jinja flag");
        }
    }

    // Handle "stop" field
    if (body.contains("stop") && body.at("stop").is_string()) {
        llama_params["stop"] = json::array({body.at("stop").get<std::string>()});
    } else {
        llama_params["stop"] = json_value(body, "stop", json::array());
    }

    auto json_schema = json_value(body, "json_schema", json());
    auto grammar = json_value(body, "grammar", std::string());
    if (!json_schema.is_null() && !grammar.empty()) {
        throw std::runtime_error("Cannot use both json_schema and grammar");
    }

    // Handle "response_format" field
    if (body.contains("response_format")) {
        json response_format      = json_value(body, "response_format", json::object());
        std::string response_type = json_value(response_format, "type", std::string());
        if (response_type == "json_object") {
            json_schema = json_value(response_format, "schema", json::object());
        } else if (response_type == "json_schema") {
            auto schema_wrapper = json_value(response_format, "json_schema", json::object());
            json_schema = json_value(schema_wrapper, "schema", json::object());
        } else if (!response_type.empty() && response_type != "text") {
            throw std::invalid_argument("response_format type must be one of \"text\" or \"json_object\", but got: " + response_type);
        }
    }

    // get input files
    if (!body.contains("messages")) {
        throw std::invalid_argument("'messages' is required");
    }
    json & messages = body.at("messages");
    if (!messages.is_array()) {
        throw std::invalid_argument("Expected 'messages' to be an array");
    }
    for (auto & msg : messages) {
        std::string role = json_value(msg, "role", std::string());
        if (role != "assistant" && !msg.contains("content")) {
            throw std::invalid_argument("All non-assistant messages must contain 'content'");
        }
        if (role == "assistant") {
            if (!msg.contains("content") && !msg.contains("tool_calls")) {
                throw std::invalid_argument("Assistant message must contain either 'content' or 'tool_calls'!");
            }
            if (!msg.contains("content")) {
                continue; // avoid errors with no content
            }
        }
        json & content = msg.at("content");
        if (content.is_string() || content.is_null()) {
            continue;
        }

        if (!content.is_array()) {
            throw std::invalid_argument("Expected 'content' to be a string or an array");
        }

        for (auto & p : content) {
            std::string type = json_value(p, "type", std::string());
            if (type == "image_url") {
                if (!opt.allow_image) {
                    throw std::runtime_error("image input is not supported - hint: if this is unexpected, you may need to provide the mmproj");
                }

                json image_url = json_value(p, "image_url", json::object());
                handle_media(out_files, image_url, opt.media_path);

                // replace this chunk with a marker
                p["type"] = "text";
                p["text"] = mtmd_default_marker();
                p.erase("image_url");

            } else if (type == "input_audio") {
                if (!opt.allow_audio) {
                    throw std::runtime_error("audio input is not supported - hint: if this is unexpected, you may need to provide the mmproj");
                }

                json input_audio   = json_value(p, "input_audio", json::object());
                std::string data   = json_value(input_audio, "data", std::string());
                std::string format = json_value(input_audio, "format", std::string());
                // while we also support flac, we don't allow it here so that we match the OAI spec
                if (format != "wav" && format != "mp3") {
                    throw std::invalid_argument("input_audio.format must be either 'wav' or 'mp3'");
                }
                auto decoded_data = base64_decode(data); // expected to be base64 encoded
                out_files.push_back(decoded_data);

                // TODO: add audio_url support by reusing handle_media()

                // replace this chunk with a marker
                p["type"] = "text";
                p["text"] = mtmd_default_marker();
                p.erase("input_audio");

            } else if (type != "text") {
                throw std::invalid_argument("unsupported content[].type");
            }
        }
    }

    common_chat_templates_inputs inputs;
    inputs.messages              = common_chat_msgs_parse_oaicompat(messages);
    inputs.tools                 = common_chat_tools_parse_oaicompat(tools);
    inputs.tool_choice           = common_chat_tool_choice_parse_oaicompat(tool_choice);
    inputs.json_schema           = json_schema.is_null() ? "" : json_schema.dump();
    inputs.grammar               = grammar;
    inputs.use_jinja             = opt.use_jinja;
    inputs.parallel_tool_calls   = json_value(body, "parallel_tool_calls", false);
    inputs.add_generation_prompt = json_value(body, "add_generation_prompt", true);
    inputs.reasoning_format      = opt.reasoning_format;
    if (body.contains("reasoning_format")) {
        inputs.reasoning_format = common_reasoning_format_from_name(body.at("reasoning_format").get<std::string>());
    }
    inputs.enable_thinking       = opt.enable_thinking;
    if (!inputs.tools.empty() && inputs.tool_choice != COMMON_CHAT_TOOL_CHOICE_NONE) {
        if (body.contains("grammar")) {
            throw std::invalid_argument("Cannot use custom grammar constraints with tools.");
        }
        llama_params["parse_tool_calls"] = true;
    }

    // merge the template args provided from the command line with the args provided in the user request
    auto chat_template_kwargs_object = json_value(body, "chat_template_kwargs", json::object());
    inputs.chat_template_kwargs = opt.chat_template_kwargs;
    for (const auto & item : chat_template_kwargs_object.items()) {
        inputs.chat_template_kwargs[item.key()] = item.value().dump();
    }

    // parse the "enable_thinking" kwarg to override the default value
    auto enable_thinking_kwarg = json_value(inputs.chat_template_kwargs, "enable_thinking", std::string(""));
    if (enable_thinking_kwarg == "true") {
        inputs.enable_thinking = true;
    } else if (enable_thinking_kwarg == "false") {
        inputs.enable_thinking = false;
    } else if (!enable_thinking_kwarg.empty() && enable_thinking_kwarg[0] == '"') {
        throw std::invalid_argument("invalid type for \"enable_thinking\" (expected boolean, got string)");
    }

    // if the assistant message appears at the end of the list, we do not add the end-of-turn token
    // for ex. this can be useful to modify the reasoning process in reasoning models
    bool prefill_assistant_message = !inputs.messages.empty() && inputs.messages.back().role == "assistant" && opt.prefill_assistant;
    common_chat_msg last_message;
    if (prefill_assistant_message) {
        last_message = inputs.messages.back();
        inputs.messages.pop_back();

        /* sanity check, max one assistant message at the end of the list */
        if (!inputs.messages.empty() && inputs.messages.back().role == "assistant") {
            throw std::invalid_argument("Cannot have 2 or more assistant messages at the end of the list.");
        }

        /* TODO: test this properly */
        inputs.reasoning_format = COMMON_REASONING_FORMAT_NONE;

        if (inputs.enable_thinking) {
            throw std::invalid_argument("Assistant response prefill is incompatible with enable_thinking.");
        }

        inputs.add_generation_prompt = true;
    }

    // Apply chat template to the list of messages
    auto chat_params = common_chat_templates_apply(opt.tmpls.get(), inputs);

    /* Append assistant prefilled message */
    if (prefill_assistant_message) {
        if (!last_message.content_parts.empty()) {
            for (auto & p : last_message.content_parts) {
                chat_params.prompt += p.text;
            }
        } else {
            chat_params.prompt += last_message.content;
        }
    }

    llama_params["chat_format"]      = static_cast<int>(chat_params.format);
    llama_params["prompt"]           = chat_params.prompt;
    if (!chat_params.grammar.empty()) {
        llama_params["grammar"] = chat_params.grammar;
    }
    llama_params["grammar_lazy"]     = chat_params.grammar_lazy;
    auto grammar_triggers = json::array();
    for (const auto & trigger : chat_params.grammar_triggers) {
        server_grammar_trigger ct(trigger);
        grammar_triggers.push_back(ct.to_json());
    }
    llama_params["grammar_triggers"] = grammar_triggers;
    llama_params["preserved_tokens"] = chat_params.preserved_tokens;
    llama_params["thinking_forced_open"] = chat_params.thinking_forced_open;
    for (const auto & stop : chat_params.additional_stops) {
        llama_params["stop"].push_back(stop);
    }
    if (!chat_params.parser.empty()) {
        llama_params["chat_parser"] = chat_params.parser;
    }

    // Handle "logprobs" field
    // TODO: the response format of this option is not yet OAI-compatible, but it seems like no one is really using it; we may need to fix it in the future
    if (json_value(body, "logprobs", false)) {
        if (has_tools && stream) {
            throw std::invalid_argument("logprobs is not supported with tools + stream");
        }
        llama_params["n_probs"] = json_value(body, "top_logprobs", 20);
    } else if (body.contains("top_logprobs") && !body.at("top_logprobs").is_null()) {
        throw std::invalid_argument("top_logprobs requires logprobs to be set to true");
    }

    // Copy remaining properties to llama_params
    // This allows the user to use llama.cpp-specific params like "mirostat", ... via the OAI endpoint.
    // See "launch_slot_with_task()" for a complete list of params supported by llama.cpp
    for (const auto & item : body.items()) {
        // Exception: if "n_predict" is present, we overwrite the value specified earlier by "max_tokens"
        if (!llama_params.contains(item.key()) || item.key() == "n_predict") {
            llama_params[item.key()] = item.value();
        }
    }

    return llama_params;
}

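// convert a request body in the OpenAI Responses API format ("input", "instructions",
// "max_output_tokens", ...) into an equivalent Chat Completions body, e.g.
//   {"input": "hi", "instructions": "be brief"}
//   -> {"messages": [{"role": "system", "content": "be brief"},
//                    {"role": "user",   "content": "hi"}]}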
json convert_responses_to_chatcmpl(const json & response_body) {
    if (!response_body.contains("input")) {
        throw std::invalid_argument("'input' is required");
    }
    if (!json_value(response_body, "previous_response_id", std::string{}).empty()) {
        throw std::invalid_argument("llama.cpp does not support 'previous_response_id'.");
    }

    const json input_value = response_body.at("input");
    json chatcmpl_body = response_body;
    chatcmpl_body.erase("input");
    std::vector<json> chatcmpl_messages;

    if (response_body.contains("instructions")) {
        chatcmpl_messages.push_back({
            {"role",    "system"},
            {"content", json_value(response_body, "instructions", std::string())},
        });
        chatcmpl_body.erase("instructions");
    }

    if (input_value.is_string()) {
        // #responses_create-input-text_input
        chatcmpl_messages.push_back({
            {"role",    "user"},
            {"content", input_value},
        });
    } else if (input_value.is_array()) {
        // #responses_create-input-input_item_list

        static auto exists_and_is_array = [](const json & j, const char * key) -> bool {
            return j.contains(key) && j.at(key).is_array();
        };
        static auto exists_and_is_string = [](const json & j, const char * key) -> bool {
            return j.contains(key) && j.at(key).is_string();
        };

        for (json item : input_value) {
            if (exists_and_is_string(item, "content")) {
                // #responses_create-input-input_item_list-input_message-content-text_input
                // Only "Input message" contains item["content"]::string
                // After converting item["content"]::string to item["content"]::array,
                // we can treat "Input message" as the sum of "Item-Input message" and "Item-Output message"
                item["content"] = json::array({
                    json {
                        {"text", item.at("content")},
                        {"type", "input_text"}
                    }
                });
            }

            if (exists_and_is_array(item, "content") &&
                exists_and_is_string(item, "role") &&
                (item.at("role") == "user" ||
                    item.at("role") == "system" ||
                    item.at("role") == "developer")
            ) {
                // #responses_create-input-input_item_list-item-input_message
                std::vector<json> chatcmpl_content;

                for (const json & input_item : item.at("content")) {
                    const std::string type = json_value(input_item, "type", std::string());

                    if (type == "input_text") {
                        if (!input_item.contains("text")) {
                            throw std::invalid_argument("'Input text' requires 'text'");
                        }
                        chatcmpl_content.push_back({
                            {"text", input_item.at("text")},
                            {"type", "text"},
                        });
                    } else if (type == "input_image") {
                        // While `detail` is marked as required,
                        // it has a default value ("auto") and can be omitted.

                        if (!input_item.contains("image_url")) {
                            throw std::invalid_argument("'image_url' is required");
                        }
                        chatcmpl_content.push_back({
                            {"image_url", json {
                                {"url", input_item.at("image_url")}
                            }},
                            {"type", "image_url"},
                        });
                    } else if (type == "input_file") {
                        throw std::invalid_argument("'input_file' is not supported by llama.cpp at this moment");
                        // if (input_item.contains("file_url")) {
                        //     // chat completion API does not support file_url
                        //     throw std::invalid_argument("'file_url' is not supported");
                        // }
                        // if (!input_item.contains("file_data") || !input_item.contains("filename")) {
                        //     throw std::invalid_argument("Both 'file_data' and 'filename' are required");
                        // }
                        // chatcmpl_content.push_back({
                        //     {"file", json {
                        //         {"file_data", input_item.at("file_data")},
                        //         {"filename",  input_item.at("filename")},
                        //     }},
                        //     {"type", "file"},
                        // });
                    } else {
                        throw std::invalid_argument("'type' must be one of 'input_text', 'input_image', or 'input_file'");
                    }
                }

                if (item.contains("type")) {
                    item.erase("type");
                }
                if (item.contains("status")) {
                    item.erase("status");
                }
                item["content"] = chatcmpl_content;

                chatcmpl_messages.push_back(item);
            } else if (exists_and_is_array(item, "content") &&
                exists_and_is_string(item, "role") &&
                item.at("role") == "assistant" &&
                // exists_and_is_string(item, "status") &&
                // (item.at("status") == "in_progress" ||
                //     item.at("status") == "completed" ||
                //     item.at("status") == "incomplete") &&
                // item["status"] not sent by codex-cli
                exists_and_is_string(item, "type") &&
                item.at("type") == "message"
            ) {
                // #responses_create-input-input_item_list-item-output_message
                std::vector<json> chatcmpl_content;

                for (const auto & output_text : item.at("content")) {
                    const std::string type = json_value(output_text, "type", std::string());
                    if (type != "output_text") {
                        throw std::invalid_argument("'type' must be 'output_text'");
                    }
                    if (!exists_and_is_string(output_text, "text")) {
                        throw std::invalid_argument("'Output text' requires 'text'");
                    }
                    // Ignore annotations and logprobs for now
                    chatcmpl_content.push_back({
                        {"text", output_text.at("text")},
                        {"type", "text"},
                    });
                }

                item.erase("status");
                item.erase("type");
                item["content"] = chatcmpl_content;
                chatcmpl_messages.push_back(item);
            } else if (exists_and_is_string(item, "arguments") &&
                exists_and_is_string(item, "call_id") &&
                exists_and_is_string(item, "name") &&
                exists_and_is_string(item, "type") &&
                item.at("type") == "function_call"
            ) {
                // #responses_create-input-input_item_list-item-function_tool_call
                json msg = json {
                    {"role", "assistant"},
                    {"tool_calls", json::array({ json {
                        {"function", json {
                            {"arguments", item.at("arguments")},
                            {"name",      item.at("name")},
                        }},
                        {"id",   item.at("call_id")},
                        {"type", "function"},
                    }})},
                };

                if (!chatcmpl_messages.empty() && chatcmpl_messages.back().contains("reasoning_content")) {
                    // Move reasoning content from the dummy message to the tool call message
                    msg["reasoning_content"] = chatcmpl_messages.back().at("reasoning_content");
                    chatcmpl_messages.pop_back();
                }
                chatcmpl_messages.push_back(msg);
            } else if (exists_and_is_string(item, "call_id") &&
                (exists_and_is_string(item, "output") || exists_and_is_array(item, "output")) &&
                exists_and_is_string(item, "type") &&
                item.at("type") == "function_call_output"
            ) {
                // #responses_create-input-input_item_list-item-function_tool_call_output
                if (item.at("output").is_string()) {
                    chatcmpl_messages.push_back(json {
                        {"content",      item.at("output")},
                        {"role",         "tool"},
                        {"tool_call_id", item.at("call_id")},
                    });
                } else {
                    json chatcmpl_outputs = item.at("output");
                    for (json & chatcmpl_output : chatcmpl_outputs) {
                        if (!chatcmpl_output.contains("type") || chatcmpl_output.at("type") != "input_text") {
                            throw std::invalid_argument("Output of tool call should be 'Input text'");
                        }
                        chatcmpl_output["type"] = "text";
                    }
                    chatcmpl_messages.push_back(json {
                        {"content",      chatcmpl_outputs},
                        {"role",         "tool"},
                        {"tool_call_id", item.at("call_id")},
                    });
                }
            } else if (// exists_and_is_string(item, "id") &&
                // item["id"] not sent by codex-cli
                exists_and_is_array(item, "summary") &&
                exists_and_is_string(item, "type") &&
                item.at("type") == "reasoning") {
                // #responses_create-input-input_item_list-item-reasoning

                if (!exists_and_is_array(item, "content")) {
                    throw std::invalid_argument("item['content'] is not an array");
                }
                if (item.at("content").empty()) {
                    throw std::invalid_argument("item['content'] is empty");
                }
                if (!exists_and_is_string(item.at("content")[0], "text")) {
                    throw std::invalid_argument("item['content']['text'] is not a string");
                }

                // Pack reasoning content in a dummy message
                chatcmpl_messages.push_back(json {
                    {"role", "assistant"},
                    {"content", json::array()},
                    {"reasoning_content", item.at("content")[0].at("text")},
                });
            } else {
                throw std::invalid_argument("Cannot determine type of 'item'");
            }
        }
    } else {
        throw std::invalid_argument("'input' must be a string or an array of objects");
    }

    // Remove any unused dummy message which contains
    // reasoning content not followed by a tool call
    chatcmpl_messages.erase(std::remove_if(
        chatcmpl_messages.begin(),
        chatcmpl_messages.end(),
        [](const json & x){ return x.contains("role") &&
            x.at("role") == "assistant" &&
            x.contains("content") &&
            x.at("content") == json::array() &&
            x.contains("reasoning_content");
        }),
        chatcmpl_messages.end()
    );

    chatcmpl_body["messages"] = chatcmpl_messages;

    if (response_body.contains("tools")) {
        if (!response_body.at("tools").is_array()) {
            throw std::invalid_argument("'tools' must be an array of objects");
        }
        std::vector<json> chatcmpl_tools;
        for (json resp_tool : response_body.at("tools")) {
            json chatcmpl_tool;

            if (json_value(resp_tool, "type", std::string()) != "function") {
                throw std::invalid_argument("'type' of tool must be 'function'");
            }
            resp_tool.erase("type");
            chatcmpl_tool["type"] = "function";

            if (!resp_tool.contains("strict")) {
                resp_tool["strict"] = true;
            }
            chatcmpl_tool["function"] = resp_tool;
            chatcmpl_tools.push_back(chatcmpl_tool);
        }
        chatcmpl_body.erase("tools");
        chatcmpl_body["tools"] = chatcmpl_tools;
    }

    if (response_body.contains("max_output_tokens")) {
        chatcmpl_body.erase("max_output_tokens");
        chatcmpl_body["max_tokens"] = response_body["max_output_tokens"];
    }

    return chatcmpl_body;
}

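// convert a request body in the Anthropic Messages API format into an OAI-compatible
// Chat Completions body: the top-level "system" prompt becomes a system message,
// base64/url image blocks become "image_url" parts, and "tool_use" blocks become tool_calls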
json convert_anthropic_to_oai(const json & body) {
    json oai_body;

    // Convert system prompt
    json oai_messages = json::array();
    auto system_param = json_value(body, "system", json());
    if (!system_param.is_null()) {
        std::string system_content;

        if (system_param.is_string()) {
            system_content = system_param.get<std::string>();
        } else if (system_param.is_array()) {
            for (const auto & block : system_param) {
                if (json_value(block, "type", std::string()) == "text") {
                    system_content += json_value(block, "text", std::string());
                }
            }
        }

        oai_messages.push_back({
            {"role", "system"},
            {"content", system_content}
        });
    }

    // Convert messages
    if (!body.contains("messages")) {
        throw std::runtime_error("'messages' is required");
    }
    const json & messages = body.at("messages");
    if (messages.is_array()) {
        for (const auto & msg : messages) {
            std::string role = json_value(msg, "role", std::string());

            if (!msg.contains("content")) {
                if (role == "assistant") {
                    continue;
                }
                oai_messages.push_back(msg);
                continue;
            }

            const json & content = msg.at("content");

            if (content.is_string()) {
                oai_messages.push_back(msg);
                continue;
            }

            if (!content.is_array()) {
                oai_messages.push_back(msg);
                continue;
            }

            json tool_calls = json::array();
            json converted_content = json::array();
            json tool_results = json::array();
            bool has_tool_calls = false;

            for (const auto & block : content) {
                std::string type = json_value(block, "type", std::string());

                if (type == "text") {
                    converted_content.push_back(block);
                } else if (type == "image") {
                    json source = json_value(block, "source", json::object());
                    std::string source_type = json_value(source, "type", std::string());

                    if (source_type == "base64") {
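                        // inline image: build a data URL, e.g. "data:image/jpeg;base64,<data>"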
                        std::string media_type = json_value(source, "media_type", std::string("image/jpeg"));
                        std::string data = json_value(source, "data", std::string());
                        std::ostringstream ss;
                        ss << "data:" << media_type << ";base64," << data;

                        converted_content.push_back({
                            {"type", "image_url"},
                            {"image_url", {
                                {"url", ss.str()}
                            }}
                        });
                    } else if (source_type == "url") {
                        std::string url = json_value(source, "url", std::string());
                        converted_content.push_back({
                            {"type", "image_url"},
                            {"image_url", {
                                {"url", url}
                            }}
                        });
                    }
                } else if (type == "tool_use") {
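                    // Anthropic tool_use block -> OAI tool_calls entry; "input" is a JSON
                    // object and must be serialized into an "arguments" string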
                    tool_calls.push_back({
                        {"id", json_value(block, "id", std::string())},
                        {"type", "function"},
                        {"function", {
                            {"name", json_value(block, "name", std::string())},
                            {"arguments", json_value(block, "input", json::object()).dump()}
                        }}
                    });
                    has_tool_calls = true;
                } else if (type == "tool_result") {
                    std::string tool_use_id = json_value(block, "tool_use_id", std::string());

                    auto result_content = json_value(block, "content", json());
                    std::string result_text;
                    if (result_content.is_string()) {
                        result_text = result_content.get<std::string>();
                    } else if (result_content.is_array()) {
                        for (const auto & c : result_content) {
                            if (json_value(c, "type", std::string()) == "text") {
                                result_text += json_value(c, "text", std::string());
                            }
                        }
                    }

                    tool_results.push_back({
                        {"role", "tool"},
                        {"tool_call_id", tool_use_id},
                        {"content", result_text}
                    });
                }
            }

            if (!converted_content.empty() || has_tool_calls) {
                json new_msg = {{"role", role}};
                if (!converted_content.empty()) {
                    new_msg["content"] = converted_content;
                } else if (has_tool_calls) {
                    new_msg["content"] = "";
                }
                if (!tool_calls.empty()) {
                    new_msg["tool_calls"] = tool_calls;
                }
                oai_messages.push_back(new_msg);
            }

            for (const auto & tool_msg : tool_results) {
                oai_messages.push_back(tool_msg);
            }
        }
    }

    oai_body["messages"] = oai_messages;

    // Convert tools
    if (body.contains("tools")) {
        const json & tools = body.at("tools");
        if (tools.is_array()) {
            json oai_tools = json::array();
            for (const auto & tool : tools) {
                oai_tools.push_back({
                    {"type", "function"},
                    {"function", {
                        {"name", json_value(tool, "name", std::string())},
                        {"description", json_value(tool, "description", std::string())},
                        {"parameters", tool.contains("input_schema") ? tool.at("input_schema") : json::object()}
                    }}
                });
            }
            oai_body["tools"] = oai_tools;
        }
    }

    // Convert tool_choice
    if (body.contains("tool_choice")) {
        const json & tc = body.at("tool_choice");
        if (tc.is_object()) {
            std::string type = json_value(tc, "type", std::string());
            if (type == "auto") {
                oai_body["tool_choice"] = "auto";
            } else if (type == "any" || type == "tool") {
                oai_body["tool_choice"] = "required";
            }
        }
    }

    // Convert stop_sequences to stop
    if (body.contains("stop_sequences")) {
        oai_body["stop"] = body.at("stop_sequences");
    }

    // Handle max_tokens (required in Anthropic, but we're permissive)
    if (body.contains("max_tokens")) {
        oai_body["max_tokens"] = body.at("max_tokens");
    } else {
        oai_body["max_tokens"] = 4096;
    }

    // Pass through common params
    for (const auto & key : {"temperature", "top_p", "top_k", "stream"}) {
        if (body.contains(key)) {
            oai_body[key] = body.at(key);
        }
    }

    // Handle Anthropic-specific thinking param
    if (body.contains("thinking")) {
        json thinking = json_value(body, "thinking", json::object());
        std::string thinking_type = json_value(thinking, "type", std::string());
        if (thinking_type == "enabled") {
            int budget_tokens = json_value(thinking, "budget_tokens", 10000);
            oai_body["thinking_budget_tokens"] = budget_tokens;
        }
    }

    // Handle Anthropic-specific metadata param
    if (body.contains("metadata")) {
        json metadata = json_value(body, "metadata", json::object());
        std::string user_id = json_value(metadata, "user_id", std::string());
        if (!user_id.empty()) {
            oai_body["__metadata_user_id"] = user_id;
        }
    }

    return oai_body;
}

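// build an OpenAI-compatible /v1/embeddings response; sketch of the output shape:
//   {"object": "list", "model": "...", "usage": {"prompt_tokens": n, "total_tokens": n},
//    "data": [{"object": "embedding", "index": 0, "embedding": [...]}, ...]}
// with use_base64, each vector is instead sent as the base64 encoding of its raw
// float32 bytes (host byte order)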
json format_embeddings_response_oaicompat(
        const json & request,
        const std::string & model_name,
        const json & embeddings,
        bool use_base64) {
    json data = json::array();
    int32_t n_tokens = 0;
    int i = 0;
    for (const auto & elem : embeddings) {
        json embedding_obj;

        if (use_base64) {
            const auto & vec = json_value(elem, "embedding", json::array()).get<std::vector<float>>();
            const char * data_ptr = reinterpret_cast<const char *>(vec.data());
            size_t data_size = vec.size() * sizeof(float);
            embedding_obj = {
                {"embedding", base64::encode(data_ptr, data_size)},
                {"index", i++},
                {"object", "embedding"},
                {"encoding_format", "base64"}
            };
        } else {
            embedding_obj = {
                {"embedding", json_value(elem, "embedding", json::array())},
                {"index", i++},
                {"object", "embedding"}
            };
        }
        data.push_back(embedding_obj);

        n_tokens += json_value(elem, "tokens_evaluated", 0);
    }

    json res = json {
        {"model", json_value(request, "model", model_name)},
        {"object", "list"},
        {"usage", json {
            {"prompt_tokens", n_tokens},
            {"total_tokens", n_tokens}
        }},
        {"data", data}
    };

    return res;
}

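// build a rerank response, sorted by score and truncated to top_n; TEI format
// returns the bare array with a "score" field, otherwise the results are
// wrapped in an OpenAI-style object using "relevance_score"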
json format_response_rerank(
        const json & request,
        const std::string & model_name,
        const json & ranks,
        bool is_tei_format,
        std::vector<std::string> & texts,
        int top_n) {
    int32_t n_tokens = 0;
    bool return_text = is_tei_format && json_value(request, "return_text", false);
    std::vector<json> elements; // Temporary vector to hold unsorted elements
    std::string score_label = is_tei_format ? "score" : "relevance_score";
    for (const auto & rank : ranks) {
        int index = json_value(rank, "index", 0);
        json elem = json{
            {"index", index},
            {score_label, json_value(rank, "score", 0.0)},
        };
        n_tokens += json_value(rank, "tokens_evaluated", 0);
        if (return_text) {
            elem["text"] = std::move(texts[index]);
        }
        elements.push_back(elem);
    }

    std::sort(elements.begin(), elements.end(), [score_label](const json & a, const json & b) {
        return json_value(a, score_label, 0.0) > json_value(b, score_label, 0.0);
    });

    elements.resize(std::min(top_n, (int)elements.size()));
    json results = elements;

    if (is_tei_format) return results;

    json res = json{
        {"model", json_value(request, "model", model_name)},
        {"object", "list"},
        {"usage", json{
            {"prompt_tokens", n_tokens},
            {"total_tokens", n_tokens}
        }},
        {"results", results}
    };

    return res;
}

//
// other utils
//

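// collect token probabilities for the logits at batch position idx: use the
// sampler's candidate list when available (otherwise the full vocab), sort by
// logit and normalize with a softmax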
std::vector<llama_token_data> get_token_probabilities(llama_context * ctx, int idx) {
    std::vector<llama_token_data> cur;

    const auto * logits = llama_get_logits_ith(ctx, idx);
    const llama_token * sampled_ids = llama_get_sampled_candidates_ith(ctx, idx);

    const int n_logits = llama_get_sampled_logits_count_ith(ctx, idx);

    if (n_logits <= 0) {
        return cur; // nothing to do - avoids indexing into an empty vector below
    }

    cur.resize(n_logits);
    if (sampled_ids) {
        for (int i = 0; i < n_logits; i++) {
            cur[i] = llama_token_data{sampled_ids[i], logits[i], 0.0f};
        }
    } else {
        for (llama_token token_id = 0; token_id < n_logits; token_id++) {
            cur[token_id] = llama_token_data{token_id, logits[token_id], 0.0f};
        }
    }

    // sort tokens by logits
    std::sort(cur.begin(), cur.end(), [](const llama_token_data & a, const llama_token_data & b) {
        return a.logit > b.logit;
    });

    // apply softmax: p_i = exp(l_i - max_l) / sum_j exp(l_j - max_l),
    // subtracting the max logit first for numerical stability
    float max_l = cur[0].logit;
    float cum_sum = 0.0f;
    for (size_t i = 0; i < cur.size(); ++i) {
        float p = expf(cur[i].logit - max_l);
        cur[i].p = p;
        cum_sum += p;
    }
    for (size_t i = 0; i < cur.size(); ++i) {
        cur[i].p /= cum_sum;
    }

    return cur;
}

std::string safe_json_to_str(const json & data) {
    return data.dump(-1, ' ', false, json::error_handler_t::replace);
}

// TODO: reuse llama_detokenize
template <class Iter>
static std::string tokens_to_str(const llama_vocab * vocab, Iter begin, Iter end) {
    std::string ret;
    for (; begin != end; ++begin) {
        ret += common_token_to_piece(vocab, *begin);
    }

    return ret;
}

std::string tokens_to_str(llama_context * ctx, const llama_tokens & tokens) {
    auto model = llama_get_model(ctx);
    return tokens_to_str(llama_model_get_vocab(model), tokens.begin(), tokens.end());
}

std::string tokens_to_str(const llama_vocab * vocab, const llama_tokens & tokens) {
    return tokens_to_str(vocab, tokens.begin(), tokens.end());
}

// format incomplete utf-8 multibyte character for output
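// e.g. a dangling lead byte 0xE6 is rendered as "byte: \xe6"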
std::string tokens_to_output_formatted_string(const llama_context * ctx, const llama_token token) {
    std::string out = token == LLAMA_TOKEN_NULL ? "" : common_token_to_piece(ctx, token);

    // a single byte with the high bit set is a partial multi-byte UTF-8 character
    // (a piece longer than one byte is already a complete token string)
    if (out.size() == 1 && (out[0] & 0x80) == 0x80) {
        std::stringstream ss;
        ss << std::hex << (out[0] & 0xff);
        std::string res(ss.str());
        out = "byte: \\x" + res;
    }

    return out;
}

// format server-sent event (SSE), return the formatted string to send
// note: if data is a json array, it will be sent as multiple events, one per item
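// e.g. {"a":1} is emitted as 'data: {"a":1}' followed by a blank line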
std::string format_oai_sse(const json & data) {
    std::ostringstream ss;
    auto send_single = [&ss](const json & data) {
        ss << "data: " <<
            safe_json_to_str(data) <<
            "\n\n"; // the SSE spec (WHATWG HTML) requires a message to be terminated by a blank line (two line terminators in a row)
    };

    if (data.is_array()) {
        for (const auto & item : data) {
            send_single(item);
        }
    } else {
        send_single(data);
    }

    return ss.str();
}

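// format SSE for the Responses API: each item must carry "event" and "data",
// emitted as "event: <name>" followed by "data: <json>" and a blank line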
std::string format_oai_resp_sse(const json & data) {
    std::ostringstream ss;
    auto send_single = [&ss](const json & event_obj) {
        ss << "event: " << event_obj.at("event").get<std::string>() << "\n";
        ss << "data: " << safe_json_to_str(event_obj.at("data")) << "\n\n";
    };

    if (data.is_array()) {
        for (const auto & item : data) {
            send_single(item);
        }
    } else {
        send_single(data);
    }

    return ss.str();
}

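// format SSE for the Anthropic API: items carrying "event" and "data" are emitted
// as named events; anything else falls back to a bare "data:" line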
std::string format_anthropic_sse(const json & data) {
    std::ostringstream ss;

    auto send_event = [&ss](const json & event_obj) {
        if (event_obj.contains("event") && event_obj.contains("data")) {
            ss << "event: " << event_obj.at("event").get<std::string>() << "\n";
            ss << "data: " << safe_json_to_str(event_obj.at("data")) << "\n\n";
        } else {
            ss << "data: " << safe_json_to_str(event_obj) << "\n\n";
        }
    };

    if (data.is_array()) {
        for (const auto & event : data) {
            send_event(event);
        }
    } else {
        send_event(data);
    }

    return ss.str();
}

bool is_valid_utf8(const std::string & str) {
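    // note: only lead/continuation byte patterns are validated; overlong encodings,
    // UTF-16 surrogates and code points above U+10FFFF are not rejected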
    const unsigned char * bytes = reinterpret_cast<const unsigned char *>(str.data());
    const unsigned char * end = bytes + str.length();

    while (bytes < end) {
        if (*bytes <= 0x7F) {
            // 1-byte sequence (0xxxxxxx)
            bytes++;
        } else if ((*bytes & 0xE0) == 0xC0) {
            // 2-byte sequence (110xxxxx 10xxxxxx)
            if (end - bytes < 2 || (bytes[1] & 0xC0) != 0x80)
                return false;
            bytes += 2;
        } else if ((*bytes & 0xF0) == 0xE0) {
            // 3-byte sequence (1110xxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 3 || (bytes[1] & 0xC0) != 0x80 || (bytes[2] & 0xC0) != 0x80)
                return false;
            bytes += 3;
        } else if ((*bytes & 0xF8) == 0xF0) {
            // 4-byte sequence (11110xxx 10xxxxxx 10xxxxxx 10xxxxxx)
            if (end - bytes < 4 || (bytes[1] & 0xC0) != 0x80 ||
                (bytes[2] & 0xC0) != 0x80 || (bytes[3] & 0xC0) != 0x80)
                return false;
            bytes += 4;
        } else {
            // Invalid UTF-8 lead byte
            return false;
        }
    }

    return true;
}

llama_tokens format_prompt_infill(
        const llama_vocab * vocab,
        const json & input_prefix,
        const json & input_suffix,
        const json & input_extra,
        const int n_batch,
        const int n_predict,
        const int n_ctx,
        const bool spm_infill,
        const llama_tokens & tokens_prompt
    ) {
    // TODO: optimize this block by reducing memory allocations and movement

    // use FIM repo-level pattern:
    // ref: https://arxiv.org/pdf/2409.12186
    //
    // [FIM_REP]myproject
    // [FIM_SEP]filename0
    // extra chunk 0
    // [FIM_SEP]filename1
    // extra chunk 1
    // ...
    // [FIM_SEP]filename
    // [FIM_PRE]prefix[FIM_SUF]suffix[FIM_MID]prompt
    //
    llama_tokens extra_tokens;
    extra_tokens.reserve(n_ctx);

    auto tokens_prefix = tokenize_mixed(vocab, input_prefix, false, false);
    auto tokens_suffix = tokenize_mixed(vocab, input_suffix, false, false);

    if (llama_vocab_fim_rep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: make project name an input
        static const auto k_fim_repo = common_tokenize(vocab, "myproject\n", false, false);

        extra_tokens.push_back(llama_vocab_fim_rep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_repo.begin(), k_fim_repo.end());
    }
    for (const auto & chunk : input_extra) {
        // { "text": string, "filename": string }
        const std::string text     = json_value(chunk, "text",     std::string());
        const std::string filename = json_value(chunk, "filename", std::string("tmp"));

        if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
            const auto k_fim_file = common_tokenize(vocab, filename + "\n", false, false);

            extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
            extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
        } else {
            // chunk separator in binary form to avoid confusing the AI
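            // (the bytes spell "\n\n--- snippet ---\n\n")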
            static const char k_chunk_prefix_str[] = {0x0a, 0x0a, 0x2d, 0x2d, 0x2d, 0x20, 0x73, 0x6e, 0x69, 0x70, 0x70, 0x65, 0x74, 0x20, 0x2d, 0x2d, 0x2d, 0x0a, 0x0a, 0x00};
            static const auto k_chunk_prefix_tokens = common_tokenize(vocab, k_chunk_prefix_str, false, false);

            extra_tokens.insert(extra_tokens.end(), k_chunk_prefix_tokens.begin(), k_chunk_prefix_tokens.end());
        }

        const auto chunk_tokens = common_tokenize(vocab, text, false, false);
        extra_tokens.insert(extra_tokens.end(), chunk_tokens.begin(), chunk_tokens.end());
    }

    if (llama_vocab_fim_sep(vocab) != LLAMA_TOKEN_NULL) {
        // TODO: current filename
        static const auto k_fim_file = common_tokenize(vocab, "filename\n", false, false);

        extra_tokens.insert(extra_tokens.end(), llama_vocab_fim_sep(vocab));
        extra_tokens.insert(extra_tokens.end(), k_fim_file.begin(), k_fim_file.end());
    }

    // for now pick FIM context to fit in a batch (ratio prefix:suffix = 3:1, TODO: configurable?)
    const int n_prefix_take = std::min<int>(tokens_prefix.size(),                3*(n_batch/4));
    const int n_suffix_take = std::min<int>(tokens_suffix.size(), std::max<int>(0, (n_batch/4) - (2 + tokens_prompt.size())));
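    // e.g. with n_batch = 2048 and an empty prompt: up to 1536 prefix tokens and
    // up to 510 suffix tokens are kept (the formula reserves 2 extra slots)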

    SRV_DBG("n_prefix_take = %d, n_suffix_take = %d, total = %d\n", n_prefix_take, n_suffix_take, (n_prefix_take + n_suffix_take));

    // fill the rest of the context with extra chunks, leaving room for the
    // current batch and a margin of 2*n_predict tokens
    const int n_extra_take = std::min<int>(std::max<int>(0, n_ctx - (n_batch) - 2*n_predict), extra_tokens.size());

    tokens_prefix.erase(tokens_prefix.begin(), tokens_prefix.begin() + tokens_prefix.size() - n_prefix_take);
    tokens_suffix.resize(n_suffix_take);

    tokens_prefix.insert(tokens_prefix.begin(), llama_vocab_fim_pre(vocab));
    tokens_prefix.insert(tokens_prefix.end(),   tokens_prompt.begin(), tokens_prompt.end());
    tokens_suffix.insert(tokens_suffix.begin(), llama_vocab_fim_suf(vocab));

    auto embd_inp = spm_infill ? tokens_suffix : tokens_prefix;
    auto embd_end = spm_infill ? tokens_prefix : tokens_suffix;

    if (llama_vocab_get_add_bos(vocab)) {
        embd_inp.insert(embd_inp.begin(), llama_vocab_bos(vocab));
    }

    SRV_DBG("extra: n_ctx = %d, n_extra_take = %d, n_extra = %d\n", n_ctx, n_extra_take, (int) extra_tokens.size());

    // put the extra context before the FIM prefix
    embd_inp.insert(embd_inp.begin(), extra_tokens.end() - n_extra_take, extra_tokens.end());

    embd_inp.insert(embd_inp.end(), embd_end.begin(), embd_end.end());
    embd_inp.push_back(llama_vocab_fim_mid(vocab));

    return embd_inp;
}

server_tokens format_prompt_rerank(
        const struct llama_model * model,
        const struct llama_vocab * vocab,
        mtmd_context * mctx,
        const std::string & query,
        const std::string & doc) {
    server_tokens result = {};

    const char * rerank_prompt = llama_model_chat_template(model, "rerank");

    if (rerank_prompt != nullptr) {
        std::string prompt = rerank_prompt;
        string_replace_all(prompt, "{query}"   , query);
        string_replace_all(prompt, "{document}", doc  );
        server_tokens tokens = tokenize_input_subprompt(vocab, mctx, prompt, false, true);
        result.push_back(tokens);
    } else {
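        // no "rerank" template in the model: fall back to the
        // [BOS] query [EOS] [SEP] doc [EOS] layout, emitting each
        // special token only if the vocab asks for it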
        server_tokens query_tokens = tokenize_input_subprompt(vocab, mctx, query, false, false);
        server_tokens doc_tokens   = tokenize_input_subprompt(vocab, mctx, doc,   false, false);

        // use the SEP token as a fallback if EOS is not available
        llama_token eos_token = llama_vocab_eos(vocab);
        if (eos_token == LLAMA_TOKEN_NULL) {
            eos_token = llama_vocab_sep(vocab);
        }

        if (llama_vocab_get_add_bos(vocab)) {
            result.push_back(llama_vocab_bos(vocab));
        }
        result.push_back(query_tokens);
        if (llama_vocab_get_add_eos(vocab)) {
            result.push_back(eos_token);
        }
        if (llama_vocab_get_add_sep(vocab)) {
            result.push_back(llama_vocab_sep(vocab));
        }
        result.push_back(doc_tokens);
        if (llama_vocab_get_add_eos(vocab)) {
            result.push_back(eos_token);
        }
    }

    return result;
}