// Note: porting this file to C++ is a work in progress

#ifdef _WIN32
#define WIN32_LEAN_AND_MEAN
#ifndef NOMINMAX
#   define NOMINMAX
#endif
#include <windows.h>
#endif

#include "ggml-backend.h"
#include "ggml-backend-impl.h"
#include "ggml-alloc.h"
#include "ggml-impl.h"

#include <assert.h>
#include <limits.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <vector>

#ifdef __APPLE__
#include <sys/types.h>
#include <sys/sysctl.h>
#endif


// backend buffer type

const char * ggml_backend_buft_name(ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(buft);
    return buft->iface.get_name(buft);
}

ggml_backend_buffer_t ggml_backend_buft_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    GGML_ASSERT(buft);
    if (size == 0) {
        // return a dummy buffer for zero-sized allocations
        return ggml_backend_buffer_init(buft, {}, NULL, 0);
    }
    return buft->iface.alloc_buffer(buft, size);
}

size_t ggml_backend_buft_get_alignment(ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(buft);
    return buft->iface.get_alignment(buft);
}

size_t ggml_backend_buft_get_max_size(ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(buft);
    // get_max_size is optional, defaults to SIZE_MAX
    if (buft->iface.get_max_size) {
        return buft->iface.get_max_size(buft);
    }
    return SIZE_MAX;
}

size_t ggml_backend_buft_get_alloc_size(ggml_backend_buffer_type_t buft, const struct ggml_tensor * tensor) {
    GGML_ASSERT(buft);
    // get_alloc_size is optional, defaults to ggml_nbytes
    if (buft->iface.get_alloc_size) {
        size_t size = buft->iface.get_alloc_size(buft, tensor);
        assert(size >= ggml_nbytes(tensor));
        return size;
    }
    return ggml_nbytes(tensor);
}

bool ggml_backend_buft_is_host(ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(buft);
    if (buft->iface.is_host) {
        return buft->iface.is_host(buft);
    }
    return false;
}

ggml_backend_dev_t ggml_backend_buft_get_device(ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(buft);
    return buft->device;
}

// backend buffer

ggml_backend_buffer_t ggml_backend_buffer_init(
               ggml_backend_buffer_type_t buft,
        struct ggml_backend_buffer_i      iface,
               void *                     context,
               size_t                     size) {
    ggml_backend_buffer_t buffer = new ggml_backend_buffer {
        /* .interface = */ iface,
        /* .buft      = */ buft,
        /* .context   = */ context,
        /* .size      = */ size,
        /* .usage     = */ GGML_BACKEND_BUFFER_USAGE_ANY
    };

    return buffer;
}

const char * ggml_backend_buffer_name(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_name(ggml_backend_buffer_get_type(buffer));
}

void ggml_backend_buffer_free(ggml_backend_buffer_t buffer) {
    if (buffer == NULL) {
        return;
    }

    if (buffer->iface.free_buffer != NULL) {
        buffer->iface.free_buffer(buffer);
    }
    delete buffer;
}

size_t ggml_backend_buffer_get_size(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    return buffer->size;
}

void * ggml_backend_buffer_get_base(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    // get_base is optional if the buffer is zero-sized
    if (buffer->size == 0) {
        return NULL;
    }

    // FIXME JG: a multi_buffer has a non-zero size, according to the above comment get_base is not optional,
    //     I don't know whether the above comment is correct
    if (!buffer->iface.get_base) {
        return NULL;
    }

    void * base = buffer->iface.get_base(buffer);

    GGML_ASSERT(base != NULL && "backend buffer base cannot be NULL");

    return base;
}

enum ggml_status ggml_backend_buffer_init_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor) {
    GGML_ASSERT(buffer);
    // init_tensor is optional
    if (buffer->iface.init_tensor) {
        return buffer->iface.init_tensor(buffer, tensor);
    }
    return GGML_STATUS_SUCCESS;
}

void ggml_backend_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    GGML_ASSERT(buffer);
    // clear is optional if the buffer is zero-sized
    if (buffer->size == 0) {
        return;
    }

    buffer->iface.clear(buffer, value);
}

size_t ggml_backend_buffer_get_alignment(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_alignment(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_max_size(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_get_max_size(ggml_backend_buffer_get_type(buffer));
}

size_t ggml_backend_buffer_get_alloc_size(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor) {
    return ggml_backend_buft_get_alloc_size(ggml_backend_buffer_get_type(buffer), tensor);
}

bool ggml_backend_buffer_is_host(ggml_backend_buffer_t buffer) {
    return ggml_backend_buft_is_host(ggml_backend_buffer_get_type(buffer));
}

void ggml_backend_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    GGML_ASSERT(buffer);
    buffer->usage = usage;

    // FIXME: add a generic callback to the buffer interface
    if (ggml_backend_buffer_is_multi_buffer(buffer)) {
        ggml_backend_multi_buffer_set_usage(buffer, usage);
    }
}

enum ggml_backend_buffer_usage ggml_backend_buffer_get_usage(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    return buffer->usage;
}

ggml_backend_buffer_type_t ggml_backend_buffer_get_type(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    return buffer->buft;
}

void ggml_backend_buffer_reset(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    if (buffer->iface.reset) {
        buffer->iface.reset(buffer);
    }
}

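// attempts a backend-specific copy through the destination buffer's cpy_tensor callback; returns false if the buffer does not implement it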
bool ggml_backend_buffer_copy_tensor(const struct ggml_tensor * src, struct ggml_tensor * dst) {
    ggml_backend_buffer_t dst_buf = dst->view_src ? dst->view_src->buffer : dst->buffer;
    if (dst_buf->iface.cpy_tensor) {
        return dst_buf->iface.cpy_tensor(dst_buf, src, dst);
    }
    return false;
}

// backend

ggml_guid_t ggml_backend_guid(ggml_backend_t backend) {
    if (backend == NULL) {
        return NULL;
    }
    return backend->guid;
}

const char * ggml_backend_name(ggml_backend_t backend) {
    if (backend == NULL) {
        return "NULL";
    }
    return backend->iface.get_name(backend);
}

void ggml_backend_free(ggml_backend_t backend) {
    if (backend == NULL) {
        return;
    }

    backend->iface.free(backend);
}

ggml_backend_buffer_type_t ggml_backend_get_default_buffer_type(ggml_backend_t backend) {
    GGML_ASSERT(backend);
    return ggml_backend_dev_buffer_type(backend->device);
}

ggml_backend_buffer_t ggml_backend_alloc_buffer(ggml_backend_t backend, size_t size) {
    return ggml_backend_buft_alloc_buffer(ggml_backend_get_default_buffer_type(backend), size);
}

size_t ggml_backend_get_alignment(ggml_backend_t backend) {
    return ggml_backend_buft_get_alignment(ggml_backend_get_default_buffer_type(backend));
}

size_t ggml_backend_get_max_size(ggml_backend_t backend) {
    return ggml_backend_buft_get_max_size(ggml_backend_get_default_buffer_type(backend));
}

void ggml_backend_tensor_set_async(ggml_backend_t backend, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    GGML_ASSERT(backend);
    GGML_ASSERT(tensor);
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    if (backend->iface.set_tensor_async == NULL) {
        ggml_backend_synchronize(backend);
        ggml_backend_tensor_set(tensor, data, offset, size);
    } else {
        backend->iface.set_tensor_async(backend, tensor, data, offset, size);
    }
}

void ggml_backend_tensor_get_async(ggml_backend_t backend, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    GGML_ASSERT(backend);
    GGML_ASSERT(tensor);
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    if (backend->iface.get_tensor_async == NULL) {
        ggml_backend_synchronize(backend);
        ggml_backend_tensor_get(tensor, data, offset, size);
    } else {
        backend->iface.get_tensor_async(backend, tensor, data, offset, size);
    }
}

void ggml_backend_tensor_set(struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor);
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    if (size == 0) {
        return;
    }

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");

    buf->iface.set_tensor(buf, tensor, data, offset, size);
}

void ggml_backend_tensor_get(const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    GGML_ASSERT(tensor);
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    if (size == 0) {
        return;
    }

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor read out of bounds");

    buf->iface.get_tensor(buf, tensor, data, offset, size);
}

void ggml_backend_tensor_memset(struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    GGML_ASSERT(tensor);
    ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;

    if (size == 0) {
        return;
    }

    GGML_ASSERT(buf != NULL && "tensor buffer not set");
    GGML_ASSERT(tensor->data != NULL && "tensor not allocated");
    GGML_ASSERT(offset + size <= ggml_nbytes(tensor) && "tensor write out of bounds");
    GGML_ASSERT(buf->iface.memset_tensor != NULL && "memset not implemented by backend buffer");

    buf->iface.memset_tensor(buf, tensor, value, offset, size);
}

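// synchronize is optional; backends that execute synchronously can omit it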
void ggml_backend_synchronize(ggml_backend_t backend) {
    GGML_ASSERT(backend);
    if (backend->iface.synchronize == NULL) {
        return;
    }

    backend->iface.synchronize(backend);
}

ggml_backend_graph_plan_t ggml_backend_graph_plan_create(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    GGML_ASSERT(backend);
    GGML_ASSERT(backend->iface.graph_plan_create != NULL);

    return backend->iface.graph_plan_create(backend, cgraph);
}

void ggml_backend_graph_plan_free(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    GGML_ASSERT(backend);
    GGML_ASSERT(backend->iface.graph_plan_free != NULL);

    backend->iface.graph_plan_free(backend, plan);
}

enum ggml_status ggml_backend_graph_plan_compute(ggml_backend_t backend, ggml_backend_graph_plan_t plan) {
    GGML_ASSERT(backend);
    GGML_ASSERT(backend->iface.graph_plan_compute != NULL);

    return backend->iface.graph_plan_compute(backend, plan);
}

enum ggml_status ggml_backend_graph_compute(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    enum ggml_status err = ggml_backend_graph_compute_async(backend, cgraph);
    ggml_backend_synchronize(backend);
    return err;
}

enum ggml_status ggml_backend_graph_compute_async(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    GGML_ASSERT(backend);
    return backend->iface.graph_compute(backend, cgraph);
}

bool ggml_backend_supports_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    GGML_ASSERT(backend);
    return ggml_backend_dev_supports_op(backend->device, op);
}

bool ggml_backend_supports_buft(ggml_backend_t backend, ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(backend);
    return ggml_backend_dev_supports_buft(backend->device, buft);
}

bool ggml_backend_offload_op(ggml_backend_t backend, const struct ggml_tensor * op) {
    GGML_ASSERT(backend);
    return ggml_backend_dev_offload_op(backend->device, op);
}

ggml_backend_dev_t ggml_backend_get_device(ggml_backend_t backend) {
    GGML_ASSERT(backend);
    return backend->device;
}

// backend copy

void ggml_backend_tensor_copy(struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    if (ggml_backend_buffer_is_host(src->buffer)) {
        ggml_backend_tensor_set(dst, src->data, 0, ggml_nbytes(src));
    } else if (ggml_backend_buffer_is_host(dst->buffer)) {
        ggml_backend_tensor_get(src, dst->data, 0, ggml_nbytes(src));
    } else if (!ggml_backend_buffer_copy_tensor(src, dst)) {
#ifndef NDEBUG
        GGML_LOG_DEBUG("%s: warning: slow copy from %s to %s\n", __func__, ggml_backend_buffer_name(src->buffer), ggml_backend_buffer_name(dst->buffer));
#endif
        size_t nbytes = ggml_nbytes(src);
        void * data = malloc(nbytes);
        ggml_backend_tensor_get(src, data, 0, nbytes);
        ggml_backend_tensor_set(dst, data, 0, nbytes);
        free(data);
    }
}

void ggml_backend_tensor_copy_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, struct ggml_tensor * src, struct ggml_tensor * dst) {
    GGML_ASSERT(ggml_are_same_layout(src, dst) && "cannot copy tensors with different layouts");

    if (src == dst) {
        return;
    }

    GGML_ASSERT(backend_dst);
    if (backend_dst->iface.cpy_tensor_async != NULL) {
        if (backend_dst->iface.cpy_tensor_async(backend_src, backend_dst, src, dst)) {
            return;
        }
    }

    // an async copy would normally happen after all the queued operations on both backends are completed
    // to simulate the same behavior, we need to synchronize both backends first, and do a blocking copy
    ggml_backend_synchronize(backend_src);
    ggml_backend_synchronize(backend_dst);
    ggml_backend_tensor_copy(src, dst);
}

// events

ggml_backend_event_t ggml_backend_event_new(ggml_backend_dev_t device) {
    // null device is allowed for the transition period to the device interface
    if (device == NULL || device->iface.event_new == NULL) {
        return NULL;
    }
    return device->iface.event_new(device);
}

void ggml_backend_event_free(ggml_backend_event_t event) {
    if (event == NULL) {
        return;
    }
    event->device->iface.event_free(event->device, event);
}

void ggml_backend_event_record(ggml_backend_event_t event, ggml_backend_t backend) {
    GGML_ASSERT(backend);
    GGML_ASSERT(backend->iface.event_record != NULL);

    backend->iface.event_record(backend, event);
}

void ggml_backend_event_synchronize(ggml_backend_event_t event) {
    GGML_ASSERT(event);
    GGML_ASSERT(event->device->iface.event_synchronize);

    event->device->iface.event_synchronize(event->device, event);
}

void ggml_backend_event_wait(ggml_backend_t backend, ggml_backend_event_t event) {
    GGML_ASSERT(backend);
    GGML_ASSERT(backend->iface.event_wait != NULL);

    backend->iface.event_wait(backend, event);
}

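// graph_optimize is an optional backend hook; the scheduler calls it on each split before the graph copy is built and allocated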
static void ggml_backend_graph_optimize(ggml_backend_t backend, struct ggml_cgraph * cgraph) {
    GGML_ASSERT(backend);
    if (backend->iface.graph_optimize != NULL) {
        backend->iface.graph_optimize(backend, cgraph);
    }
}

// Backend device

const char * ggml_backend_dev_name(ggml_backend_dev_t device) {
    GGML_ASSERT(device);
    return device->iface.get_name(device);
}

const char * ggml_backend_dev_description(ggml_backend_dev_t device) {
    GGML_ASSERT(device);
    return device->iface.get_description(device);
}

void ggml_backend_dev_memory(ggml_backend_dev_t device, size_t * free, size_t * total) {
    GGML_ASSERT(device);
    device->iface.get_memory(device, free, total);
}

enum ggml_backend_dev_type ggml_backend_dev_type(ggml_backend_dev_t device) {
    GGML_ASSERT(device);
    return device->iface.get_type(device);
}

void ggml_backend_dev_get_props(ggml_backend_dev_t device, struct ggml_backend_dev_props * props) {
    memset(props, 0, sizeof(*props));
    device->iface.get_props(device, props);
}

ggml_backend_reg_t ggml_backend_dev_backend_reg(ggml_backend_dev_t device) {
    GGML_ASSERT(device);
    return device->reg;
}

ggml_backend_t ggml_backend_dev_init(ggml_backend_dev_t device, const char * params) {
    GGML_ASSERT(device);
    return device->iface.init_backend(device, params);
}

ggml_backend_buffer_type_t ggml_backend_dev_buffer_type(ggml_backend_dev_t device) {
    GGML_ASSERT(device);
    return device->iface.get_buffer_type(device);
}

ggml_backend_buffer_type_t ggml_backend_dev_host_buffer_type(ggml_backend_dev_t device) {
    GGML_ASSERT(device);
    if (device->iface.get_host_buffer_type == NULL) {
        return NULL;
    }

    return device->iface.get_host_buffer_type(device);
}

ggml_backend_buffer_t ggml_backend_dev_buffer_from_host_ptr(ggml_backend_dev_t device, void * ptr, size_t size, size_t max_tensor_size) {
    GGML_ASSERT(device);
    return device->iface.buffer_from_host_ptr(device, ptr, size, max_tensor_size);
}

bool ggml_backend_dev_supports_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
    GGML_ASSERT(device);
    return device->iface.supports_op(device, op);
}

bool ggml_backend_dev_supports_buft(ggml_backend_dev_t device, ggml_backend_buffer_type_t buft) {
    GGML_ASSERT(device);
    return device->iface.supports_buft(device, buft);
}

bool ggml_backend_dev_offload_op(ggml_backend_dev_t device, const struct ggml_tensor * op) {
    GGML_ASSERT(device);
    if (device->iface.offload_op != NULL) {
        return device->iface.offload_op(device, op);
    }

    return false;
}

// Backend (reg)

const char * ggml_backend_reg_name(ggml_backend_reg_t reg) {
    GGML_ASSERT(reg);
    return reg->iface.get_name(reg);
}

size_t ggml_backend_reg_dev_count(ggml_backend_reg_t reg) {
    GGML_ASSERT(reg);
    return reg->iface.get_device_count(reg);
}

ggml_backend_dev_t ggml_backend_reg_dev_get(ggml_backend_reg_t reg, size_t index) {
    GGML_ASSERT(reg);
    return reg->iface.get_device(reg, index);
}

void * ggml_backend_reg_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    GGML_ASSERT(reg);
    if (!reg->iface.get_proc_address) {
        return NULL;
    }
    return reg->iface.get_proc_address(reg, name);
}

// multi-buffer buffer

struct ggml_backend_multi_buffer_context {
    ggml_backend_buffer_t * buffers;
    size_t n_buffers;
};

static void ggml_backend_multi_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_free(ctx->buffers[i]);
    }

    free(ctx->buffers);
    free(ctx);
}

static void ggml_backend_multi_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    GGML_ASSERT(buffer);
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_clear(ctx->buffers[i], value);
    }
}

static const struct ggml_backend_buffer_i ggml_backend_multi_buffer_i = {
    /* .free_buffer     = */ ggml_backend_multi_buffer_free_buffer,
    /* .get_base        = */ NULL,
    /* .init_tensor     = */ NULL,
    /* .memset_tensor   = */ NULL,
    /* .set_tensor      = */ NULL,
    /* .get_tensor      = */ NULL,
    /* .cpy_tensor      = */ NULL,
    /* .clear           = */ ggml_backend_multi_buffer_clear,
    /* .reset           = */ NULL,
};

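// wraps several buffers into one logical buffer whose size is the sum of its parts; freeing or clearing it forwards to each underlying buffer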
ggml_backend_buffer_t ggml_backend_multi_buffer_alloc_buffer(ggml_backend_buffer_t * buffers, size_t n_buffers) {
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) malloc(sizeof(struct ggml_backend_multi_buffer_context));
    ctx->n_buffers = n_buffers;
    ctx->buffers = (ggml_backend_buffer_t *) malloc(n_buffers * sizeof(ggml_backend_buffer_t));

    GGML_ASSERT(ctx->buffers != NULL);

    size_t total_size = 0;
    for (size_t i = 0; i < n_buffers; i++) {
        ctx->buffers[i] = buffers[i];
        total_size += ggml_backend_buffer_get_size(buffers[i]);
    }

    return ggml_backend_buffer_init(buffers[0]->buft, ggml_backend_multi_buffer_i, ctx, total_size);
}

bool ggml_backend_buffer_is_multi_buffer(ggml_backend_buffer_t buffer) {
    GGML_ASSERT(buffer);
    return buffer->iface.free_buffer == ggml_backend_multi_buffer_free_buffer;
}

void ggml_backend_multi_buffer_set_usage(ggml_backend_buffer_t buffer, enum ggml_backend_buffer_usage usage) {
    GGML_ASSERT(buffer);
    GGML_ASSERT(ggml_backend_buffer_is_multi_buffer(buffer));
    ggml_backend_multi_buffer_context * ctx = (ggml_backend_multi_buffer_context *) buffer->context;
    for (size_t i = 0; i < ctx->n_buffers; i++) {
        ggml_backend_buffer_set_usage(ctx->buffers[i], usage);
    }
}

// creates a copy of the tensor with the same memory layout
static struct ggml_tensor * ggml_dup_tensor_layout(struct ggml_context * ctx, const struct ggml_tensor * tensor) {
    struct ggml_tensor * dup = ggml_dup_tensor(ctx, tensor);
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        dup->nb[i] = tensor->nb[i];
    }
    return dup;
}

static bool ggml_is_view_op(enum ggml_op op) {
    return op == GGML_OP_VIEW || op == GGML_OP_RESHAPE || op == GGML_OP_PERMUTE || op == GGML_OP_TRANSPOSE;
}

// scheduler

#ifndef GGML_SCHED_MAX_BACKENDS
#define GGML_SCHED_MAX_BACKENDS 16
#endif

#ifndef GGML_SCHED_MAX_SPLIT_INPUTS
#define GGML_SCHED_MAX_SPLIT_INPUTS 30
#endif

#ifndef GGML_SCHED_MAX_COPIES
#define GGML_SCHED_MAX_COPIES 4
#endif

struct ggml_backend_sched_split {
    int backend_id;
    int i_start;
    int i_end;
    struct ggml_tensor * inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_inputs;
    // graph view of this split
    struct ggml_cgraph graph;
};

struct ggml_backend_sched {
    bool is_reset; // true if the scheduler has been reset since the last graph split
    bool is_alloc;

    int n_backends;

    ggml_backend_t backends[GGML_SCHED_MAX_BACKENDS];
    ggml_backend_buffer_type_t bufts[GGML_SCHED_MAX_BACKENDS];
    ggml_gallocr_t galloc;

    // hash map of the nodes in the graph
    struct ggml_hash_set  hash_set;
    int                 * hv_tensor_backend_ids; // [hash_set.size]
    struct ggml_tensor ** hv_tensor_copies;      // [hash_set.size][n_backends][n_copies]

    int * node_backend_ids; // [graph_size]
    int * leaf_backend_ids; // [graph_size]

    int * prev_node_backend_ids; // [graph_size]
    int * prev_leaf_backend_ids; // [graph_size]

    // copy of the graph with modified inputs
    struct ggml_cgraph graph;

    // graph splits
    struct ggml_backend_sched_split * splits;
    int n_splits;
    int splits_capacity;

    // pipeline parallelism support
    int n_copies;
    int cur_copy;
    int next_copy;
    ggml_backend_event_t events[GGML_SCHED_MAX_BACKENDS][GGML_SCHED_MAX_COPIES];
    struct ggml_tensor * graph_inputs[GGML_SCHED_MAX_SPLIT_INPUTS];
    int n_graph_inputs;

    struct ggml_context * ctx;

    ggml_backend_sched_eval_callback callback_eval;
    void * callback_eval_user_data;

    char * context_buffer;
    size_t context_buffer_size;

    bool op_offload;

    int debug;

    // used for debugging graph reallocations [GGML_SCHED_DEBUG_REALLOC]
    // ref: https://github.com/ggml-org/llama.cpp/pull/17617
    int debug_realloc;
    int debug_graph_size;
    int debug_prev_graph_size;
};

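// helpers for the scheduler hash table: each tensor hashes to a backend id in hv_tensor_backend_ids and to a flattened [hash_set.size][n_backends][n_copies] array of copies in hv_tensor_copies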
#define hash_id(tensor) ggml_hash_find_or_insert(&sched->hash_set, tensor)
#define tensor_backend_id(tensor) sched->hv_tensor_backend_ids[hash_id(tensor)]
#define tensor_id_copy(id, backend_id, copy_id) sched->hv_tensor_copies[(id) * sched->n_backends * sched->n_copies + (backend_id) * sched->n_copies + (copy_id)]
#define tensor_copy(tensor, backend_id, copy_id) tensor_id_copy(hash_id(tensor), backend_id, copy_id)

// returns the priority of the backend, lower id is higher priority
static int ggml_backend_sched_backend_id(ggml_backend_sched_t sched, ggml_backend_t backend) {
    for (int i = 0; i < sched->n_backends; i++) {
        if (sched->backends[i] == backend) {
            return i;
        }
    }
    return -1;
}

static int ggml_backend_sched_backend_from_buffer(ggml_backend_sched_t sched, const struct ggml_tensor * tensor, const struct ggml_tensor * op) {
    ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
    if (buffer == NULL) {
        return -1;
    }

    // find highest prio backend that supports the buffer type and the op
    for (int i = 0; i < sched->n_backends; i++) {
        if (ggml_backend_supports_buft(sched->backends[i], buffer->buft) &&
            ggml_backend_supports_op(sched->backends[i], op)) {
            return i;
        }
    }

#ifndef NDEBUG
    GGML_LOG_DEBUG("%s: warning: no backend supports op %s with a weight with buffer type %s used in tensor %s, the weight will need to be copied\n",
        __func__, ggml_op_desc(tensor), ggml_backend_buffer_name(buffer), tensor->name);
#endif

    return -1;
}

#if 0
#define GGML_SCHED_MAX_SPLITS_DEBUG 4096
static char causes[GGML_DEFAULT_GRAPH_SIZE*16 + GGML_SCHED_MAX_SPLITS_DEBUG*GGML_SCHED_MAX_SPLIT_INPUTS][128]; // debug only
#define SET_CAUSE(node, ...) sprintf(causes[hash_id(node)], __VA_ARGS__)
#define GET_CAUSE(node) causes[hash_id(node)]
#else
#define SET_CAUSE(node, ...)
#define GET_CAUSE(node) ""
#endif

// returns the backend that should be used for the node based on the current locations
static int ggml_backend_sched_backend_id_from_cur(ggml_backend_sched_t sched, struct ggml_tensor * tensor) {
    // assign pre-allocated nodes to their backend
    int cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor, tensor);
    if (cur_backend_id != -1) {
        SET_CAUSE(tensor, "1.dst");
        return cur_backend_id;
    }

    // view_src
    if (tensor->view_src != NULL) {
        cur_backend_id = ggml_backend_sched_backend_from_buffer(sched, tensor->view_src, tensor);
        if (cur_backend_id != -1) {
            SET_CAUSE(tensor, "1.vsrc");
            return cur_backend_id;
        }
    }

    if (tensor->buffer || (tensor->view_src && tensor->view_src->buffer)) {
        // since the tensor is pre-allocated, it cannot be moved to another backend
        ggml_backend_buffer_t buffer = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
        GGML_ABORT("pre-allocated tensor (%s) in a buffer (%s) that cannot run the operation (%s)", tensor->name, ggml_backend_buffer_name(buffer), ggml_op_name(tensor->op));
    }

    // graph input
    if (tensor->flags & GGML_TENSOR_FLAG_INPUT) {
        cur_backend_id = sched->n_backends - 1; // last backend (assumed CPU)
        SET_CAUSE(tensor, "1.inp");
        return cur_backend_id;
    }

    // operations with weights are preferably run on the same backend as the weights
    for (int i = 0; i < GGML_MAX_SRC; i++) {
        const struct ggml_tensor * src = tensor->src[i];
        if (src == NULL) {
            continue;
        }
        // skip ROPE since the rope freqs tensor is too small to choose a backend based on it
        // not an ideal solution
        if (tensor->op != GGML_OP_ROPE && src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
            int src_backend_id = ggml_backend_sched_backend_from_buffer(sched, src, tensor);
            // check if a backend with higher prio wants to offload the op
            if (sched->op_offload && src_backend_id == sched->n_backends - 1 && ggml_backend_buffer_is_host(src->buffer)) {
                for (int b = 0; b < src_backend_id; b++) {
                    if (ggml_backend_supports_op(sched->backends[b], tensor) && ggml_backend_offload_op(sched->backends[b], tensor)) {
                        SET_CAUSE(tensor, "1.off");
                        return b;
                    }
                }
            }
            SET_CAUSE(tensor, "1.wgt%d", i);
            return src_backend_id;
        }
    }

    return -1;
}

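// formats a size in K/M units into a static buffer (not thread-safe; used only for debug output)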
static char * fmt_size(size_t size) {
    static char buffer[128];
    if (size >= 1024*1024) {
        snprintf(buffer, sizeof(buffer), "%zuM", size/1024/1024);
    } else {
        snprintf(buffer, sizeof(buffer), "%zuK", size/1024);
    }
    return buffer;
}

static void ggml_backend_sched_print_assignments(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    int cur_split = 0;
    for (int i = 0; i < graph->n_nodes; i++) {
        if (cur_split < sched->n_splits && i == sched->splits[cur_split].i_start) {
            ggml_backend_t split_backend = sched->backends[sched->splits[cur_split].backend_id];
            GGML_LOG_DEBUG("\n## SPLIT #%d: %s # %d inputs", cur_split, ggml_backend_name(split_backend),
                sched->splits[cur_split].n_inputs);
            for (int j = 0; j < sched->splits[cur_split].n_inputs; j++) {
                if (j == 0) {
                    GGML_LOG_DEBUG(": ");
                }
                GGML_LOG_DEBUG("[%s (%5.5s)] ", sched->splits[cur_split].inputs[j]->name,
                    fmt_size(ggml_nbytes(sched->splits[cur_split].inputs[j])));
            }
            GGML_LOG_DEBUG("\n");
            cur_split++;
        }
        struct ggml_tensor * node = graph->nodes[i];
        if (ggml_is_view_op(node->op)) {
            continue;
        }
        if (sched->debug > 1) {
            ggml_backend_t tensor_backend = ggml_backend_sched_get_tensor_backend(sched, node);
            GGML_LOG_DEBUG("node #%3d (%10.10s): %20.20s (%5.5s) [%5.5s %8.8s] use=%d,c=%d:", i, ggml_op_name(node->op), node->name,
                fmt_size(ggml_nbytes(node)), tensor_backend ? ggml_backend_name(tensor_backend) : "NULL", GET_CAUSE(node),
                graph->use_counts[ggml_hash_find(&graph->visited_hash_set, node)], node->flags & GGML_TENSOR_FLAG_COMPUTE ? 1 : 0);
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * src = node->src[j];
                if (src == NULL) {
                    continue;
                }
                ggml_backend_t src_backend = ggml_backend_sched_get_tensor_backend(sched, src);
                GGML_LOG_DEBUG(" %20.20s (%5.5s) [%5.5s %8.8s]", src->name,
                    fmt_size(ggml_nbytes(src)), src_backend ? ggml_backend_name(src_backend) : "NULL", GET_CAUSE(src));
            }
            GGML_LOG_DEBUG("\n");
        }
    }
}

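// returns true if backend_id supports the buffer type where t is allocated (or, if not yet allocated, the buffer type of its assigned backend)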
static bool ggml_backend_sched_buffer_supported(ggml_backend_sched_t sched, struct ggml_tensor * t, int backend_id) {
    ggml_backend_buffer_t buf = t->view_src ? t->view_src->buffer : t->buffer;
    ggml_backend_buffer_type_t buft = NULL;

    if (buf) {
        // the tensor is already allocated
        buft = buf->buft;
    } else {
        // see if the tensor already has a backend assigned, and use the buffer type of that backend
        int tensor_backend_id = tensor_backend_id(t);
        if (tensor_backend_id == -1 && t->view_src) {
            tensor_backend_id = tensor_backend_id(t->view_src);
        }
        if (tensor_backend_id != -1) {
            buft = sched->bufts[tensor_backend_id];
        }
    }

    return buft != NULL && ggml_backend_supports_buft(sched->backends[backend_id], buft);
}

static void ggml_backend_sched_set_if_supported(ggml_backend_sched_t sched, struct ggml_tensor * node, int cur_backend_id, int * node_backend_id) {
    if (ggml_backend_supports_op(sched->backends[cur_backend_id], node)) {
        *node_backend_id = cur_backend_id;
        SET_CAUSE(node, "2.sup");
    }
}

// assigns backends to ops and splits the graph into subgraphs that can be computed on the same backend
void ggml_backend_sched_split_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
    // reset splits
    sched->n_splits = 0;
    sched->n_graph_inputs = 0;
    sched->is_reset = false;

    struct ggml_init_params params = {
        /* .mem_size =   */ sched->context_buffer_size,
        /* .mem_buffer = */ sched->context_buffer,
        /* .no_alloc =   */ true
    };

    ggml_free(sched->ctx);

    sched->ctx = ggml_init(params);
    if (sched->ctx == NULL) {
        GGML_ABORT("%s: failed to initialize context\n", __func__);
    }

    // pass 1: assign backends to ops with pre-allocated inputs
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        int * leaf_backend_id = &tensor_backend_id(leaf);
        // do not overwrite user assignments
        if (*leaf_backend_id == -1) {
            *leaf_backend_id = ggml_backend_sched_backend_id_from_cur(sched, leaf);
        }
    }

    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int * node_backend_id = &tensor_backend_id(node);
        // do not overwrite user assignments
        if (*node_backend_id == -1) {
            *node_backend_id = ggml_backend_sched_backend_id_from_cur(sched, node);

#if 0
            // src
            if (node->op == GGML_OP_NONE) {
                continue;
            }

            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * src = node->src[j];
                if (src == NULL) {
                    continue;
                }
                int * src_backend_id = &tensor_backend_id(src);
                if (*src_backend_id == -1) {
                    *src_backend_id = ggml_backend_sched_backend_id_from_cur(sched, src);
                }
            }
#endif
        }
    }

    // pass 2: expand current backend assignments
    // assign the same backend to adjacent nodes
    // expand gpu backends (i.e. non last prio) up and down, ignoring cpu (the lowest priority backend)
    // thus, cpu will never be used unless weights are on cpu, or there are no gpu ops between cpu ops
    // ops unsupported by the backend being expanded will be left unassigned so that they can be assigned later when the locations of their inputs are known
    // expand gpu down
    {
        int cur_backend_id = -1;
        for (int i = 0; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                if (*node_backend_id == sched->n_backends - 1) {
                    // skip cpu (lowest prio backend)
                    cur_backend_id = -1;
                } else {
                    cur_backend_id = *node_backend_id;
                }
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }
    // expand gpu up
    {
        int cur_backend_id = -1;
        for (int i = graph->n_nodes - 1; i >= 0; i--) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                if (*node_backend_id == sched->n_backends - 1) {
                    // skip cpu (lowest prio backend)
                    cur_backend_id = -1;
                } else {
                    cur_backend_id = *node_backend_id;
                }
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }
    // expand rest down
    {
        int cur_backend_id = -1;
        for (int i = 0; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                cur_backend_id = *node_backend_id;
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }
    // expand rest up
    {
        int cur_backend_id = -1;
        for (int i = graph->n_nodes - 1; i >= 0; i--) {
            struct ggml_tensor * node = graph->nodes[i];
            if (ggml_is_view_op(node->op)) {
                continue;
            }
            int * node_backend_id = &tensor_backend_id(node);
            if (*node_backend_id != -1) {
                cur_backend_id = *node_backend_id;
            } else if (cur_backend_id != -1) {
                ggml_backend_sched_set_if_supported(sched, node, cur_backend_id, node_backend_id);
            }
        }
    }

    // pass 3: upgrade nodes to higher prio backends with compatible buffer types
    // if the tensor is already in the same buffer type (*) as another higher priority backend, we should move it there
    // however, we also need to verify that the sources are in compatible buffer types
    // (*) the actual requirement is more relaxed, the buffer type of the backend should be supported by all the users of this tensor further down the graph
    // however, this is slow to verify, so we have a more strict requirement that the buffer type is the same
    // this is not uncommon since multiple backends can use host memory, with the same buffer type (e.g. BLAS and CPU)
    // additionally, set remaining unassigned nodes to the backend with the most supported inputs
    // only nodes that could not be assigned during expansion due to the backend not supporting the op should be unassigned at this point
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        if (ggml_is_view_op(node->op)) {
            continue;
        }
        int * node_backend_id = &tensor_backend_id(node);
        if (*node_backend_id == -1) {
            // unassigned node: find the backend with the most supported inputs
            int n_supported_best = -1;
            for (int b = 0; b < sched->n_backends; b++) {
                if (ggml_backend_supports_op(sched->backends[b], node)) {
                    int n_supported = 0;
                    for (int j = 0; j < GGML_MAX_SRC; j++) {
                        struct ggml_tensor * src = node->src[j];
                        if (src == NULL) {
                            continue;
                        }
                        if ((tensor_backend_id(src) != -1 || tensor_backend_id(src->view_src) != -1) && ggml_backend_sched_buffer_supported(sched, src, b)) {
                            n_supported++;
                        }
                    }
                    if (n_supported > n_supported_best) {
                        n_supported_best = n_supported;
                        *node_backend_id = b;
                        SET_CAUSE(node, "3.best");
                    }
                }
            }
        } else {
            // assigned node: upgrade to higher prio backend if possible
            for (int b = 0; b < *node_backend_id; b++) {
                if (sched->bufts[b] == sched->bufts[*node_backend_id] && ggml_backend_supports_op(sched->backends[b], node)) {
                    bool supported = true;
                    for (int j = 0; j < GGML_MAX_SRC; j++) {
                        struct ggml_tensor * src = node->src[j];
                        if (src == NULL) {
                            continue;
                        }
                        if (!ggml_backend_sched_buffer_supported(sched, src, b)) {
                            supported = false;
                            break;
                        }
                    }
                    if (supported) {
                        *node_backend_id = b;
                        SET_CAUSE(node, "3.upg");
                        break;
                    }
                }
            }
        }
    }

    // pass 4: assign backends to remaining src from dst and view_src
    for (int i = 0; i < graph->n_nodes; i++) {
        struct ggml_tensor * node = graph->nodes[i];
        int * cur_backend_id = &tensor_backend_id(node);
        if (node->view_src != NULL && *cur_backend_id == -1) {
            *cur_backend_id = tensor_backend_id(node->view_src);
            SET_CAUSE(node, "4.vsrc");
        }
        for (int j = 0; j < GGML_MAX_SRC; j++) {
            struct ggml_tensor * src = node->src[j];
            if (src == NULL) {
                continue;
            }
            int * src_backend_id = &tensor_backend_id(src);
            if (*src_backend_id == -1) {
                if (src->view_src != NULL) {
                    // views are always on the same backend as the source
                    *src_backend_id = tensor_backend_id(src->view_src);
                    SET_CAUSE(src, "4.vsrc");
                } else {
                    *src_backend_id = *cur_backend_id;
                    SET_CAUSE(src, "4.cur");
                }
            }
        }
        // if the node is still unassigned, assign it to the first backend that supports it
        for (int b = 0; b < sched->n_backends && *cur_backend_id == -1; b++) {
            ggml_backend_sched_set_if_supported(sched, node, b, cur_backend_id);
        }
        GGML_ASSERT(*cur_backend_id != -1);
    }

    // pass 5: split graph, find tensors that need to be copied
    {
        int i_split = 0;
        struct ggml_backend_sched_split * split = &sched->splits[0];
        // find the backend of the first split, skipping view ops
        int i = 0;
        for (; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];
            if (!ggml_is_view_op(node->op)) {
                split->backend_id = tensor_backend_id(node);
                break;
            }
        }
        split->i_start = 0;
        split->n_inputs = 0;
        int cur_backend_id = split->backend_id;
        for (; i < graph->n_nodes; i++) {
            struct ggml_tensor * node = graph->nodes[i];

            if (ggml_is_view_op(node->op)) {
                continue;
            }

            const int node_backend_id = tensor_backend_id(node);

            GGML_ASSERT(node_backend_id != -1); // all nodes should be assigned by now, this can happen if there is no CPU fallback

            // check if we should start a new split based on the sources of the current node
            bool need_new_split = false;
            if (node_backend_id == cur_backend_id && split->n_inputs > 0) {
                for (int j = 0; j < GGML_MAX_SRC; j++) {
                    struct ggml_tensor * src = node->src[j];
                    if (src == NULL) {
                        continue;
                    }
                    // check if a weight is on a different and incompatible backend
                    // by starting a new split, the memory of the previously offloaded weights can be reused
                    if (src->buffer != NULL && src->buffer->usage == GGML_BACKEND_BUFFER_USAGE_WEIGHTS) {
                        int src_backend_id = tensor_backend_id(src);
                        if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
                            need_new_split = true;
                            break;
                        }
                    }
                    // check if the split has too many inputs
                    // FIXME: count the number of inputs instead of only checking when full
                    if (split->n_inputs == GGML_SCHED_MAX_SPLIT_INPUTS) {
                        const size_t id = hash_id(src);
                        int src_backend_id = sched->hv_tensor_backend_ids[id];
                        bool supported = ggml_backend_sched_buffer_supported(sched, src, cur_backend_id);
                        if (src_backend_id != cur_backend_id && tensor_id_copy(id, cur_backend_id, 0) == NULL && !supported) {
                            need_new_split = true;
                            break;
                        }
                    }
                }
            }

            if (node_backend_id != cur_backend_id || need_new_split) {
                split->i_end = i;
                i_split++;
                if (i_split >= sched->splits_capacity) {
                    sched->splits_capacity *= 2;
                    sched->splits = (ggml_backend_sched_split *)
                        realloc(sched->splits, sched->splits_capacity * sizeof(struct ggml_backend_sched_split));
                    GGML_ASSERT(sched->splits != NULL);
                }
                split = &sched->splits[i_split];
                split->backend_id = node_backend_id;
                split->i_start = i;
                split->n_inputs = 0;
                cur_backend_id = node_backend_id;
            }

            // find inputs that are not on the same backend
            for (int j = 0; j < GGML_MAX_SRC; j++) {
                struct ggml_tensor * src = node->src[j];
                if (src == NULL) {
                    continue;
                }

                size_t src_id = hash_id(src);
                const int src_backend_id = sched->hv_tensor_backend_ids[src_id];
                GGML_ASSERT(src_backend_id != -1); // all inputs should be assigned by now

                if (src->flags & GGML_TENSOR_FLAG_INPUT && sched->n_copies > 1) {
                    if (tensor_id_copy(src_id, src_backend_id, 0) == NULL) {
                        ggml_backend_t backend = sched->backends[src_backend_id];
                        for (int c = 0; c < sched->n_copies; c++) {
                            struct ggml_tensor * tensor_copy;
                            if (c == sched->cur_copy) {
                                tensor_copy = src; // use the original tensor as the current copy
                            } else {
                                tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
                                ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
                            }
                            ggml_set_input(tensor_copy);
                            ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
                            tensor_id_copy(src_id, src_backend_id, c) = tensor_copy;
                            SET_CAUSE(tensor_copy, "4.cpy");
                        }
                        int n_graph_inputs = sched->n_graph_inputs++;
                        GGML_ASSERT(n_graph_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
                        sched->graph_inputs[n_graph_inputs] = src;
                    }
                }

                if (src_backend_id != cur_backend_id && !ggml_backend_sched_buffer_supported(sched, src, cur_backend_id)) {
                    // create a copy of the input in the split's backend
                    if (tensor_id_copy(src_id, cur_backend_id, 0) == NULL) {
                        ggml_backend_t backend = sched->backends[cur_backend_id];
                        for (int c = 0; c < sched->n_copies; c++) {
                            struct ggml_tensor * tensor_copy = ggml_dup_tensor_layout(sched->ctx, src);
                            ggml_format_name(tensor_copy, "%s#%s#%d", ggml_backend_name(backend), src->name, c);
                            if (sched->n_copies > 1) {
                                ggml_set_input(tensor_copy);
                                ggml_set_output(tensor_copy); // prevent ggml-alloc from overwriting the tensor
                            }
                            tensor_id_copy(src_id, cur_backend_id, c) = tensor_copy;
                            SET_CAUSE(tensor_copy, "4.cpy");
                        }
                        int n_inputs = split->n_inputs++;
                        GGML_ASSERT(n_inputs < GGML_SCHED_MAX_SPLIT_INPUTS);
                        split->inputs[n_inputs] = src;
                    }
                    node->src[j] = tensor_id_copy(src_id, cur_backend_id, sched->cur_copy);
                }
            }
        }
        split->i_end = graph->n_nodes;
        sched->n_splits = i_split + 1;
    }

    if (sched->debug) {
        ggml_backend_sched_print_assignments(sched, graph);
    }

    // swap node_backend_ids and leaf_backend_ids with prevs
    {
        int * tmp = sched->node_backend_ids;
        sched->node_backend_ids = sched->prev_node_backend_ids;
        sched->prev_node_backend_ids = tmp;

        tmp = sched->leaf_backend_ids;
        sched->leaf_backend_ids = sched->prev_leaf_backend_ids;
        sched->prev_leaf_backend_ids = tmp;
    }

    int graph_size = std::max(graph->n_nodes, graph->n_leafs) + sched->n_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sched->n_copies;

    // remember the actual graph_size for performing reallocation checks later [GGML_SCHED_DEBUG_REALLOC]
    sched->debug_prev_graph_size = sched->debug_graph_size;
    sched->debug_graph_size = graph_size;

    if (sched->graph.size < graph_size) {
        sched->graph.size = graph_size;
        sched->graph.nodes = (ggml_tensor **) realloc(sched->graph.nodes, graph_size * sizeof(struct ggml_tensor *));
        sched->graph.leafs = (ggml_tensor **) realloc(sched->graph.leafs, graph_size * sizeof(struct ggml_tensor *));
        GGML_ASSERT(sched->graph.nodes != NULL);
        GGML_ASSERT(sched->graph.leafs != NULL);
    }
    sched->graph.n_nodes = 0;
    sched->graph.n_leafs = 0;

    struct ggml_cgraph * graph_copy = &sched->graph;

    for (int i = 0; i < sched->n_splits; i++) {
        struct ggml_backend_sched_split * split = &sched->splits[i];
        split->graph = ggml_graph_view(graph, split->i_start, split->i_end);

        // Optimize this split of the graph. This needs to happen before we make graph_copy,
        // so they are in sync.
        ggml_backend_graph_optimize(sched->backends[split->backend_id], &split->graph);

        // add inputs to the graph copy so that they are allocated by ggml-alloc at the start of the split
        for (int j = 0; j < split->n_inputs; j++) {
            assert(graph_copy->size > (graph_copy->n_nodes + 1));

            struct ggml_tensor * input = split->inputs[j];
            const size_t input_id = hash_id(input);
            struct ggml_tensor * input_cpy = tensor_id_copy(input_id, split->backend_id, sched->cur_copy);

            // add a dependency to the input source so that it is not freed before the copy is done
            struct ggml_tensor * input_dep = ggml_view_tensor(sched->ctx, input);
            input_dep->src[0] = input;
            sched->node_backend_ids[graph_copy->n_nodes] = sched->hv_tensor_backend_ids[input_id];
            graph_copy->nodes[graph_copy->n_nodes++] = input_dep;

            // add a dependency to the input copy so that it is allocated at the start of the split
            sched->node_backend_ids[graph_copy->n_nodes] = split->backend_id;
            graph_copy->nodes[graph_copy->n_nodes++] = input_cpy;
        }

        for (int j = split->i_start; j < split->i_end; j++) {
            assert(graph_copy->size > graph_copy->n_nodes);
            sched->node_backend_ids[graph_copy->n_nodes] = tensor_backend_id(graph->nodes[j]);
            graph_copy->nodes[graph_copy->n_nodes++] = graph->nodes[j];
        }
    }

    if (sched->n_copies > 1) {
        // add input copies as leafs so that they are allocated first
        for (int i = 0; i < sched->n_graph_inputs; i++) {
            struct ggml_tensor * input = sched->graph_inputs[i];
            size_t id = hash_id(input);
            int backend_id = tensor_backend_id(input);
            for (int c = 0; c < sched->n_copies; c++) {
                struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
                sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
                assert(graph_copy->size > graph_copy->n_leafs);
                graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
            }
        }

        for (int i = 0; i < sched->n_splits; i++) {
            struct ggml_backend_sched_split * split = &sched->splits[i];
            int backend_id = split->backend_id;
            for (int j = 0; j < split->n_inputs; j++) {
                struct ggml_tensor * input = split->inputs[j];
                size_t id = hash_id(input);
                for (int c = 0; c < sched->n_copies; c++) {
                    struct ggml_tensor * input_cpy = tensor_id_copy(id, backend_id, c);
                    sched->leaf_backend_ids[graph_copy->n_leafs] = backend_id;
                    assert(graph_copy->size > graph_copy->n_leafs);
                    graph_copy->leafs[graph_copy->n_leafs++] = input_cpy;
                }
            }
        }
    }

    // add leafs from the original graph
    for (int i = 0; i < graph->n_leafs; i++) {
        struct ggml_tensor * leaf = graph->leafs[i];
        sched->leaf_backend_ids[graph_copy->n_leafs] = tensor_backend_id(leaf);
        assert(graph_copy->size > graph_copy->n_leafs);
        graph_copy->leafs[graph_copy->n_leafs++] = leaf;
    }
}

1393static bool ggml_backend_sched_alloc_splits(ggml_backend_sched_t sched) {
1394    bool backend_ids_changed = false;
1395    for (int i = 0; i < sched->graph.n_nodes; i++) {
1396        if (sched->node_backend_ids[i] != sched->prev_node_backend_ids[i] &&
1397            sched->bufts[sched->node_backend_ids[i]] != sched->bufts[sched->prev_node_backend_ids[i]]) {
1398            backend_ids_changed = true;
1399            break;
1400        }
1401    }
1402    if (!backend_ids_changed) {
1403        for (int i = 0; i < sched->graph.n_leafs; i++) {
1404            if (sched->leaf_backend_ids[i] != sched->prev_leaf_backend_ids[i] &&
1405                sched->bufts[sched->leaf_backend_ids[i]] != sched->bufts[sched->prev_leaf_backend_ids[i]]) {
1406                backend_ids_changed = true;
1407                break;
1408            }
1409        }
1410    }
1411
1412    // allocate graph
1413    if (backend_ids_changed || !ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
1414#ifndef NDEBUG
1415        GGML_LOG_DEBUG("%s: failed to allocate graph, reserving (backend_ids_changed = %d)\n", __func__, backend_ids_changed);
1416#endif
1417
1418        if (sched->debug_realloc > 0) {
1419            // we are interested only in situations where the graph was reallocated even though its size remained the same [GGML_SCHED_DEBUG_REALLOC]
1420            // example: https://github.com/ggml-org/llama.cpp/pull/17143
1421            const bool unexpected = !backend_ids_changed && sched->debug_prev_graph_size == sched->debug_graph_size;
1422
1423            if (unexpected || sched->debug_realloc > 1) {
1424                GGML_ABORT("%s: unexpected graph reallocation (graph size = %d, nodes = %d, leafs = %d), debug_realloc = %d\n", __func__,
1425                        sched->debug_graph_size, sched->graph.n_nodes, sched->graph.n_leafs, sched->debug_realloc);
1426            }
1427        }
1428
1429        // the re-allocation may cause the split inputs to be moved to a different address
        // synchronize without ggml_backend_sched_synchronize to avoid resetting the copy index (next_copy)
1431        for (int i = 0; i < sched->n_backends; i++) {
1432            ggml_backend_synchronize(sched->backends[i]);
1433        }
1434
1435        ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids);
1436        if (!ggml_gallocr_alloc_graph(sched->galloc, &sched->graph)) {
1437            GGML_LOG_ERROR("%s: failed to allocate graph\n", __func__);
1438            return false;
1439        }
1440    }
1441
1442    return true;
1443}
1444
1445static enum ggml_status ggml_backend_sched_compute_splits(ggml_backend_sched_t sched) {
1446    GGML_ASSERT(sched);
1447    struct ggml_backend_sched_split * splits = sched->splits;
1448
1449    ggml_tensor * prev_ids_tensor = nullptr;
1450    std::vector<int32_t> ids;
1451    std::vector<ggml_bitset_t> used_ids;
1452
1453    for (int split_id = 0; split_id < sched->n_splits; split_id++) {
1454        struct ggml_backend_sched_split * split = &splits[split_id];
1455        int split_backend_id = split->backend_id;
1456        ggml_backend_t split_backend = sched->backends[split_backend_id];
1457
1458        // copy the input tensors to the split backend
1459        for (int input_id = 0; input_id < split->n_inputs; input_id++) {
1460            ggml_backend_t input_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[input_id]);
1461            struct ggml_tensor * input = split->inputs[input_id];
1462            struct ggml_tensor * input_cpy = tensor_copy(input, split_backend_id, sched->cur_copy);
1463
1464            if (input->flags & GGML_TENSOR_FLAG_INPUT) {
1465                // inputs from the user must be copied immediately to prevent the user overwriting the data before the copy is done
1466                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1467                    ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
1468                } else {
1469                    ggml_backend_synchronize(split_backend);
1470                }
1471                ggml_backend_tensor_copy(input, input_cpy);
1472            } else {
1473                // wait for the split backend to finish using the input before overwriting it
1474                if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1475                    ggml_backend_event_wait(split_backend, sched->events[split_backend_id][sched->cur_copy]);
1476                } else {
1477                    ggml_backend_synchronize(split_backend);
1478                }
1479
1480                // when offloading MoE weights, we can reduce the amount of data copied by copying only the experts that are used
                ggml_tensor * node = split->graph.n_nodes > 0 ? split->graph.nodes[0] : NULL;
                if (node != NULL &&
1483                    ggml_backend_buffer_get_usage(input->buffer) == GGML_BACKEND_BUFFER_USAGE_WEIGHTS &&
1484                    ggml_backend_buffer_is_host(input->buffer) && (
1485                    (node->src[0] == input_cpy && node->op == GGML_OP_MUL_MAT_ID)
1486                    //|| (node->src[1] == input_cpy && node->op == GGML_OP_ADD_ID) /* GGML_OP_ADD_ID weights are small and not worth splitting */
1487                    )) {
1488
1489                    const int64_t n_expert   = node->op == GGML_OP_MUL_MAT_ID ? input->ne[2] : input->ne[1];
1490                    const size_t expert_size = node->op == GGML_OP_MUL_MAT_ID ? input->nb[2] : input->nb[1];
1491
1492                    ggml_backend_synchronize(input_backend);
1493
1494                    // get the ids
1495                    ggml_tensor * ids_tensor = node->src[2];
1496                    ggml_backend_t ids_backend = split_backend;
1497
1498                    // if the ids tensor is also an input of the split, it may not have been copied yet to the split backend
1499                    // in that case, we use the original ids tensor
1500                    for (int i = input_id + 1; i < split->n_inputs; i++) {
1501                        if (ids_tensor == tensor_copy(split->inputs[i], split_backend_id, sched->cur_copy)) {
1502                            ids_tensor = split->inputs[i];
1503                            ids_backend = ggml_backend_sched_get_tensor_backend(sched, split->inputs[i]);
1504                            break;
1505                        }
1506                    }
1507
1508                    if (ids_tensor != prev_ids_tensor) {
1509                        ids.resize(ggml_nbytes(ids_tensor) / sizeof(int32_t));
1510                        ggml_backend_tensor_get_async(ids_backend, ids_tensor, ids.data(), 0, ggml_nbytes(ids_tensor));
1511                        ggml_backend_synchronize(ids_backend);
1512
1513                        // find the used experts
1514                        used_ids.clear();
1515                        used_ids.resize(ggml_bitset_size(n_expert));
1516                        for (int64_t i1 = 0; i1 < ids_tensor->ne[1]; i1++) {
1517                            for (int64_t i0 = 0; i0 < ids_tensor->ne[0]; i0++) {
1518                                int32_t id = ids[i1 * ids_tensor->nb[1]/sizeof(int32_t) + i0 * ids_tensor->nb[0]/sizeof(int32_t)];
1519                                GGML_ASSERT(id >= 0 && id < n_expert);
1520                                ggml_bitset_set(used_ids.data(), id);
1521                            }
1522                        }
1523
1524                        prev_ids_tensor = ids_tensor;
1525                    }
1526
1527                    // group consecutive experts and copy them together
1528                    auto copy_experts = [&](int32_t first_id, int32_t last_id) {
1529                        const size_t expert_offset = first_id * expert_size;
1530                        const size_t expert_size_copy =  (last_id - first_id + 1) * expert_size;
1531                        const size_t padding = std::min<size_t>(expert_size, 512);
1532                        const size_t padding_end = last_id < n_expert - 1 ? padding : 0;
1533
1534                        ggml_backend_tensor_set_async(split_backend,
1535                            input_cpy,
1536                            (const uint8_t *)input->data + expert_offset, expert_offset,
                            // copy a bit extra at the end to ensure there are no NaNs in the padding of the last expert
1538                            // this is necessary for MMQ in the CUDA backend
1539                            expert_size_copy + padding_end);
1540                    };
1541
1542                    int id = 0;
1543                    while (!ggml_bitset_get(used_ids.data(), id)) {
1544                        id++;
1545                    }
1546                    int32_t first_id = id;
1547                    int32_t last_id = first_id;
1548
1549                    for (++id; id < n_expert; ++id) {
1550                        if (!ggml_bitset_get(used_ids.data(), id)) {
1551                            continue;
1552                        }
1553
1554                        if (id == last_id + 1) {
1555                            last_id = id;
1556                            continue;
1557                        }
1558
1559                        copy_experts(first_id, last_id);
1560
1561                        first_id = id;
1562                        last_id = id;
1563                    }
1564                    copy_experts(first_id, last_id);
1565                } else {
                    // try an async copy first; if the backend does not support it, fall back to a sync copy without
                    // synchronizing the dst backend, since the synchronization is already handled here with multiple copies and events
                    // TODO: add public function to facilitate this, since applications do not have direct access to the backend interface
1568                    if (!split_backend->iface.cpy_tensor_async || !split_backend->iface.cpy_tensor_async(input_backend, split_backend, input, input_cpy)) {
1569                        ggml_backend_synchronize(input_backend);
1570                        if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1571                            ggml_backend_event_synchronize(sched->events[split_backend_id][sched->cur_copy]);
1572                        } else {
1573                            ggml_backend_synchronize(split_backend);
1574                        }
1575                        ggml_backend_tensor_copy(input, input_cpy);
1576                    }
1577                }
1578            }
1579        }
1580
1581        if (!sched->callback_eval) {
1582            enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &split->graph);
1583            if (ec != GGML_STATUS_SUCCESS) {
1584                return ec;
1585            }
1586        } else {
1587            // similar to ggml_backend_compare_graph_backend
1588            for (int j0 = 0; j0 < split->graph.n_nodes; j0++) {
1589                struct ggml_tensor * t = split->graph.nodes[j0];
1590
1591                // check if the user needs data from this node
1592                bool need = sched->callback_eval(t, true, sched->callback_eval_user_data);
1593
1594                int j1 = j0;
1595
1596                // determine the range [j0, j1] of nodes that can be computed together
1597                while (!need && j1 < split->graph.n_nodes - 1) {
1598                    t = split->graph.nodes[++j1];
1599                    need = sched->callback_eval(t, true, sched->callback_eval_user_data);
1600                }
1601
1602                struct ggml_cgraph gv = ggml_graph_view(&split->graph, j0, j1 + 1);
1603
1604                enum ggml_status ec = ggml_backend_graph_compute_async(split_backend, &gv);
1605                if (ec != GGML_STATUS_SUCCESS) {
1606                    return ec;
1607                }
1608
1609                // TODO: pass backend to the callback, then the user can decide if they want to synchronize
1610                ggml_backend_synchronize(split_backend);
1611
1612                if (need && !sched->callback_eval(t, false, sched->callback_eval_user_data)) {
1613                    break;
1614                }
1615
1616                j0 = j1;
1617            }
1618        }
1619
1620        // record the event of this copy
1621        if (split->n_inputs > 0) {
1622            if (sched->events[split_backend_id][sched->cur_copy] != NULL) {
1623                ggml_backend_event_record(sched->events[split_backend_id][sched->cur_copy], split_backend);
1624            }
1625        }
1626    }
1627
1628    return GGML_STATUS_SUCCESS;
1629}
1630
1631ggml_backend_sched_t ggml_backend_sched_new(
1632        ggml_backend_t * backends,
1633        ggml_backend_buffer_type_t * bufts,
1634        int n_backends,
1635        size_t graph_size,
1636        bool parallel,
1637        bool op_offload) {
1638    GGML_ASSERT(n_backends > 0);
1639    GGML_ASSERT(n_backends <= GGML_SCHED_MAX_BACKENDS);
1640    GGML_ASSERT(ggml_backend_dev_type(ggml_backend_get_device(backends[n_backends - 1])) == GGML_BACKEND_DEVICE_TYPE_CPU);
1641
1642    struct ggml_backend_sched * sched = (ggml_backend_sched *) calloc(1, sizeof(struct ggml_backend_sched));
1643
1644    const char * GGML_SCHED_DEBUG = getenv("GGML_SCHED_DEBUG");
1645    sched->debug = GGML_SCHED_DEBUG ? atoi(GGML_SCHED_DEBUG) : 0;
1646
1647    sched->debug_realloc = 0;
1648#ifdef GGML_SCHED_NO_REALLOC
1649    sched->debug_realloc = 1;
1650#endif
1651    const char * GGML_SCHED_DEBUG_REALLOC = getenv("GGML_SCHED_DEBUG_REALLOC");
1652    sched->debug_realloc = GGML_SCHED_DEBUG_REALLOC ? atoi(GGML_SCHED_DEBUG_REALLOC) : sched->debug_realloc;
1653
1654    sched->n_backends = n_backends;
1655    sched->n_copies = parallel ? GGML_SCHED_MAX_COPIES : 1;
1656
1657    // initialize hash table
1658    // FIXME: needs to be size*2 to account for leafs (do it in graph_split instead)
1659    sched->hash_set    = ggml_hash_set_new(graph_size);
1660    sched->hv_tensor_backend_ids = (int *) malloc(sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
1661    sched->hv_tensor_copies      = (ggml_tensor **) malloc(sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));
1662
1663    const size_t ggml_sched_max_splits = graph_size; // at most there is one split for each node in the graph
1664    const size_t nodes_size = graph_size + ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2;
1665    sched->node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->node_backend_ids[0]));
1666    sched->leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->leaf_backend_ids[0]));
1667    sched->prev_node_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_node_backend_ids[0]));
1668    sched->prev_leaf_backend_ids = (int *) calloc(nodes_size, sizeof(sched->prev_leaf_backend_ids[0]));
1669
1670    sched->debug_graph_size = 0;
1671    sched->debug_prev_graph_size = 0;
1672
1673    sched->context_buffer_size = ggml_sched_max_splits*GGML_SCHED_MAX_SPLIT_INPUTS*2*sizeof(struct ggml_tensor) + ggml_graph_overhead_custom(graph_size, false);
1674    sched->context_buffer = (char *) malloc(sched->context_buffer_size);
1675
1676    const int initial_splits_capacity = 16;
1677    sched->splits = (ggml_backend_sched_split *) calloc(initial_splits_capacity, sizeof(sched->splits[0]));
1678    sched->splits_capacity = initial_splits_capacity;
1679
1680    for (int b = 0; b < n_backends; b++) {
1681        sched->backends[b] = backends[b];
1682        sched->bufts[b] = bufts ? bufts[b] : ggml_backend_get_default_buffer_type(backends[b]);
1683        GGML_ASSERT(ggml_backend_supports_buft(backends[b], sched->bufts[b]));
1684
1685        if (sched->n_copies > 1) {
1686            for (int c = 0; c < sched->n_copies; c++) {
1687                sched->events[b][c] = ggml_backend_event_new(backends[b]->device);
1688            }
1689        }
1690    }
1691
1692    sched->galloc = ggml_gallocr_new_n(sched->bufts, n_backends);
1693    sched->op_offload = op_offload;
1694
1695    ggml_backend_sched_reset(sched);
1696
1697    return sched;
1698}
1699
1700void ggml_backend_sched_free(ggml_backend_sched_t sched) {
1701    if (sched == NULL) {
1702        return;
1703    }
1704    for (int b = 0; b < sched->n_backends; b++) {
1705        for (int c = 0; c < sched->n_copies; c++) {
1706            ggml_backend_event_free(sched->events[b][c]);
1707        }
1708    }
1709    ggml_gallocr_free(sched->galloc);
1710    ggml_free(sched->ctx);
1711    ggml_hash_set_free(&sched->hash_set);
1712    free(sched->splits);
1713    free(sched->hv_tensor_backend_ids);
1714    free(sched->hv_tensor_copies);
1715    free(sched->node_backend_ids);
1716    free(sched->leaf_backend_ids);
1717    free(sched->prev_node_backend_ids);
1718    free(sched->prev_leaf_backend_ids);
1719    free(sched->context_buffer);
1720    free(sched->graph.nodes);
1721    free(sched->graph.leafs);
1722    free(sched);
1723}
1724
1725void ggml_backend_sched_reset(ggml_backend_sched_t sched) {
1726    GGML_ASSERT(sched);
1727    // reset state for the next run
1728    if (!sched->is_reset) {
1729        ggml_hash_set_reset(&sched->hash_set);
1730        memset(sched->hv_tensor_backend_ids, -1, sched->hash_set.size * sizeof(sched->hv_tensor_backend_ids[0]));
1731        memset(sched->hv_tensor_copies,       0, sched->hash_set.size * sched->n_backends * sched->n_copies * sizeof(struct ggml_tensor *));
1732        sched->is_reset = true;
1733    }
1734    sched->is_alloc = false;
1735}
1736
1737void ggml_backend_sched_reserve_size(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph, size_t * sizes) {
1738    GGML_ASSERT(sched);
1739    GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);
1740    GGML_ASSERT(sizes);
1741
1742    ggml_backend_sched_reset(sched);
1743
1744    ggml_backend_sched_synchronize(sched);
1745
1746    ggml_backend_sched_split_graph(sched, measure_graph);
1747
1748    ggml_gallocr_reserve_n_size(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids, sizes);
1749}
1750
1751bool ggml_backend_sched_reserve(ggml_backend_sched_t sched, struct ggml_cgraph * measure_graph) {
1752    GGML_ASSERT(sched);
1753    GGML_ASSERT((int)sched->hash_set.size >= measure_graph->n_nodes + measure_graph->n_leafs);
1754
1755    ggml_backend_sched_synchronize(sched);
1756
1757    ggml_backend_sched_split_graph(sched, measure_graph);
1758
1759    if (!ggml_gallocr_reserve_n(sched->galloc, &sched->graph, sched->node_backend_ids, sched->leaf_backend_ids)) {
1760        return false;
1761    }
1762
1763    ggml_backend_sched_reset(sched);
1764
1765    return true;
1766}
1767
1768bool ggml_backend_sched_alloc_graph(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
1769    GGML_ASSERT(sched);
1770    GGML_ASSERT((int)sched->hash_set.size >= graph->n_nodes + graph->n_leafs);
1771    GGML_ASSERT(!sched->is_alloc);
1772
1773    sched->cur_copy = sched->next_copy;
1774    sched->next_copy = (sched->next_copy + 1) % sched->n_copies;
1775
1776    ggml_backend_sched_split_graph(sched, graph);
1777
1778    if (!ggml_backend_sched_alloc_splits(sched)) {
1779        return false;
1780    }
1781
1782    sched->is_alloc = true;
1783
1784    return true;
1785}
1786
1787enum ggml_status ggml_backend_sched_graph_compute(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
1788    enum ggml_status err = ggml_backend_sched_graph_compute_async(sched, graph);
1789    ggml_backend_sched_synchronize(sched);
1790    return err;
1791}
1792
1793enum ggml_status ggml_backend_sched_graph_compute_async(ggml_backend_sched_t sched, struct ggml_cgraph * graph) {
1794    GGML_ASSERT(sched);
1795    if (!sched->is_reset && !sched->is_alloc) {
1796        ggml_backend_sched_reset(sched);
1797    }
1798
1799    if (!sched->is_alloc) {
1800        if (!ggml_backend_sched_alloc_graph(sched, graph)) {
1801            return GGML_STATUS_ALLOC_FAILED;
1802        }
1803    }
1804
1805    return ggml_backend_sched_compute_splits(sched);
1806}
1807
1808void ggml_backend_sched_synchronize(ggml_backend_sched_t sched) {
1809    GGML_ASSERT(sched);
1810    for (int i = 0; i < sched->n_backends; i++) {
1811        ggml_backend_synchronize(sched->backends[i]);
1812    }
1813    if (!sched->is_alloc) {
1814        // if the graph is not already allocated, always use copy 0 after a synchronization
1815        // this ensures that during generation the same copy is used every time,
        // which avoids changes in the graph that could cause CUDA graphs (or similar backend graph captures) to be disabled
1817        sched->next_copy = 0;
1818    }
1819}
1820
1821void ggml_backend_sched_set_eval_callback(ggml_backend_sched_t sched, ggml_backend_sched_eval_callback callback, void * user_data) {
1822    GGML_ASSERT(sched);
1823    sched->callback_eval = callback;
1824    sched->callback_eval_user_data = user_data;
1825}
1826
1827int ggml_backend_sched_get_n_splits(ggml_backend_sched_t sched) {
1828    GGML_ASSERT(sched);
1829    return sched->n_splits;
1830}
1831
1832int ggml_backend_sched_get_n_copies(ggml_backend_sched_t sched) {
1833    GGML_ASSERT(sched);
1834    return sched->n_copies;
1835}
1836
1837int ggml_backend_sched_get_n_backends(ggml_backend_sched_t sched) {
1838    GGML_ASSERT(sched);
1839    return sched->n_backends;
1840}
1841
1842ggml_backend_t ggml_backend_sched_get_backend(ggml_backend_sched_t sched, int i) {
1843    GGML_ASSERT(sched);
1844    GGML_ASSERT(i >= 0 && i < sched->n_backends);
1845    return sched->backends[i];
1846}
1847
1848ggml_backend_buffer_type_t ggml_backend_sched_get_buffer_type(ggml_backend_sched_t sched, ggml_backend_t backend) {
1849    GGML_ASSERT(sched);
1850    int backend_index = ggml_backend_sched_backend_id(sched, backend);
1851    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
1852
1853    return sched->bufts[backend_index];
1854}
1855
1856size_t ggml_backend_sched_get_buffer_size(ggml_backend_sched_t sched, ggml_backend_t backend) {
1857    GGML_ASSERT(sched);
1858    int backend_index = ggml_backend_sched_backend_id(sched, backend);
1859    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
1860
1861    return ggml_gallocr_get_buffer_size(sched->galloc, backend_index);
1862}
1863
1864void ggml_backend_sched_set_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node, ggml_backend_t backend) {
1865    GGML_ASSERT(sched);
1866    int backend_index = ggml_backend_sched_backend_id(sched, backend);
1867    GGML_ASSERT(backend_index >= 0 && backend_index < sched->n_backends);
1868    tensor_backend_id(node) = backend_index;
1869    SET_CAUSE(node, "usr");
1870    sched->is_reset = false;
1871}
1872
1873ggml_backend_t ggml_backend_sched_get_tensor_backend(ggml_backend_sched_t sched, struct ggml_tensor * node) {
1874    GGML_ASSERT(sched);
1875    int backend_index = tensor_backend_id(node);
1876    if (backend_index == -1) {
1877        return NULL;
1878    }
1879    return sched->backends[backend_index];
1880}
1881
1882// utils
1883
1884enum ggml_status ggml_backend_view_init(struct ggml_tensor * tensor) {
1885    GGML_ASSERT(tensor);
1886    GGML_ASSERT(tensor->buffer == NULL);
1887    GGML_ASSERT(tensor->view_src != NULL);
1888    GGML_ASSERT(tensor->view_src->buffer != NULL);
1889    GGML_ASSERT(tensor->view_src->data != NULL);
1890
1891    tensor->buffer = tensor->view_src->buffer;
1892    tensor->data = (char *)tensor->view_src->data + tensor->view_offs;
1893    return ggml_backend_buffer_init_tensor(tensor->buffer, tensor);
1894}
1895
1896enum ggml_status ggml_backend_tensor_alloc(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, void * addr) {
1897    GGML_ASSERT(tensor);
1898    GGML_ASSERT(tensor->buffer == NULL);
1899    GGML_ASSERT(tensor->data == NULL);
1900    GGML_ASSERT(tensor->view_src == NULL);
1901    GGML_ASSERT(addr >= ggml_backend_buffer_get_base(buffer));
1902    GGML_ASSERT((char *)addr + ggml_backend_buffer_get_alloc_size(buffer, tensor) <=
1903                (char *)ggml_backend_buffer_get_base(buffer) + ggml_backend_buffer_get_size(buffer));
1904
1905    tensor->buffer = buffer;
1906    tensor->data = addr;
1907    return ggml_backend_buffer_init_tensor(buffer, tensor);
1908}
1909
1910static struct ggml_tensor * graph_copy_dup_tensor(struct ggml_hash_set hash_set, struct ggml_tensor ** node_copies,
1911    struct ggml_context * ctx_allocated, struct ggml_context * ctx_unallocated, struct ggml_tensor * src) {
1912
1913    GGML_ASSERT(src != NULL);
1914    GGML_ASSERT(src->data && "graph must be allocated");
1915
1916    size_t id = ggml_hash_insert(&hash_set, src);
1917    if (id == GGML_HASHSET_ALREADY_EXISTS) {
1918        return node_copies[ggml_hash_find(&hash_set, src)];
1919    }
1920
1921    struct ggml_tensor * dst = ggml_dup_tensor_layout(src->data && !src->view_src ? ctx_allocated : ctx_unallocated, src);
1922    if (src->view_src != NULL) {
1923        dst->view_src = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, src->view_src);
1924        dst->view_offs = src->view_offs;
1925    }
1926    dst->op = src->op;
1927    dst->flags = src->flags;
1928    memcpy(dst->op_params, src->op_params, sizeof(dst->op_params));
1929    ggml_set_name(dst, src->name);
1930
1931    // copy src
1932    for (int i = 0; i < GGML_MAX_SRC; i++) {
1933        struct ggml_tensor * s = src->src[i];
1934        if (s == NULL) {
1935            continue;
1936        }
1937        dst->src[i] = graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, s);
1938    }
1939
1940    node_copies[id] = dst;
1941    return dst;
1942}
1943
1944static void graph_copy_init_tensor(struct ggml_hash_set * hash_set, struct ggml_tensor ** node_copies, bool * node_init, struct ggml_tensor * src) {
1945    size_t id = ggml_hash_find(hash_set, src);
1946    if (node_init[id]) {
1947        return;
1948    }
1949    node_init[id] = true;
1950
1951    struct ggml_tensor * dst = node_copies[id];
1952    if (dst->view_src != NULL) {
1953        graph_copy_init_tensor(hash_set, node_copies, node_init, src->view_src);
1954        enum ggml_status status = ggml_backend_view_init(dst);
1955        GGML_ASSERT(status == GGML_STATUS_SUCCESS);
1956    }
1957    else {
1958        ggml_backend_tensor_copy(src, dst);
1959    }
1960
1961    // init src
1962    for (int i = 0; i < GGML_MAX_SRC; i++) {
1963        struct ggml_tensor * s = src->src[i];
1964        if (s == NULL) {
1965            continue;
1966        }
1967        graph_copy_init_tensor(hash_set, node_copies, node_init, s);
1968    }
1969}
1970
1971struct ggml_backend_graph_copy ggml_backend_graph_copy(ggml_backend_t backend, struct ggml_cgraph * graph) {
1972    GGML_ASSERT(graph);
1973    struct ggml_hash_set hash_set = ggml_hash_set_new(graph->visited_hash_set.size);
1974    struct ggml_tensor ** node_copies = (ggml_tensor **) calloc(hash_set.size, sizeof(node_copies[0])); // NOLINT
1975    bool * node_init = (bool *) calloc(hash_set.size, sizeof(node_init[0]));
1976
1977    struct ggml_init_params params = {
1978        /* .mem_size   = */ ggml_tensor_overhead()*hash_set.size + ggml_graph_overhead_custom(graph->size, false),
1979        /* .mem_buffer = */ NULL,
1980        /* .no_alloc   = */ true
1981    };
1982
1983    struct ggml_context * ctx_allocated = ggml_init(params);
1984    struct ggml_context * ctx_unallocated = ggml_init(params);
1985
1986    if (ctx_allocated == NULL || ctx_unallocated == NULL) {
1987        GGML_LOG_ERROR("%s: failed to allocate context for graph copy\n", __func__);
1988        ggml_hash_set_free(&hash_set);
1989        free(node_copies);
1990        free(node_init);
1991        ggml_free(ctx_allocated);
1992        ggml_free(ctx_unallocated);
1993        return {
1994            /* .buffer           = */ NULL,
1995            /* .ctx_allocated    = */ NULL,
1996            /* .ctx_unallocated  = */ NULL,
1997            /* .graph            = */ NULL,
1998        };
1999    }
2000
2001    // dup nodes
2002    for (int i = 0; i < graph->n_nodes; i++) {
2003        struct ggml_tensor * node = graph->nodes[i];
2004        graph_copy_dup_tensor(hash_set, node_copies, ctx_allocated, ctx_unallocated, node);
2005    }
2006
2007    // allocate nodes
2008    ggml_backend_buffer_t buffer = ggml_backend_alloc_ctx_tensors(ctx_allocated, backend);
2009    if (buffer == NULL) {
2010        GGML_LOG_ERROR("%s: failed to allocate buffer for graph copy\n", __func__);
2011        ggml_hash_set_free(&hash_set);
2012        free(node_copies);
2013        free(node_init);
2014        ggml_free(ctx_allocated);
2015        ggml_free(ctx_unallocated);
2016        return {
2017            /* .buffer           = */ NULL,
2018            /* .ctx_allocated    = */ NULL,
2019            /* .ctx_unallocated  = */ NULL,
2020            /* .graph            = */ NULL,
2021        };
2022    }
2023
2024    //printf("copy buffer size: %zu MB\n", ggml_backend_buffer_get_size(buffer) / 1024 / 1024);
2025
2026    // copy data and init views
2027    for (int i = 0; i < graph->n_nodes; i++) {
2028        struct ggml_tensor * node = graph->nodes[i];
2029        graph_copy_init_tensor(&hash_set, node_copies, node_init, node);
2030    }
2031
2032    // build graph copy
2033    struct ggml_cgraph * graph_copy = ggml_new_graph_custom(ctx_allocated, graph->size, false);
2034    for (int i = 0; i < graph->n_nodes; i++) {
2035        struct ggml_tensor * node = graph->nodes[i];
2036        struct ggml_tensor * node_copy = node_copies[ggml_hash_find(&hash_set, node)];
2037        graph_copy->nodes[i] = node_copy;
2038    }
2039    graph_copy->n_nodes = graph->n_nodes;
2040
2041    ggml_hash_set_free(&hash_set);
2042    free(node_copies);
2043    free(node_init);
2044
2045    return {
2046        /* .buffer           = */ buffer,
2047        /* .ctx_allocated    = */ ctx_allocated,
2048        /* .ctx_unallocated  = */ ctx_unallocated,
2049        /* .graph            = */ graph_copy,
2050    };
2051}
2052
2053void ggml_backend_graph_copy_free(struct ggml_backend_graph_copy copy) {
2054    ggml_backend_buffer_free(copy.buffer);
2055    ggml_free(copy.ctx_allocated);
2056    ggml_free(copy.ctx_unallocated);
2057}
2058
2059bool ggml_backend_compare_graph_backend(ggml_backend_t backend1, ggml_backend_t backend2, struct ggml_cgraph * graph, ggml_backend_eval_callback callback, void * user_data, struct ggml_tensor const * const * test_nodes, size_t num_test_nodes) {
2060    struct ggml_backend_graph_copy copy = ggml_backend_graph_copy(backend2, graph);
2061    if (copy.buffer == NULL) {
2062        return false;
2063    }
2064
2065    struct ggml_cgraph * g1 = graph;
2066    struct ggml_cgraph * g2 = copy.graph;
2067
2068    assert(g1->n_nodes == g2->n_nodes);
2069
2070    if (num_test_nodes != 0) {
2071        GGML_ASSERT(test_nodes);
2072        // Compute the whole graph and only test the output for specific tensors
2073        ggml_backend_graph_compute(backend1, g1);
2074        ggml_backend_graph_compute(backend2, g2);
2075
2076        bool verified = false;
2077        for (int i = 0; i < g1->n_nodes; i++) {
2078            for (size_t j = 0; j < num_test_nodes; ++j) {
2079                if (g1->nodes[i] == test_nodes[j]) {
2080                    callback(i, g1->nodes[i], g2->nodes[i], user_data);
2081                    verified = true;
2082                }
2083            }
2084        }
2085        GGML_ASSERT(verified);
2086    } else {
2087        for (int i = 0; i < g1->n_nodes; i++) {
2088            struct ggml_tensor * t1 = g1->nodes[i];
2089            struct ggml_tensor * t2 = g2->nodes[i];
2090
2091            assert(t1->op == t2->op && ggml_are_same_layout(t1, t2));
2092
2093            struct ggml_cgraph g1v = ggml_graph_view(g1, i, i + 1);
2094            struct ggml_cgraph g2v = ggml_graph_view(g2, i, i + 1);
2095
2096            ggml_backend_graph_compute(backend1, &g1v);
2097            ggml_backend_graph_compute(backend2, &g2v);
2098
2099            if (ggml_is_view_op(t1->op)) {
2100                continue;
2101            }
2102
2103            // compare results, calculate rms etc
2104            if (!callback(i, t1, t2, user_data)) {
2105                break;
2106            }
2107        }
2108    }
2109    ggml_backend_graph_copy_free(copy);
2110
2111    return true;
2112}
2113
2114// CPU backend - buffer
2115
2116static void * ggml_backend_cpu_buffer_get_base(ggml_backend_buffer_t buffer) {
2117    GGML_ASSERT(buffer);
2118    uintptr_t data = (uintptr_t)buffer->context;
2119
2120    // align the buffer
2121    if (data % TENSOR_ALIGNMENT != 0) {
2122        data = GGML_PAD(data, TENSOR_ALIGNMENT);
2123    }
2124
2125    return (void *)data;
2126}
2127
2128static void ggml_backend_cpu_buffer_free_buffer(ggml_backend_buffer_t buffer) {
2129    GGML_ASSERT(buffer);
2130    ggml_aligned_free(buffer->context, buffer->size);
2131}
2132
2133static void ggml_backend_cpu_buffer_memset_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
2134    GGML_ASSERT(tensor);
2135    memset((char *)tensor->data + offset, value, size);
2136
2137    GGML_UNUSED(buffer);
2138}
2139
2140static void ggml_backend_cpu_buffer_set_tensor(ggml_backend_buffer_t buffer, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
2141    GGML_ASSERT(tensor);
2142    memcpy((char *)tensor->data + offset, data, size);
2143
2144    GGML_UNUSED(buffer);
2145}
2146
2147static void ggml_backend_cpu_buffer_get_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
2148    GGML_ASSERT(tensor);
2149    memcpy(data, (const char *)tensor->data + offset, size);
2150
2151    GGML_UNUSED(buffer);
2152}
2153
2154static bool ggml_backend_cpu_buffer_cpy_tensor(ggml_backend_buffer_t buffer, const struct ggml_tensor * src, struct ggml_tensor * dst) {
2155    GGML_ASSERT(src);
2156    if (ggml_backend_buffer_is_host(src->buffer)) {
2157        memcpy(dst->data, src->data, ggml_nbytes(src));
2158        return true;
2159    }
2160    return false;
2161
2162    GGML_UNUSED(buffer);
2163}
2164
2165static void ggml_backend_cpu_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
2166    GGML_ASSERT(buffer);
2167    memset(buffer->context, value, buffer->size);
2168}
2169
2170static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_i = {
2171    /* .free_buffer     = */ ggml_backend_cpu_buffer_free_buffer,
2172    /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
2173    /* .init_tensor     = */ NULL, // no initialization required
2174    /* .memset_tensor   = */ ggml_backend_cpu_buffer_memset_tensor,
2175    /* .set_tensor      = */ ggml_backend_cpu_buffer_set_tensor,
2176    /* .get_tensor      = */ ggml_backend_cpu_buffer_get_tensor,
2177    /* .cpy_tensor      = */ ggml_backend_cpu_buffer_cpy_tensor,
2178    /* .clear           = */ ggml_backend_cpu_buffer_clear,
2179    /* .reset           = */ NULL,
2180};
2181
2182static const struct ggml_backend_buffer_i ggml_backend_cpu_buffer_from_ptr_i = {
2183    /* .free_buffer     = */ NULL, // ptr is not owned by the buffer, so it does not need to be freed
2184    /* .get_base        = */ ggml_backend_cpu_buffer_get_base,
2185    /* .init_tensor     = */ NULL, // no initialization required
2186    /* .memset_tensor   = */ ggml_backend_cpu_buffer_memset_tensor,
2187    /* .set_tensor      = */ ggml_backend_cpu_buffer_set_tensor,
2188    /* .get_tensor      = */ ggml_backend_cpu_buffer_get_tensor,
2189    /* .cpy_tensor      = */ ggml_backend_cpu_buffer_cpy_tensor,
2190    /* .clear           = */ ggml_backend_cpu_buffer_clear,
2191    /* .reset           = */ NULL,
2192};
2193
2194// CPU backend buffer type
2195
2196// this buffer type is defined here to make it available to all backends
2197
2198static const char * ggml_backend_cpu_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
2199    return "CPU";
2200
2201    GGML_UNUSED(buft);
2202}
2203
2204static ggml_backend_buffer_t ggml_backend_cpu_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
2205    void * data = ggml_aligned_malloc(size);
2206
2207    if (data == NULL) {
2208        GGML_LOG_ERROR("%s: failed to allocate buffer of size %zu\n", __func__, size);
2209        return NULL;
2210    }
2211
2212    return ggml_backend_buffer_init(buft, ggml_backend_cpu_buffer_i, data, size);
2213}
2214
2215static size_t ggml_backend_cpu_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
2216    return TENSOR_ALIGNMENT;
2217
2218    GGML_UNUSED(buft);
2219}
2220
2221static bool ggml_backend_cpu_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
2222    return true;
2223
2224    GGML_UNUSED(buft);
2225}
2226
2227ggml_backend_buffer_type_t ggml_backend_cpu_buffer_type(void) {
2228    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
2229        /* .iface   = */ {
2230            /* .get_name         = */ ggml_backend_cpu_buffer_type_get_name,
2231            /* .alloc_buffer     = */ ggml_backend_cpu_buffer_type_alloc_buffer,
2232            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
2233            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
2234            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
2235            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
2236        },
2237        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
2238        /* .context = */ NULL,
2239    };
2240
2241    return &ggml_backend_cpu_buffer_type;
2242}
2243
2244static const char * ggml_backend_cpu_buffer_from_ptr_type_get_name(ggml_backend_buffer_type_t buft) {
2245    return "CPU_Mapped";
2246
2247    GGML_UNUSED(buft);
2248}
2249
2250static ggml_backend_buffer_type_t ggml_backend_cpu_buffer_from_ptr_type(void) {
2251    static struct ggml_backend_buffer_type ggml_backend_cpu_buffer_type = {
2252        /* .iface   = */ {
2253            /* .get_name         = */ ggml_backend_cpu_buffer_from_ptr_type_get_name,
2254            /* .alloc_buffer     = */ ggml_backend_cpu_buffer_type_alloc_buffer,
2255            /* .get_alignment    = */ ggml_backend_cpu_buffer_type_get_alignment,
2256            /* .get_max_size     = */ NULL, // defaults to SIZE_MAX
2257            /* .get_alloc_size   = */ NULL, // defaults to ggml_nbytes
2258            /* .is_host          = */ ggml_backend_cpu_buffer_type_is_host,
2259        },
2260        /* .device  = */ NULL, // FIXME ggml_backend_reg_dev_get(ggml_backend_cpu_reg(), 0),
2261        /* .context = */ NULL,
2262    };
2263
2264    return &ggml_backend_cpu_buffer_type;
2265}
2266
2267ggml_backend_buffer_t ggml_backend_cpu_buffer_from_ptr(void * ptr, size_t size) {
2268    GGML_ASSERT((uintptr_t)ptr % TENSOR_ALIGNMENT == 0 && "buffer pointer must be aligned");
2269    return ggml_backend_buffer_init(ggml_backend_cpu_buffer_from_ptr_type(), ggml_backend_cpu_buffer_from_ptr_i, ptr, size);
2270}