#include "ggml-zdnn.h"
#include "ggml-impl.h"
#include "ggml-backend-impl.h"

#include "ggml-zdnn/common.hpp"
#include "ggml-zdnn/mmf.hpp"
#include "ggml-zdnn/utils.hpp"
#include "ggml.h"

#include <vector>
#include <memory>
#include <csignal>  // raise(SIGTRAP)
#include <unistd.h>

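// forward pass for GGML_OP_MUL_MAT: src0 holds the weights, src1 the
// activations; every supported float type is routed through ggml_zdnn_mul_mat_f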
static void ggml_zdnn_compute_forward_mul_mat(
    const ggml_backend_zdnn_context * ctx,
          ggml_tensor * dst) {

    const ggml_tensor * src0 = dst->src[0];  // weights
    const ggml_tensor * src1 = dst->src[1];  // inputs

    // TODO: implement support for quantized types
    // we currently only support f32, f16, and bf16
    ggml_zdnn_mul_mat_f(ctx, src0, src1, dst);
}

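// dispatch a single graph node to its zDNN implementation;
// returns false if the op is not supported by this backend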
static bool ggml_zdnn_compute_forward(
    ggml_backend_zdnn_context * ctx,
    ggml_tensor * dst) {

    switch (dst->op) {
        case GGML_OP_MUL_MAT:
            {
                ggml_zdnn_compute_forward_mul_mat(ctx, dst);
            } break;

        default:
            return false;
    }

    return true;
}

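// execute the compute graph node by node: empty tensors and layout-only ops
// (reshape/view/permute/transpose) are skipped, and any node the backend
// cannot handle trips the GGML_ASSERT below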
static enum ggml_status ggml_zdnn_graph_compute(ggml_backend_t backend, ggml_cgraph * gf) {
    ggml_backend_zdnn_context        * ctx     = (       ggml_backend_zdnn_context *)backend->context;
    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *)backend->device->context;

    ctx->gf = gf;
    for (int i = 0; i < gf->n_nodes; i++) {
        ggml_tensor * node = gf->nodes[i];

        if (ggml_is_empty(node)
            || node->op == GGML_OP_NONE
            || node->op == GGML_OP_RESHAPE
            || node->op == GGML_OP_VIEW
            || node->op == GGML_OP_PERMUTE
            || node->op == GGML_OP_TRANSPOSE) {
            continue;
        }

        bool ok = ggml_zdnn_compute_forward(ctx, node);
        if (!ok) {
            GGML_LOG_ERROR("%s: unsupported op %s (%s)\n",
                           __func__, node->name, ggml_op_name(node->op));
        }

        GGML_ASSERT(ok);
    }

    return GGML_STATUS_SUCCESS;

    GGML_UNUSED(ctx_dev);
}

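// op support check used by the scheduler: matrix multiplication is only
// accepted for contiguous, non-view 2D tensors in f32/f16/bf16 whose
// dimensions fit within the device's maximum dimension index size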
static bool ggml_zdnn_supports_op(const ggml_backend_zdnn_device_context * ctx_dev, const ggml_tensor * op) {
    switch (op->op) {
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_VIEW:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_PERMUTE:
            return true;

        case GGML_OP_MUL_MAT:
            {
                const ggml_tensor * weights = op->src[0];
                const ggml_tensor * inputs  = op->src[1];

                const int64_t ne10 = inputs->ne[0];
                const int64_t ne0  = op->ne[0];
                const int64_t ne1  = op->ne[1];

                const int64_t max_batch = ctx_dev->max_size;

                if (!ggml_is_matrix(weights) || !ggml_is_matrix(inputs) ||
                    !ggml_is_contiguous(weights) || !ggml_is_contiguous(inputs) ||
                    weights->view_src != nullptr || inputs->view_src != nullptr ||
                    ne0 > max_batch || ne1 > max_batch || ne10 > max_batch) {
                    return false;
                }

                switch (weights->type) {
                    case GGML_TYPE_F32:
                    case GGML_TYPE_F16:
                    case GGML_TYPE_BF16:
                        return true;
                    default:
                        return false;
                }
            } break;

        default:
            return false;
    }
}

////////////////////////////////////////////////////////////////////////////////

//
// globals
//

// initialised in ggml_backend_zdnn_reg
static ggml_backend_reg    g_ggml_backend_zdnn_reg;
static ggml_backend_device g_ggml_backend_zdnn_device;

static ggml_backend_zdnn_device_context g_ggml_ctx_dev_main = {
    /* .zdnn_device           = */ 0,
    /* .zdnn_device_ref_count = */ 0,
    /* .has_parmblkformat_0   = */ false,
    /* .has_parmblkformat_1   = */ false,
    /* .max_size              = */ 0,
    /* .name                  = */ "",
};

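// reference-counted acquisition of the (single) NNPA device; the first
// acquire probes the installed parameter block formats and the maximum
// dimension index size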
static int ggml_backend_zdnn_device_acq(ggml_backend_zdnn_device_context * ctx) {
    assert(ctx != NULL);

    if (ctx->zdnn_device == 0) {
        ctx->zdnn_device = 1;
    }

    if (ctx->zdnn_device >= 1) {
        ctx->has_parmblkformat_0 = zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_0);
        ctx->has_parmblkformat_1 = zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_1);
        ctx->max_size = zdnn_get_nnpa_max_dim_idx_size();
        strncpy(ctx->name, GGML_ZDNN_NAME, sizeof(ctx->name) - 1);
    }

    ctx->zdnn_device_ref_count++;
    return ctx->zdnn_device;
}

static void ggml_backend_zdnn_device_rel(ggml_backend_zdnn_device_context * ctx) {
    assert(ctx != NULL);
    assert(ctx->zdnn_device_ref_count > 0);

    ctx->zdnn_device_ref_count--;
    if (ctx->zdnn_device_ref_count == 0) {
        if (ctx->zdnn_device >= 0) {
            ctx->zdnn_device = 0;
        }
    }
}

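// allocate the backend context for the given device; for static zDNN builds
// the library is initialised explicitly via zdnn_init() (the STATIC_LIB guard)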
static ggml_backend_zdnn_context * ggml_zdnn_init(ggml_backend_dev_t dev) {
    GGML_LOG_INFO("%s: allocating\n", __func__);
    GGML_LOG_INFO("%s: found 1 device\n", __func__);

    #ifdef STATIC_LIB
    zdnn_init();
    #endif

    ggml_backend_zdnn_context * ctx = new ggml_backend_zdnn_context();
    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *)dev->context;

    int device = 1;
    GGML_LOG_INFO("%s: picking default device: %s\n", __func__, ctx_dev->name);

    ctx->device = device;
    GGML_LOG_INFO("%s: NNPA name: %s\n", __func__, ctx_dev->name);
    GGML_LOG_INFO("%s: NNPA_PARMBLKFORMAT_0 = %s\n", __func__, ctx_dev->has_parmblkformat_0 ? "true" : "false");
    GGML_LOG_INFO("%s: NNPA_PARMBLKFORMAT_1 = %s\n", __func__, ctx_dev->has_parmblkformat_1 ? "true" : "false");

    ctx->gf = nullptr;

    return ctx;
}

static void ggml_zdnn_free(ggml_backend_zdnn_context * ctx) {
    GGML_LOG_INFO("%s: deallocating\n", __func__);
    delete ctx;
}

//
// backend interface
//

static void ggml_backend_zdnn_buffer_free_buffer(ggml_backend_buffer_t buffer) {
    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;

    for (const auto & buf_ptr : ctx->buffers) {
        ggml_backend_zdnn_buffer * buf = buf_ptr.get();

        // Free any extra buffer allocated for the tensor. E.g., bias for GGML_OP_MUL_MAT
        if (buf->extra != nullptr) free(buf->extra->data);
        if (buf->ztensor.buffer_size > 0) ZDNN_CHECK(zdnn_free_ztensor_buffer(&buf->ztensor));
    }

    delete ctx;
}

static void * ggml_backend_zdnn_buffer_get_base(ggml_backend_buffer_t buffer) {
    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;
    return ctx->all_data;
}

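// create the zDNN bookkeeping for a newly placed tensor: a host-side buffer
// record plus its zTensor descriptors. MUL_MAT destinations additionally get
// a zero-filled companion buffer, used as the bias/addend operand of the
// zDNN matmul call (ggml's MUL_MAT itself has no bias input).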
static enum ggml_status ggml_backend_zdnn_buffer_init_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor) {
    if (tensor->view_src != NULL) {
        assert(tensor->view_src->buffer->buft == buffer->buft);
        return GGML_STATUS_SUCCESS;
    }

    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;

    const int64_t tsize = ggml_nbytes(tensor);
    int buffer_idx = ctx->n_buffers;

    std::unique_ptr<ggml_backend_zdnn_buffer> zdnn_buffer = std::make_unique<ggml_backend_zdnn_buffer>();
    zdnn_buffer->data = tensor->data;
    zdnn_buffer->size = tsize;
    zdnn_buffer->extra = nullptr;
    snprintf(zdnn_buffer->name, GGML_MAX_NAME, "%s", tensor->name);

    ggml_zdnn_init_tensor(zdnn_buffer.get(), tensor);
    tensor->extra = zdnn_buffer.get();

    switch (tensor->op) {
        case GGML_OP_MUL_MAT:
            {
                std::unique_ptr<ggml_backend_zdnn_buffer> zdnn_bias_buffer = std::make_unique<ggml_backend_zdnn_buffer>();
                zdnn_bias_buffer->data = (void *)calloc(tensor->ne[0], ggml_element_size(tensor));
                zdnn_bias_buffer->size = ggml_element_size(tensor) * tensor->ne[0];
                snprintf(zdnn_bias_buffer->name, GGML_MAX_NAME, "%.*s (bias)",
                         GGML_MAX_NAME - (int)sizeof(" (bias)"), tensor->name);

                const int64_t bias_dim[GGML_MAX_DIMS] = { 1, 1, 1, tensor->ne[0] };
                ggml_zdnn_create_tensor(zdnn_bias_buffer->pre_tfm_desc,
                                        zdnn_bias_buffer->tfm_desc,
                                        zdnn_bias_buffer->ztensor,
                                        tensor, bias_dim, ZDNN_1D);

                ggml_zdnn_load_tensor(zdnn_bias_buffer->ztensor, zdnn_bias_buffer->data);
                zdnn_buffer->extra = zdnn_bias_buffer.get();

                ctx->buffers.push_back(std::move(zdnn_bias_buffer));
                ctx->n_buffers++;
            } break;
        default:
            break;
    }

    ctx->buffers.push_back(std::move(zdnn_buffer));
    ctx->n_buffers++;

    // GGML_LOG_INFO("%s: initialised tensor '%s' in buffer %d, size = %8.2f MiB\n",
    //               __func__, tensor->name, buffer_idx, tsize);

    return GGML_STATUS_SUCCESS;

    GGML_UNUSED(buffer_idx);
}

static void ggml_backend_zdnn_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
    memset((char *)tensor->data + offset, value, size);

    GGML_UNUSED(buffer);
}

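// setting tensor data copies the raw bytes into the host buffer and then
// transforms them into the zTensor layout via ggml_zdnn_load_tensor;
// compute buffers that are re-set have their zTensor reset first so the
// data can be transformed again (see the LLAMA_SET_ROWS note below)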
static void ggml_backend_zdnn_buffer_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
    memcpy((char *)tensor->data + offset, data, size);

    ggml_backend_zdnn_buffer * extra = (ggml_backend_zdnn_buffer *)tensor->extra;

    // Fixes the LLAMA_SET_ROWS bug
    // see: https://github.com/ggml-org/llama.cpp/issues/15414
    if (tensor->buffer->usage == GGML_BACKEND_BUFFER_USAGE_COMPUTE && extra->ztensor.is_transformed) zdnn_reset_ztensor(&extra->ztensor);
    if (extra->ztensor.is_transformed == false) ggml_zdnn_load_tensor(extra->ztensor, tensor->data);

    GGML_UNUSED(buffer);
}

static void ggml_backend_zdnn_buffer_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
    memcpy(data, (const char *)tensor->data + offset, size);

    GGML_UNUSED(buffer);
}

static void ggml_backend_zdnn_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
    ggml_backend_zdnn_buffer_context * ctx = (ggml_backend_zdnn_buffer_context *)buffer->context;

    memset(ctx->all_data, value, ctx->all_size);
}

static ggml_backend_buffer_i ggml_backend_zdnn_buffer_i = {
    /* .free_buffer   = */ ggml_backend_zdnn_buffer_free_buffer,
    /* .get_base      = */ ggml_backend_zdnn_buffer_get_base,
    /* .init_tensor   = */ ggml_backend_zdnn_buffer_init_tensor,
    /* .memset_tensor = */ ggml_backend_zdnn_buffer_memset_tensor,
    /* .set_tensor    = */ ggml_backend_zdnn_buffer_set_tensor,
    /* .get_tensor    = */ ggml_backend_zdnn_buffer_get_tensor,
    /* .cpy_tensor    = */ NULL,
    /* .clear         = */ ggml_backend_zdnn_buffer_clear,
    /* .reset         = */ NULL,
};

//
// default buffer type
//

static const char * ggml_backend_zdnn_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(buft);
}

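// allocate one page-aligned host region for the whole buffer; individual
// tensors are sub-allocated out of it by ggml and registered with zDNN in
// init_tensor above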
static ggml_backend_buffer_t ggml_backend_zdnn_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
    ggml_backend_zdnn_buffer_context * ctx = new ggml_backend_zdnn_buffer_context();

    const size_t size_page = sysconf(_SC_PAGESIZE);

    size_t size_aligned = size;
    if ((size_aligned % size_page) != 0) {
        size_aligned += size_page - (size_aligned % size_page);
    }

    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *)buft->device->context;

    GGML_ASSERT(ctx_dev->zdnn_device >= 0);
    int device = ctx_dev->zdnn_device; GGML_UNUSED(device);

    ctx->all_data  = ggml_aligned_malloc(size_aligned);
    ctx->all_size  = size_aligned;
    ctx->owned     = true;
    ctx->n_buffers = 1;

    if (ctx->all_data != NULL) {
        std::unique_ptr<ggml_backend_zdnn_buffer> zdnn_buffer = std::make_unique<ggml_backend_zdnn_buffer>();
        zdnn_buffer->data = ctx->all_data;
        zdnn_buffer->size = size_aligned;
        ctx->buffers.push_back(std::move(zdnn_buffer));
    }

    if (size_aligned > 0 && (ctx->all_data == NULL)) {
        GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n",
                       __func__, size_aligned / 1024.0 / 1024.0);
        delete ctx;
        return NULL;
    }

    return ggml_backend_buffer_init(buft, ggml_backend_zdnn_buffer_i, ctx, size);
}

static size_t ggml_backend_zdnn_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
    return 256;

    GGML_UNUSED(buft);
}

static bool ggml_backend_zdnn_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
    /* while it resides in host memory, additional transformation is needed */
    return false;

    GGML_UNUSED(buft);
}

ggml_backend_buffer_type_t ggml_backend_zdnn_buffer_type(void) {
    static ggml_backend_buffer_type ggml_backend_buffer_type_zdnn = {
        /* .iface   = */ {
            /* .get_name       = */ ggml_backend_zdnn_buffer_type_get_name,
            /* .alloc_buffer   = */ ggml_backend_zdnn_buffer_type_alloc_buffer,
            /* .get_alignment  = */ ggml_backend_zdnn_buffer_type_get_alignment,
            /* .get_max_size   = */ NULL,
            /* .get_alloc_size = */ NULL,  // defaults to ggml_nbytes
            /* .is_host        = */ ggml_backend_zdnn_buffer_type_is_host,
        },
        /* .device  = */ &g_ggml_backend_zdnn_device,
        /* .context = */ NULL,
    };

    return &ggml_backend_buffer_type_zdnn;
}

//
// backend
//

static const char * ggml_backend_zdnn_name(ggml_backend_t backend) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(backend);
}

static void ggml_backend_zdnn_free(ggml_backend_t backend) {
    ggml_backend_zdnn_context * ctx = (ggml_backend_zdnn_context *)backend->context;

    ggml_zdnn_free(ctx);
    free(backend);
}

static enum ggml_status ggml_backend_zdnn_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
    return ggml_zdnn_graph_compute(backend, cgraph);
}

static ggml_backend_i ggml_backend_zdnn_i = {
    /* .get_name           = */ ggml_backend_zdnn_name,
    /* .free               = */ ggml_backend_zdnn_free,
    /* .set_tensor_async   = */ NULL,
    /* .get_tensor_async   = */ NULL,
    /* .cpy_tensor_async   = */ NULL,
    /* .synchronize        = */ NULL,
    /* .graph_plan_create  = */ NULL,
    /* .graph_plan_free    = */ NULL,
    /* .graph_plan_update  = */ NULL,
    /* .graph_plan_compute = */ NULL,
    /* .graph_compute      = */ ggml_backend_zdnn_graph_compute,
    /* .event_record       = */ NULL,
    /* .event_wait         = */ NULL,
    /* .graph_optimize     = */ NULL,
};

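// a ggml_guid is a 16-byte identifier; the 16-character string below is
// reinterpreted in place to serve as this backend's GUID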
static ggml_guid_t ggml_backend_zdnn_guid(void) {
    static const char * guid_str = "IBM-ZDNN-ACCELER";
    return reinterpret_cast<ggml_guid_t>((void *)guid_str);
}

bool ggml_backend_is_zdnn(ggml_backend_t backend) {
    return backend != NULL &&
           ggml_guid_matches(backend->guid, ggml_backend_zdnn_guid());

    GGML_UNUSED(backend);
}

//
// backend device
//

static const char * ggml_backend_zdnn_device_get_name(ggml_backend_dev_t dev) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(dev);
}

static const char * ggml_backend_zdnn_device_get_description(ggml_backend_dev_t dev) {
    return "IBM Z Neural Network Processing Assist (NNPA)";

    GGML_UNUSED(dev);
}

static void ggml_backend_zdnn_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
    *free  = 0;
    *total = 0;

    GGML_UNUSED(dev);
}

static enum ggml_backend_dev_type ggml_backend_zdnn_device_get_type(ggml_backend_dev_t dev) {
    return GGML_BACKEND_DEVICE_TYPE_ACCEL;

    GGML_UNUSED(dev);
}

static void ggml_backend_zdnn_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
    props->name        = ggml_backend_zdnn_device_get_name(dev);
    props->description = ggml_backend_zdnn_device_get_description(dev);
    props->type        = ggml_backend_zdnn_device_get_type(dev);
    ggml_backend_zdnn_device_get_memory(dev, &props->memory_free, &props->memory_total);
    props->caps = (ggml_backend_dev_caps) {
        /* .async                = */ false,
        /* .host_buffer          = */ false,
        /* .buffer_from_host_ptr = */ false,
        /* .events               = */ false
    };
}

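// create a backend instance for this device: the zDNN context from
// ggml_zdnn_init is wrapped in a heap-allocated ggml_backend object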
static ggml_backend_t ggml_backend_zdnn_device_init(ggml_backend_dev_t dev, const char * params) {
    ggml_backend_zdnn_context * ctx = ggml_zdnn_init(dev);
    if (ctx == NULL) {
        GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
        return NULL;
    }

    ggml_backend_t backend = (ggml_backend *)malloc(sizeof(ggml_backend));
    *backend = (ggml_backend) {
        /* .guid       = */ ggml_backend_zdnn_guid(),
        /* .iface      = */ ggml_backend_zdnn_i,
        /* .device     = */ dev,
        /* .context    = */ ctx
    };

    return backend;

    GGML_UNUSED(params);
}

static ggml_backend_buffer_type_t ggml_backend_zdnn_device_get_buffer_type(ggml_backend_dev_t dev) {
    return ggml_backend_zdnn_buffer_type();

    GGML_UNUSED(dev);
}

static bool ggml_backend_zdnn_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
    ggml_backend_zdnn_device_context * ctx_dev = (ggml_backend_zdnn_device_context *) dev->context;

    return ggml_zdnn_supports_op(ctx_dev, op);
}

static bool ggml_backend_zdnn_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
    return
        buft->iface.get_name == ggml_backend_zdnn_buffer_type_get_name;

    GGML_UNUSED(dev);
}

static ggml_backend_device_i ggml_backend_zdnn_device_i = {
    /* .get_name             = */ ggml_backend_zdnn_device_get_name,
    /* .get_description      = */ ggml_backend_zdnn_device_get_description,
    /* .get_memory           = */ ggml_backend_zdnn_device_get_memory,
    /* .get_type             = */ ggml_backend_zdnn_device_get_type,
    /* .get_props            = */ ggml_backend_zdnn_device_get_props,
    /* .init_backend         = */ ggml_backend_zdnn_device_init,
    /* .get_buffer_type      = */ ggml_backend_zdnn_device_get_buffer_type,
    /* .get_host_buffer_type = */ NULL,
    /* .buffer_from_host_ptr = */ NULL,
    /* .supports_op          = */ ggml_backend_zdnn_device_supports_op,
    /* .supports_buft        = */ ggml_backend_zdnn_device_supports_buft,
    /* .offload_op           = */ NULL,
    /* .event_new            = */ NULL,
    /* .event_free           = */ NULL,
    /* .event_synchronize    = */ NULL,
};

//
// backend registry
//

static const char * ggml_backend_zdnn_reg_get_name(ggml_backend_reg_t reg) {
    return GGML_ZDNN_NAME;

    GGML_UNUSED(reg);
}

static size_t ggml_backend_zdnn_reg_device_count(ggml_backend_reg_t reg) {
    if (!zdnn_is_nnpa_installed()) {
        return 0;
    }
    return 1;

    GGML_UNUSED(reg);
}

static ggml_backend_dev_t ggml_backend_zdnn_reg_device_get(ggml_backend_reg_t reg, size_t index) {
    GGML_ASSERT(index == 0);

    return &g_ggml_backend_zdnn_device;

    GGML_UNUSED(reg);
    GGML_UNUSED(index);
}

static ggml_backend_feature g_ggml_backend_zdnn_features[] = {
    { "NNPA", zdnn_is_nnpa_installed() ? "1" : "0" },
    { "NNPA_PARMBLKFORMAT_0", zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_0) ? "1" : "0" },
    { "NNPA_PARMBLKFORMAT_1", zdnn_is_nnpa_parmblk_fmt_installed(1, NNPA_PARMBLKFORMAT_1) ? "1" : "0" },
    { NULL, NULL },
};

static ggml_backend_feature * ggml_backend_zdnn_get_features(ggml_backend_reg_t reg) {
    return g_ggml_backend_zdnn_features;

    GGML_UNUSED(reg);
}

static void * ggml_backend_zdnn_get_proc_address(ggml_backend_reg_t reg, const char * name) {
    if (strcmp(name, "ggml_backend_get_features") == 0) {
        return (void *) ggml_backend_zdnn_get_features;
    }

    return NULL;

    GGML_UNUSED(reg);
}

static ggml_backend_reg_i ggml_backend_zdnn_reg_i = {
    /* .get_name         = */ ggml_backend_zdnn_reg_get_name,
    /* .get_device_count = */ ggml_backend_zdnn_reg_device_count,
    /* .get_device       = */ ggml_backend_zdnn_reg_device_get,
    /* .get_proc_address = */ ggml_backend_zdnn_get_proc_address
};

static void ggml_zdnn_cleanup(void) {
    ggml_backend_zdnn_device_rel(&g_ggml_ctx_dev_main);
}

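// backend registry entry point: acquires the global device context once,
// registers an atexit cleanup, and wires up the global reg/device structs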
// TODO: make thread-safe
ggml_backend_reg_t ggml_backend_zdnn_reg(void) {
    ggml_backend_zdnn_device_acq(&g_ggml_ctx_dev_main);

    // register cleanup callback
    atexit(ggml_zdnn_cleanup);

    {
        g_ggml_backend_zdnn_reg = (ggml_backend_reg) {
            /* .api_version = */ GGML_ZDNN_VERSION,
            /* .iface       = */ ggml_backend_zdnn_reg_i,
            /* .context     = */ NULL
        };

        g_ggml_backend_zdnn_device = (ggml_backend_device) {
            /* .iface       = */ ggml_backend_zdnn_device_i,
            /* .reg         = */ &g_ggml_backend_zdnn_reg,
            /* .context     = */ &g_ggml_ctx_dev_main
        };

        return &g_ggml_backend_zdnn_reg;
    }
}

GGML_BACKEND_DL_IMPL(ggml_backend_zdnn_reg)