1#pragma once
 2
/* device
 *
 * Remote-device query API: each function takes the virtgpu transport
 * handle (`gpu`) and forwards the query to the API-remoting host.
 *
 * NOTE(review): this header declares no includes of its own; it relies
 * on <stdint.h>, <stdbool.h>, <stddef.h>, the ggml types and the
 * apir_* typedefs being in scope before it is included — confirm every
 * includer satisfies that, or add the includes here.
 */

/* NOTE(review): returns void although the name suggests a count —
 * presumably the result is delivered through a side channel or this is
 * fire-and-forget; verify against the implementation. */
void                           apir_device_get_device_count(struct virtgpu * gpu);
/* Number of devices exposed by the remote host. */
int                            apir_device_get_count(struct virtgpu * gpu);
/* Device name / description strings.
 * Ownership of the returned buffer is not visible here — TODO confirm
 * whether the caller must free it. */
char *                         apir_device_get_name(struct virtgpu * gpu);
char *                         apir_device_get_description(struct virtgpu * gpu);
/* Device type tag (raw uint32_t; enum semantics defined elsewhere). */
uint32_t                       apir_device_get_type(struct virtgpu * gpu);
/* Writes free/total device memory (in bytes, presumably — confirm)
 * through the two out-parameters. */
void                           apir_device_get_memory(struct virtgpu * gpu, size_t * free, size_t * total);
/* Asks the host whether it can execute the given ggml operation. */
bool                           apir_device_supports_op(struct virtgpu * gpu, const ggml_tensor * op);
/* Opaque host-side handle for this device's buffer type. */
apir_buffer_type_host_handle_t apir_device_get_buffer_type(struct virtgpu * gpu);
/* Fills the four capability flags (out-parameters) for this device. */
void                           apir_device_get_props(struct virtgpu * gpu,
                                                     bool *           async,
                                                     bool *           host_buffer,
                                                     bool *           buffer_from_host_ptr,
                                                     bool *           events);
/* Wraps an existing host pointer region of `size` bytes as a buffer;
 * `max_tensor_size` bounds the largest tensor it must hold. */
apir_buffer_context_t          apir_device_buffer_from_ptr(struct virtgpu * gpu, size_t size, size_t max_tensor_size);
18
/* buffer-type
 *
 * Queries and allocation against a host-side buffer type, identified by
 * the opaque `host_handle` obtained from apir_device_get_buffer_type().
 */

/* Human-readable buffer-type name.
 * Ownership of the returned string is not visible here — TODO confirm. */
char *                apir_buffer_type_get_name(struct virtgpu * gpu, apir_buffer_type_host_handle_t host_handle);
/* Required allocation alignment, in bytes. */
size_t                apir_buffer_type_get_alignment(struct virtgpu * gpu, apir_buffer_type_host_handle_t host_handle);
/* Maximum size of a single buffer allocation, in bytes. */
size_t                apir_buffer_type_get_max_size(struct virtgpu * gpu, apir_buffer_type_host_handle_t host_handle);
/* Allocates a buffer of `size` bytes of this type on the host and
 * returns its context (by value). */
apir_buffer_context_t apir_buffer_type_alloc_buffer(struct virtgpu *               gpu,
                                                    apir_buffer_type_host_handle_t host_handle,
                                                    size_t                         size);
/* Bytes needed to store tensor `op` in a buffer of this type (may
 * exceed the raw tensor size due to padding/alignment — confirm). */
size_t                apir_buffer_type_get_alloc_size(struct virtgpu *               gpu,
                                                      apir_buffer_type_host_handle_t host_handle,
                                                      const ggml_tensor *            op);
29
/* buffer
 *
 * Operations on an allocated buffer, identified by its
 * apir_buffer_context_t (passed by pointer; the context is owned by the
 * caller until apir_buffer_free_buffer() is called).
 */

/* Base address of the buffer's data region.
 * NOTE(review): whether this is guest-mappable memory or a host-side
 * token is not visible here — confirm before dereferencing. */
void * apir_buffer_get_base(struct virtgpu * gpu, apir_buffer_context_t * buffer_context);
/* Copies `size` bytes from `data` into `tensor`'s storage at byte
 * `offset` within the tensor (mirrors the ggml backend set_tensor
 * contract — confirm offset semantics against the implementation). */
void   apir_buffer_set_tensor(struct virtgpu *        gpu,
                              apir_buffer_context_t * buffer_context,
                              ggml_tensor *           tensor,
                              const void *            data,
                              size_t                  offset,
                              size_t                  size);
/* Copies `size` bytes of `tensor`'s storage starting at byte `offset`
 * out into `data`. */
void   apir_buffer_get_tensor(struct virtgpu *        gpu,
                              apir_buffer_context_t * buffer_context,
                              const ggml_tensor *     tensor,
                              void *                  data,
                              size_t                  offset,
                              size_t                  size);
/* Copies `src` into `dst` on the host side; returns whether the copy
 * was performed (false presumably means "caller must fall back" —
 * confirm). */
bool   apir_buffer_cpy_tensor(struct virtgpu *        gpu,
                              apir_buffer_context_t * buffer_context,
                              const ggml_tensor *     src,
                              const ggml_tensor *     dst);
/* Fills the entire buffer with `value`. */
void   apir_buffer_clear(struct virtgpu * gpu, apir_buffer_context_t * buffer_context, uint8_t value);
/* Releases the host-side buffer; `buffer_context` must not be used
 * afterwards. */
void   apir_buffer_free_buffer(struct virtgpu * gpu, apir_buffer_context_t * buffer_context);
50
/* backend */

/* Ships the compute graph `cgraph` to the remote backend for execution
 * and returns the resulting ggml_status. */
ggml_status apir_backend_graph_compute(struct virtgpu * gpu, ggml_cgraph * cgraph);