#pragma once

// GGML internal header

#include "ggml.h"
#include "gguf.h"

#include <assert.h>
#include <math.h>
#include <stdlib.h> // load `stdlib.h` before other headers to work around MinGW bug: https://sourceforge.net/p/mingw-w64/bugs/192/
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#ifdef __ARM_FEATURE_SVE
#include <arm_sve.h>
#endif // __ARM_FEATURE_SVE

#if defined(__ARM_NEON) && !defined(__CUDACC__) && !defined(__MUSACC__)
// if YCM cannot find <arm_neon.h>, make a symbolic link to it, for example:
//
//   $ ln -sfn /Library/Developer/CommandLineTools/usr/lib/clang/13.1.6/include/arm_neon.h ./src/
//
#include <arm_neon.h>
#endif

#ifdef __cplusplus
extern "C" {
#endif

void ggml_print_backtrace(void);

#ifndef MIN
#    define MIN(a, b) ((a) < (b) ? (a) : (b))
#endif

#ifndef MAX
#    define MAX(a, b) ((a) > (b) ? (a) : (b))
#endif

// required for mmap as gguf only guarantees 32-byte alignment
#define TENSOR_ALIGNMENT 32

// static_assert should be a #define, but if it's not,
// fall back to the _Static_assert C11 keyword.
// if C99 - static_assert is noop
// ref: https://stackoverflow.com/a/53923785/4039976
#ifndef __cplusplus
    #ifndef static_assert
        #if defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201100L)
            #define static_assert(cond, msg) _Static_assert(cond, msg)
        #else
            #define static_assert(cond, msg) struct global_scope_noop_trick
        #endif
    #endif
#endif

static inline int ggml_up32(int n) {
    return (n + 31) & ~31;
}

//static inline int ggml_up64(int n) {
//    return (n + 63) & ~63;
//}

static inline int ggml_up(int n, int m) {
    // assert m is a power of 2
    GGML_ASSERT((m & (m - 1)) == 0);
    return (n + m - 1) & ~(m - 1);
}
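
// usage sketch (illustrative): round a size up to a multiple of a power of two
//
//   ggml_up(100, 64); // -> 128  (100 + 63 = 163; 163 & ~63 = 128)
//   ggml_up(128, 64); // -> 128  (already a multiple of 64)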

// TODO: move to ggml.h? (won't be able to inline)
static bool ggml_are_same_layout(const struct ggml_tensor * a, const struct ggml_tensor * b) {
    if (a->type != b->type) {
        return false;
    }
    for (int i = 0; i < GGML_MAX_DIMS; i++) {
        if (a->ne[i] != b->ne[i]) {
            return false;
        }
        if (a->nb[i] != b->nb[i]) {
            return false;
        }
    }
    return true;
}

static bool ggml_op_is_empty(enum ggml_op op) {
    switch (op) {
        case GGML_OP_NONE:
        case GGML_OP_RESHAPE:
        case GGML_OP_TRANSPOSE:
        case GGML_OP_VIEW:
        case GGML_OP_PERMUTE:
            return true;
        default:
            return false;
    }
}

static inline float ggml_compute_softplus_f32(float input) {
    return (input > 20.0f) ? input : logf(1 + expf(input));
}
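
// note: for x > ~20, log(1 + exp(x)) equals x to within float precision,
// so the early-out above avoids overflow in expf without changing results:
//
//   ggml_compute_softplus_f32(1.0f);  // logf(1 + expf(1.0f)) ~= 1.3133f
//   ggml_compute_softplus_f32(30.0f); // 30.0f (early-out path)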

//
// logging
//

GGML_ATTRIBUTE_FORMAT(2, 3)
GGML_API void ggml_log_internal        (enum ggml_log_level level, const char * format, ...);
GGML_API void ggml_log_callback_default(enum ggml_log_level level, const char * text, void * user_data);

#define GGML_LOG(...)       ggml_log_internal(GGML_LOG_LEVEL_NONE , __VA_ARGS__)
#define GGML_LOG_INFO(...)  ggml_log_internal(GGML_LOG_LEVEL_INFO , __VA_ARGS__)
#define GGML_LOG_WARN(...)  ggml_log_internal(GGML_LOG_LEVEL_WARN , __VA_ARGS__)
#define GGML_LOG_ERROR(...) ggml_log_internal(GGML_LOG_LEVEL_ERROR, __VA_ARGS__)
#define GGML_LOG_DEBUG(...) ggml_log_internal(GGML_LOG_LEVEL_DEBUG, __VA_ARGS__)
#define GGML_LOG_CONT(...)  ggml_log_internal(GGML_LOG_LEVEL_CONT , __VA_ARGS__)

#define GGML_DEBUG 0

#if (GGML_DEBUG >= 1)
#define GGML_PRINT_DEBUG(...) GGML_LOG_DEBUG(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG(...)
#endif

#if (GGML_DEBUG >= 5)
#define GGML_PRINT_DEBUG_5(...) GGML_LOG_DEBUG(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_5(...)
#endif

#if (GGML_DEBUG >= 10)
#define GGML_PRINT_DEBUG_10(...) GGML_LOG_DEBUG(__VA_ARGS__)
#else
#define GGML_PRINT_DEBUG_10(...)
#endif

// tensor params

static void ggml_set_op_params(struct ggml_tensor * tensor, const void * params, size_t params_size) {
    GGML_ASSERT(tensor != NULL); // silence -Warray-bounds warnings
    assert(params_size <= GGML_MAX_OP_PARAMS);
    memcpy(tensor->op_params, params, params_size);
}

static int32_t ggml_get_op_params_i32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    return ((const int32_t *)(tensor->op_params))[i];
}

static float ggml_get_op_params_f32(const struct ggml_tensor * tensor, uint32_t i) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
    return ((const float *)(tensor->op_params))[i];
}

static void ggml_set_op_params_i32(struct ggml_tensor * tensor, uint32_t i, int32_t value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(int32_t));
    ((int32_t *)(tensor->op_params))[i] = value;
}

static void ggml_set_op_params_f32(struct ggml_tensor * tensor, uint32_t i, float value) {
    assert(i < GGML_MAX_OP_PARAMS / sizeof(float));
    ((float *)(tensor->op_params))[i] = value;
}
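
// usage sketch (illustrative values, not a real op):
//
//   int32_t params[] = { 0, 2, 1, 3 };
//   ggml_set_op_params(tensor, params, sizeof(params)); // copy raw bytes into op_params
//   ggml_get_op_params_i32(tensor, 1);                  // -> 2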

struct ggml_map_custom1_op_params {
    ggml_custom1_op_t fun;
    int               n_tasks;
    void            * userdata;
};

struct ggml_map_custom2_op_params {
    ggml_custom2_op_t fun;
    int               n_tasks;
    void            * userdata;
};

struct ggml_map_custom3_op_params {
    ggml_custom3_op_t fun;
    int               n_tasks;
    void            * userdata;
};

struct ggml_custom_op_params {
    ggml_custom_op_t fun;
    int              n_tasks;
    void           * userdata;
};

// bitset

typedef uint32_t ggml_bitset_t;

static_assert(sizeof(ggml_bitset_t) == 4, "bitset_t constants must be updated");
#define BITSET_SHR 5 // log2(sizeof(ggml_bitset_t)*8)
#define BITSET_MASK (sizeof(ggml_bitset_t)*8 - 1)

static size_t ggml_bitset_size(size_t n) {
    return (n + BITSET_MASK) >> BITSET_SHR;
}

static inline bool ggml_bitset_get(const ggml_bitset_t * bitset, size_t i) {
    return !!(bitset[i >> BITSET_SHR] & (1u << (i & BITSET_MASK)));
}

static inline void ggml_bitset_set(ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] |= (1u << (i & BITSET_MASK));
}

static inline void ggml_bitset_clear(ggml_bitset_t * bitset, size_t i) {
    bitset[i >> BITSET_SHR] &= ~(1u << (i & BITSET_MASK));
}
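
// usage sketch (illustrative): a zero-initialized bitset holding 100 flags
//
//   ggml_bitset_t * bs = calloc(ggml_bitset_size(100), sizeof(ggml_bitset_t));
//   ggml_bitset_set(bs, 42);
//   ggml_bitset_get(bs, 42); // -> true
//   ggml_bitset_clear(bs, 42);
//   free(bs);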

// hash set

#define GGML_HASHSET_FULL           ((size_t)-1)
#define GGML_HASHSET_ALREADY_EXISTS ((size_t)-2)

struct ggml_hash_set {
    size_t size;
    ggml_bitset_t * used;       // whether or not the keys are in use i.e. set
    struct ggml_tensor ** keys; // actual tensors in the set, keys[i] is only defined if ggml_bitset_get(used, i)
};

struct ggml_hash_set ggml_hash_set_new(size_t size);
void                 ggml_hash_set_free(struct ggml_hash_set * hash_set);

// returns the minimum size for a hash set that can hold min_sz elements
size_t ggml_hash_size(size_t min_sz);

// remove all elements from the hash set
void ggml_hash_set_reset(struct ggml_hash_set * hash_set);

// returns true if key is in the hash set
static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key);

// returns GGML_HASHSET_FULL if table is full, otherwise the current index of the key or where it should be inserted
static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key);

// returns GGML_HASHSET_ALREADY_EXISTS if key already exists, index otherwise, asserts if table is full
static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);

// return index, asserts if table is full
static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key);

// hash function for ggml_tensor
static inline size_t ggml_hash(const struct ggml_tensor * p) {
    // the last 4 bits are always zero due to alignment
    return (size_t)(uintptr_t)p >> 4;
}

static size_t ggml_hash_find(const struct ggml_hash_set * hash_set, const struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    while (ggml_bitset_get(hash_set->used, i) && hash_set->keys[i] != key) {
        i = (i + 1) % hash_set->size;
        if (i == h) {
            // visited all hash table entries -> not found
            return GGML_HASHSET_FULL;
        }
    }
    return i;
}

static bool ggml_hash_contains(const struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t i = ggml_hash_find(hash_set, key);
    return i != GGML_HASHSET_FULL && ggml_bitset_get(hash_set->used, i);
}

static size_t ggml_hash_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!ggml_bitset_get(hash_set->used, i)) {
            ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            return GGML_HASHSET_ALREADY_EXISTS;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries -> table is full
    GGML_ABORT("fatal error");
}

static size_t ggml_hash_find_or_insert(struct ggml_hash_set * hash_set, struct ggml_tensor * key) {
    size_t h = ggml_hash(key) % hash_set->size;

    // linear probing
    size_t i = h;
    do {
        if (!ggml_bitset_get(hash_set->used, i)) {
            ggml_bitset_set(hash_set->used, i);
            hash_set->keys[i] = key;
            return i;
        }
        if (hash_set->keys[i] == key) {
            return i;
        }
        i = (i + 1) % hash_set->size;
    } while (i != h);

    // visited all hash table entries -> table is full
    GGML_ABORT("fatal error");
}
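
// usage sketch (illustrative; `t` and `n_tensors` are placeholders):
//
//   struct ggml_hash_set set = ggml_hash_set_new(n_tensors);
//   ggml_hash_insert(&set, t);   // -> slot index
//   ggml_hash_insert(&set, t);   // -> GGML_HASHSET_ALREADY_EXISTS
//   ggml_hash_contains(&set, t); // -> true
//   ggml_hash_set_free(&set);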

// computation graph

enum ggml_cgraph_eval_order {
    GGML_CGRAPH_EVAL_ORDER_LEFT_TO_RIGHT = 0,
    GGML_CGRAPH_EVAL_ORDER_RIGHT_TO_LEFT,
    GGML_CGRAPH_EVAL_ORDER_COUNT
};

struct ggml_cgraph {
    int size;    // maximum number of nodes/leafs/grads/grad_accs
    int n_nodes; // number of nodes currently in use
    int n_leafs; // number of leafs currently in use

    struct ggml_tensor ** nodes;      // tensors with data that can change if the graph is evaluated
    struct ggml_tensor ** grads;      // the outputs of these tensors are the gradients of the nodes
    struct ggml_tensor ** grad_accs;  // accumulators for node gradients
    struct ggml_tensor ** leafs;      // tensors with constant data
    int32_t             * use_counts; // number of uses of each tensor, indexed by hash table slot

    struct ggml_hash_set visited_hash_set;

    enum ggml_cgraph_eval_order order;
};

// returns a slice of cgraph with nodes [i0, i1)
// the slice does not have leafs or gradients
// if you need the gradients, get them from the original graph
struct ggml_cgraph ggml_graph_view(struct ggml_cgraph * cgraph, int i0, int i1);

// ggml-alloc.c: true if the operation can reuse memory from its sources
GGML_API bool ggml_op_can_inplace(enum ggml_op op);

// Memory allocation

GGML_API void * ggml_aligned_malloc(size_t size);
GGML_API void   ggml_aligned_free(void * ptr, size_t size);

// FP16 <-> FP32
// ref: https://github.com/Maratyszcza/FP16

static inline float fp32_from_bits(uint32_t w) {
    union {
        uint32_t as_bits;
        float    as_value;
    } fp32;
    fp32.as_bits = w;
    return fp32.as_value;
}

static inline uint32_t fp32_to_bits(float f) {
    union {
        float    as_value;
        uint32_t as_bits;
    } fp32;
    fp32.as_value = f;
    return fp32.as_bits;
}

static inline float ggml_compute_fp16_to_fp32(ggml_fp16_t h) {
    const uint32_t w = (uint32_t) h << 16;
    const uint32_t sign = w & UINT32_C(0x80000000);
    const uint32_t two_w = w + w;

    const uint32_t exp_offset = UINT32_C(0xE0) << 23;
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
    const float exp_scale = 0x1.0p-112f;
#else
    const float exp_scale = fp32_from_bits(UINT32_C(0x7800000));
#endif
    const float normalized_value = fp32_from_bits((two_w >> 4) + exp_offset) * exp_scale;

    const uint32_t magic_mask = UINT32_C(126) << 23;
    const float magic_bias = 0.5f;
    const float denormalized_value = fp32_from_bits((two_w >> 17) | magic_mask) - magic_bias;

    const uint32_t denormalized_cutoff = UINT32_C(1) << 27;
    const uint32_t result = sign |
        (two_w < denormalized_cutoff ? fp32_to_bits(denormalized_value) : fp32_to_bits(normalized_value));
    return fp32_from_bits(result);
}

static inline ggml_fp16_t ggml_compute_fp32_to_fp16(float f) {
#if (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) || defined(__GNUC__) && !defined(__STRICT_ANSI__)) && (!defined(__cplusplus) || __cplusplus >= 201703L)
    const float scale_to_inf  = 0x1.0p+112f;
    const float scale_to_zero = 0x1.0p-110f;
#else
    const float scale_to_inf  = fp32_from_bits(UINT32_C(0x77800000));
    const float scale_to_zero = fp32_from_bits(UINT32_C(0x08800000));
#endif
    float base = (fabsf(f) * scale_to_inf) * scale_to_zero;

    const uint32_t w = fp32_to_bits(f);
    const uint32_t shl1_w = w + w;
    const uint32_t sign = w & UINT32_C(0x80000000);
    uint32_t bias = shl1_w & UINT32_C(0xFF000000);
    if (bias < UINT32_C(0x71000000)) {
        bias = UINT32_C(0x71000000);
    }

    base = fp32_from_bits((bias >> 1) + UINT32_C(0x07800000)) + base;
    const uint32_t bits = fp32_to_bits(base);
    const uint32_t exp_bits = (bits >> 13) & UINT32_C(0x00007C00);
    const uint32_t mantissa_bits = bits & UINT32_C(0x00000FFF);
    const uint32_t nonsign = exp_bits + mantissa_bits;
    return (sign >> 16) | (shl1_w > UINT32_C(0xFF000000) ? UINT16_C(0x7E00) : nonsign);
}

#define GGML_COMPUTE_FP16_TO_FP32(x) ggml_compute_fp16_to_fp32(x)
#define GGML_COMPUTE_FP32_TO_FP16(x) ggml_compute_fp32_to_fp16(x)

#define GGML_FP16_TO_FP32(x) GGML_COMPUTE_FP16_TO_FP32(x)
#define GGML_FP32_TO_FP16(x) GGML_COMPUTE_FP32_TO_FP16(x)
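
// round-trip sketch (illustrative): fp16 has a 10-bit mantissa, so roughly
// 3 decimal digits survive the conversion
//
//   ggml_fp16_t h = GGML_FP32_TO_FP16(3.14159f);
//   float       f = GGML_FP16_TO_FP32(h); // -> ~3.1406f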

static inline float ggml_e8m0_to_fp32(uint8_t x) {
    uint32_t bits; // Stores the raw bit representation of the float

    // Handle special case for minimum exponent (denormalized float)
    if (x == 0) {
        // Bit pattern for 2^(-127):
        // - Sign bit: 0 (positive)
        // - Exponent: 0 (denormalized number)
        // - Mantissa: 0x400000 (0.5 in fractional form)
        // Value = 0.5 * 2^(-126) = 2^(-127)
        bits = 0x00400000;
    }
    // note: disabled as we don't need to handle NaNs
    //// Handle special case for NaN (all bits set)
    //else if (x == 0xFF) {
    //    // Standard quiet NaN pattern:
    //    // - Sign bit: 0
    //    // - Exponent: all 1s (0xFF)
    //    // - Mantissa: 0x400000 (quiet NaN flag)
    //    bits = 0x7FC00000;
    //}
    // Normalized values (most common case)
    else {
        // Construct normalized float by shifting exponent into position:
        // - Exponent field: 8 bits (positions 30-23)
        // - Mantissa: 0 (implicit leading 1)
        // Value = 2^(x - 127)
        bits = (uint32_t) x << 23;
    }

    float result; // Final float value
    // Safely reinterpret bit pattern as float without type-punning issues
    memcpy(&result, &bits, sizeof(float));
    return result;
}

// Equal to ggml_e8m0_to_fp32/2
// Useful with MXFP4 quantization since the E8M0 values are doubled
static inline float ggml_e8m0_to_fp32_half(uint8_t x) {
    uint32_t bits;

    // For x < 2: use precomputed denormal patterns
    if (x < 2) {
        // 0x00200000 = 2^(-128), 0x00400000 = 2^(-127)
        bits = 0x00200000 << x;
    }
    // For x >= 2: normalized exponent adjustment
    else {
        // 0.5 * 2^(x-127) = 2^(x-128) = normalized with exponent (x-1)
        bits = (uint32_t)(x - 1) << 23;
    }
    // Note: NaNs are not handled here

    float result;
    memcpy(&result, &bits, sizeof(float));
    return result;
}

#define GGML_E8M0_TO_FP32(x)      ggml_e8m0_to_fp32(x)
#define GGML_E8M0_TO_FP32_HALF(x) ggml_e8m0_to_fp32_half(x)
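
// spot checks (E8M0 stores only an exponent, so the value is 2^(x - 127)):
//
//   GGML_E8M0_TO_FP32(127);      // -> 1.0f (2^0)
//   GGML_E8M0_TO_FP32(130);      // -> 8.0f (2^3)
//   GGML_E8M0_TO_FP32_HALF(127); // -> 0.5f (2^-1)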

/**
 * Converts brain16 to float32.
 *
 * The bfloat16 floating point format has the following structure:
 *
 *       ┌sign
 *       │
 *       │   ┌exponent
 *       │   │
 *       │   │      ┌mantissa
 *       │   │      │
 *       │┌──┴───┐┌─┴───┐
 *     0b0000000000000000 brain16
 *
 * Since bf16 has the same number of exponent bits as a 32bit float,
 * encoding and decoding numbers becomes relatively straightforward.
 *
 *       ┌sign
 *       │
 *       │   ┌exponent
 *       │   │
 *       │   │      ┌mantissa
 *       │   │      │
 *       │┌──┴───┐┌─┴───────────────────┐
 *     0b00000000000000000000000000000000 IEEE binary32
 *
 * For comparison, the standard fp16 format has fewer exponent bits.
 *
 *       ┌sign
 *       │
 *       │  ┌exponent
 *       │  │
 *       │  │    ┌mantissa
 *       │  │    │
 *       │┌─┴─┐┌─┴──────┐
 *     0b0000000000000000 IEEE binary16
 *
 * @see IEEE 754-2008
 */
static inline float ggml_compute_bf16_to_fp32(ggml_bf16_t h) {
    union {
        float f;
        uint32_t i;
    } u;
    u.i = (uint32_t)h.bits << 16;
    return u.f;
}

/**
 * Converts float32 to brain16.
 *
 * This is binary identical with Google Brain float conversion.
 * Floats shall round to nearest even, and NANs shall be quiet.
 * Subnormals aren't flushed to zero, except perhaps when used.
 * This code should vectorize nicely if using modern compilers.
 */
static inline ggml_bf16_t ggml_compute_fp32_to_bf16(float s) {
    ggml_bf16_t h;
    union {
        float f;
        uint32_t i;
    } u;
    u.f = s;
    if ((u.i & 0x7fffffff) > 0x7f800000) { /* nan */
        h.bits = (u.i >> 16) | 64; /* force to quiet */
        return h;
    }
    h.bits = (u.i + (0x7fff + ((u.i >> 16) & 1))) >> 16;
    return h;
}

#define GGML_FP32_TO_BF16(x) ggml_compute_fp32_to_bf16(x)
#define GGML_BF16_TO_FP32(x) ggml_compute_bf16_to_fp32(x)
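
// round-trip sketch (illustrative): bf16 keeps the full fp32 exponent range
// but only 7 explicit mantissa bits, so it trades precision, not range
//
//   ggml_bf16_t b = GGML_FP32_TO_BF16(1.0e30f);
//   float       f = GGML_BF16_TO_FP32(b); // -> ~1.0e30f, no overflow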

static inline int32_t ggml_node_get_use_count(const struct ggml_cgraph * cgraph, int node_idx) {
    const struct ggml_tensor * node = cgraph->nodes[node_idx];

    size_t hash_pos = ggml_hash_find(&cgraph->visited_hash_set, node);
    if (!ggml_bitset_get(cgraph->visited_hash_set.used, hash_pos)) {
        return 0;
    }
    return cgraph->use_counts[hash_pos];
}

// return true if the node's results are only used by N other nodes
// and can be fused into their calculations.
static inline bool ggml_node_has_n_uses(const struct ggml_cgraph * cgraph, int node_idx, int32_t n_uses) {
    const struct ggml_tensor * node = cgraph->nodes[node_idx];

    // check the use count against how many we're replacing
    if (ggml_node_get_use_count(cgraph, node_idx) != n_uses) {
        return false;
    }

    // if node is a view, some other node might be using the intermediate result
    // via the view source.
    if (node->view_src) {
        return false;
    }

    // If the user requested output for the node, can't fuse
    if (node->flags & GGML_TENSOR_FLAG_OUTPUT) {
        return false;
    }

    return true;
}

// Returns true if nodes with indices { node_idxs } are the sequence of ggml_ops in ops[]
// and are fusable. Nodes are considered fusable according to this function if:
// - all nodes except the last have only one use and are not views/outputs (see ggml_node_has_n_uses).
// - all nodes except the last are a src of the following node.
// - all nodes are the same shape.
// TODO: Consider allowing GGML_OP_NONE nodes in between
static inline bool ggml_can_fuse_ext(const struct ggml_cgraph * cgraph, const int * node_idxs, const enum ggml_op * ops, int num_ops) {
    for (int i = 0; i < num_ops; ++i) {
        if (node_idxs[i] >= cgraph->n_nodes) {
            return false;
        }

        struct ggml_tensor * node = cgraph->nodes[node_idxs[i]];
        if (node->op != ops[i]) {
            return false;
        }
        if (i < num_ops - 1 && !ggml_node_has_n_uses(cgraph, node_idxs[i], 1)) {
            return false;
        }
        if (i > 0) {
            struct ggml_tensor * prev = cgraph->nodes[node_idxs[i - 1]];
            if (node->src[0] != prev && node->src[1] != prev) {
                return false;
            }
            if (!ggml_are_same_shape(node, prev)) {
                return false;
            }
        }
    }
    return true;
}

// same as above, for sequential indices starting at node_idx
static inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, const enum ggml_op * ops, int num_ops) {
    assert(num_ops < 32);

    if (node_idx + num_ops > cgraph->n_nodes) {
        return false;
    }

    int idxs[32];
    for (int i = 0; i < num_ops; ++i) {
        idxs[i] = node_idx + i;
    }

    return ggml_can_fuse_ext(cgraph, idxs, ops, num_ops);
}
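
// usage sketch (illustrative; assumes the two nodes have the same shape):
//
//   const enum ggml_op ops[] = { GGML_OP_MUL_MAT, GGML_OP_ADD };
//   if (ggml_can_fuse(cgraph, node_idx, ops, 2)) {
//       // dispatch a fused mul_mat + add kernel
//   }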

GGML_API bool ggml_can_fuse_subgraph_ext(const struct ggml_cgraph * cgraph,
                                         const int * node_idxs,
                                         int count,
                                         const enum ggml_op * ops,
                                         const int * outputs,
                                         int num_outputs);

// Returns true if the subgraph formed by {node_idxs} can be fused.
// Checks whether all nodes which are not part of outputs can be elided
// by checking if their num_uses are confined to the subgraph.
static inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph,
                                          int node_idx,
                                          int count,
                                          const enum ggml_op * ops,
                                          const int * outputs,
                                          int num_outputs) {
    GGML_ASSERT(count < 32);
    if (node_idx + count > cgraph->n_nodes) {
        return false;
    }

    int idxs[32];
    for (int i = 0; i < count; ++i) {
        idxs[i] = node_idx + i;
    }

    return ggml_can_fuse_subgraph_ext(cgraph, idxs, count, ops, outputs, num_outputs);
}

#ifdef __cplusplus
}
#endif

#ifdef __cplusplus
#include <array>
#include <initializer_list>
#include <vector>

// nicer C++ syntax for ggml_can_fuse
inline bool ggml_can_fuse(const struct ggml_cgraph * cgraph, int node_idx, std::initializer_list<enum ggml_op> ops) {
    return ggml_can_fuse(cgraph, node_idx, ops.begin(), (int)ops.size());
}

inline bool ggml_can_fuse_subgraph(const struct ggml_cgraph * cgraph,
                                   int start_idx,
                                   std::initializer_list<enum ggml_op> ops,
                                   std::initializer_list<int> outputs = {}) {
    return ggml_can_fuse_subgraph(cgraph, start_idx, (int)ops.size(), ops.begin(), outputs.begin(), (int)outputs.size());
}

// Return true if the edges in the graph match expectations.
inline bool ggml_check_edges(const struct ggml_cgraph * cgraph,
                             int start_idx,
                             std::initializer_list<std::array<int, 3>> edges) {
    for (const auto & edge : edges) {
        int dst_node = edge[0];
        int src_idx  = edge[1];
        int src_node = edge[2];
        if (cgraph->nodes[start_idx + dst_node]->src[src_idx] != cgraph->nodes[start_idx + src_node]) {
            return false;
        }
    }
    return true;
}
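
// usage sketch (illustrative): each edge is { dst_node, src_idx, src_node },
// with node offsets relative to start_idx; this checks that node 1 reads
// node 0 through src[0]
//
//   bool ok = ggml_check_edges(cgraph, start_idx, { { 1, 0, 0 } });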

// expose GGUF internals for test code
GGML_API size_t gguf_type_size(enum gguf_type type);
GGML_API struct gguf_context * gguf_init_from_file_impl(FILE * file, struct gguf_init_params params);
GGML_API void gguf_write_to_buf(const struct gguf_context * ctx, std::vector<int8_t> & buf, bool only_meta);
#endif // __cplusplus