1#define _CRT_SECURE_NO_DEPRECATE // Disables "unsafe" warnings on Windows
2#define _USE_MATH_DEFINES // For M_PI on MSVC
3
4#include "ggml-backend-impl.h"
5#include "ggml-backend.h"
6#include "traits.h"
7#include "ggml-cpu-impl.h"
8#include "ggml-impl.h"
9#include "quants.h"
10#include "ggml-threading.h"
11#include "unary-ops.h"
12#include "binary-ops.h"
13#include "vec.h"
14#include "ops.h"
15#include "ggml.h"
16#include "common.h"
17
18#if defined(_MSC_VER) || defined(__MINGW32__)
19#include <malloc.h> // using malloc.h with MSC/MINGW
20#elif !defined(__FreeBSD__) && !defined(__NetBSD__) && !defined(__OpenBSD__)
21#include <alloca.h>
22#endif
23
24#include <assert.h>
25#include <errno.h>
26#include <time.h>
27#include <math.h>
28#include <stdlib.h>
29#include <string.h>
30#include <stdint.h>
31#include <inttypes.h>
32#include <stdio.h>
33#include <float.h>
34#include <limits.h>
35#include <stdarg.h>
36#include <signal.h>
37#if defined(__gnu_linux__)
38#include <syscall.h>
39#endif
40
41#ifdef GGML_USE_OPENMP
42#include <omp.h>
43#endif
44
45#if defined(__ARM_FEATURE_SVE) || defined(__ARM_FEATURE_MATMUL_INT8)
46#undef GGML_USE_LLAMAFILE
47#endif
48
49#ifdef GGML_USE_LLAMAFILE
50#include "llamafile/sgemm.h"
51#endif
52
53// Note: once we move threading into a separate C++ file
54// will use std::hardware_destructive_interference_size instead of hardcoding it here
55// and we'll use C++ attribute syntax.
56#define GGML_CACHE_LINE 64
57
58#if defined(__clang__) || defined(__GNUC__)
59#define GGML_CACHE_ALIGN __attribute__((aligned(GGML_CACHE_LINE)))
60#endif
61
62#if defined(__has_feature)
63#if __has_feature(thread_sanitizer)
64#define GGML_TSAN_ENABLED 1
65#endif
66#else // __has_feature
67#if defined(__SANITIZE_THREAD__)
68#define GGML_TSAN_ENABLED 1
69#endif
70#endif // __has_feature
71
72#define UNUSED GGML_UNUSED
73#define SWAP(x, y, T) do { T SWAP = x; (x) = y; (y) = SWAP; } while (0)
74
75// precomputed f32 table for f16 (256 KB) (simd-mappings.h)
76float ggml_table_f32_f16[1 << 16];
77
78// precomputed f32 table for e8m0 half (1 KB) (simd-mappings.h)
79float ggml_table_f32_e8m0_half[1 << 8];
80
81#if defined(__ARM_ARCH)
82struct ggml_arm_arch_features_type {
83 int sve_cnt;
84} ggml_arm_arch_features = { 0 };
85#endif
86
87#if defined(__riscv)
88struct ggml_riscv_arch_features_type {
89 int rvv_vlen;
90} ggml_riscv_arch_features = { 0 };
91#endif
92
93#if defined(_WIN32)
94
95#define WIN32_LEAN_AND_MEAN
96#ifndef NOMINMAX
97 #define NOMINMAX
98#endif
99#include <windows.h>
100
101#if defined(_MSC_VER) && !defined(__clang__)
102#define GGML_CACHE_ALIGN __declspec(align(GGML_CACHE_LINE))
103
104typedef volatile LONG atomic_int;
105typedef atomic_int atomic_bool;
106typedef atomic_int atomic_flag;
107
108#define ATOMIC_FLAG_INIT 0
109
110typedef enum {
111 memory_order_relaxed,
112 memory_order_consume,
113 memory_order_acquire,
114 memory_order_release,
115 memory_order_acq_rel,
116 memory_order_seq_cst
117} memory_order;
118
119static void atomic_store(atomic_int * ptr, LONG val) {
120 InterlockedExchange(ptr, val);
121}
122static void atomic_store_explicit(atomic_int * ptr, LONG val, memory_order mo) {
123 // TODO: add support for explicit memory order
124 InterlockedExchange(ptr, val);
125}
126static LONG atomic_load(atomic_int * ptr) {
127 return InterlockedCompareExchange(ptr, 0, 0);
128}
129static LONG atomic_load_explicit(atomic_int * ptr, memory_order mo) {
130 // TODO: add support for explicit memory order
131 return InterlockedCompareExchange(ptr, 0, 0);
132}
133static LONG atomic_fetch_add(atomic_int * ptr, LONG inc) {
134 return InterlockedExchangeAdd(ptr, inc);
135}
136static LONG atomic_fetch_add_explicit(atomic_int * ptr, LONG inc, memory_order mo) {
137 // TODO: add support for explicit memory order
138 return InterlockedExchangeAdd(ptr, inc);
139}
140static atomic_bool atomic_flag_test_and_set(atomic_flag * ptr) {
141 return InterlockedExchange(ptr, 1);
142}
143static void atomic_flag_clear(atomic_flag * ptr) {
144 InterlockedExchange(ptr, 0);
145}
146static void atomic_thread_fence(memory_order mo) {
147 MemoryBarrier();
148}
149#else // clang
150#include <stdatomic.h>
151#endif
152
153typedef HANDLE pthread_t;
154
155typedef DWORD thread_ret_t;
156static int pthread_create(pthread_t * out, void * unused, thread_ret_t(*func)(void *), void * arg) {
157 (void) unused;
158 HANDLE handle = CreateThread(NULL, 0, (LPTHREAD_START_ROUTINE) func, arg, 0, NULL);
159 if (handle == NULL)
160 {
161 return EAGAIN;
162 }
163
164 *out = handle;
165 return 0;
166}
167
168static int pthread_join(pthread_t thread, void * unused) {
169 (void) unused;
170 int ret = (int) WaitForSingleObject(thread, INFINITE);
171 CloseHandle(thread);
172 return ret;
173}
174
175static int sched_yield (void) {
176 Sleep (0);
177 return 0;
178}
179#else
180
181#include <pthread.h>
182#include <stdatomic.h>
183#include <sched.h>
184#if defined(__FreeBSD__)
185#include <pthread_np.h>
186#endif
187
188typedef void * thread_ret_t;
189
190#include <sys/types.h>
191#include <sys/stat.h>
192#include <unistd.h>
193
194#endif
195
196typedef pthread_t ggml_thread_t;
197
198#define GGML_THREADPOOL_N_THREADS_MASK (0xffffU)
199#define GGML_THREADPOOL_N_THREADS_BITS (16)
200
201#if defined(__APPLE__)
202#include <unistd.h>
203#include <mach/mach.h>
204#include <TargetConditionals.h>
205#endif
206
207static const struct ggml_type_traits_cpu type_traits_cpu[GGML_TYPE_COUNT] = {
208 [GGML_TYPE_F32] = {
209 .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp32,
210 .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f32,
211 .vec_dot_type = GGML_TYPE_F32,
212 .nrows = 1,
213 },
214 [GGML_TYPE_F16] = {
215 .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_fp16,
216 .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_f16,
217 .vec_dot_type = GGML_TYPE_F16,
218 .nrows = 1,
219 },
220 [GGML_TYPE_Q4_0] = {
221 .from_float = quantize_row_q4_0,
222 .vec_dot = ggml_vec_dot_q4_0_q8_0,
223 .vec_dot_type = GGML_TYPE_Q8_0,
224#if defined (__ARM_FEATURE_MATMUL_INT8)
225 .nrows = 2,
226#else
227 .nrows = 1,
228#endif
229 },
230 [GGML_TYPE_Q4_1] = {
231 .from_float = quantize_row_q4_1,
232 .vec_dot = ggml_vec_dot_q4_1_q8_1,
233 .vec_dot_type = GGML_TYPE_Q8_1,
234#if defined (__ARM_FEATURE_MATMUL_INT8)
235 .nrows = 2,
236#else
237 .nrows = 1,
238#endif
239 },
240 [GGML_TYPE_Q5_0] = {
241 .from_float = quantize_row_q5_0,
242 .vec_dot = ggml_vec_dot_q5_0_q8_0,
243 .vec_dot_type = GGML_TYPE_Q8_0,
244 .nrows = 1,
245 },
246 [GGML_TYPE_Q5_1] = {
247 .from_float = quantize_row_q5_1,
248 .vec_dot = ggml_vec_dot_q5_1_q8_1,
249 .vec_dot_type = GGML_TYPE_Q8_1,
250 .nrows = 1,
251 },
252 [GGML_TYPE_Q8_0] = {
253 .from_float = quantize_row_q8_0,
254 .vec_dot = ggml_vec_dot_q8_0_q8_0,
255 .vec_dot_type = GGML_TYPE_Q8_0,
256#if defined (__ARM_FEATURE_MATMUL_INT8)
257 .nrows = 2,
258#else
259 .nrows = 1,
260#endif
261 },
262 [GGML_TYPE_Q8_1] = {
263 .from_float = quantize_row_q8_1,
264 .vec_dot_type = GGML_TYPE_Q8_1,
265 .nrows = 1,
266 },
267 [GGML_TYPE_MXFP4] = {
268 .from_float = quantize_row_mxfp4,
269 .vec_dot = ggml_vec_dot_mxfp4_q8_0,
270 .vec_dot_type = GGML_TYPE_Q8_0,
271 .nrows = 1,
272 },
273 [GGML_TYPE_Q2_K] = {
274 .from_float = quantize_row_q2_K,
275 .vec_dot = ggml_vec_dot_q2_K_q8_K,
276 .vec_dot_type = GGML_TYPE_Q8_K,
277 .nrows = 1,
278 },
279 [GGML_TYPE_Q3_K] = {
280 .from_float = quantize_row_q3_K,
281 .vec_dot = ggml_vec_dot_q3_K_q8_K,
282 .vec_dot_type = GGML_TYPE_Q8_K,
283 .nrows = 1,
284 },
285 [GGML_TYPE_Q4_K] = {
286 .from_float = quantize_row_q4_K,
287 .vec_dot = ggml_vec_dot_q4_K_q8_K,
288 .vec_dot_type = GGML_TYPE_Q8_K,
289#if defined (__ARM_FEATURE_MATMUL_INT8)
290 .nrows = 2,
291#else
292 .nrows = 1,
293#endif
294 },
295 [GGML_TYPE_Q5_K] = {
296 .from_float = quantize_row_q5_K,
297 .vec_dot = ggml_vec_dot_q5_K_q8_K,
298 .vec_dot_type = GGML_TYPE_Q8_K,
299 .nrows = 1,
300 },
301 [GGML_TYPE_Q6_K] = {
302 .from_float = quantize_row_q6_K,
303 .vec_dot = ggml_vec_dot_q6_K_q8_K,
304 .vec_dot_type = GGML_TYPE_Q8_K,
305#if defined (__ARM_FEATURE_MATMUL_INT8)
306 .nrows = 2,
307#else
308 .nrows = 1,
309#endif
310 },
311 [GGML_TYPE_IQ2_XXS] = {
312 .from_float = NULL,
313 .vec_dot = ggml_vec_dot_iq2_xxs_q8_K,
314 .vec_dot_type = GGML_TYPE_Q8_K,
315 .nrows = 1,
316 },
317 [GGML_TYPE_IQ2_XS] = {
318 .from_float = NULL,
319 .vec_dot = ggml_vec_dot_iq2_xs_q8_K,
320 .vec_dot_type = GGML_TYPE_Q8_K,
321 .nrows = 1,
322 },
323 [GGML_TYPE_IQ3_XXS] = {
324 // NOTE: from_float for iq3 and iq2_s was removed because these quants require initialization in ggml_quantize_init
325 //.from_float = quantize_row_iq3_xxs,
326 .vec_dot = ggml_vec_dot_iq3_xxs_q8_K,
327 .vec_dot_type = GGML_TYPE_Q8_K,
328 .nrows = 1,
329 },
330 [GGML_TYPE_IQ3_S] = {
331 //.from_float = quantize_row_iq3_s,
332 .vec_dot = ggml_vec_dot_iq3_s_q8_K,
333 .vec_dot_type = GGML_TYPE_Q8_K,
334 .nrows = 1,
335 },
336 [GGML_TYPE_IQ2_S] = {
337 //.from_float = quantize_row_iq2_s,
338 .vec_dot = ggml_vec_dot_iq2_s_q8_K,
339 .vec_dot_type = GGML_TYPE_Q8_K,
340 .nrows = 1,
341 },
342 [GGML_TYPE_IQ1_S] = {
343 .from_float = NULL,
344 .vec_dot = ggml_vec_dot_iq1_s_q8_K,
345 .vec_dot_type = GGML_TYPE_Q8_K,
346 .nrows = 1,
347 },
348 [GGML_TYPE_IQ1_M] = {
349 .from_float = NULL,
350 .vec_dot = ggml_vec_dot_iq1_m_q8_K,
351 .vec_dot_type = GGML_TYPE_Q8_K,
352 .nrows = 1,
353 },
354 [GGML_TYPE_IQ4_NL] = {
355 .from_float = quantize_row_iq4_nl,
356 .vec_dot = ggml_vec_dot_iq4_nl_q8_0,
357 .vec_dot_type = GGML_TYPE_Q8_0,
358 .nrows = 1,
359 },
360 [GGML_TYPE_IQ4_XS] = {
361 .from_float = quantize_row_iq4_xs,
362 .vec_dot = ggml_vec_dot_iq4_xs_q8_K,
363 .vec_dot_type = GGML_TYPE_Q8_K,
364 .nrows = 1,
365 },
366 [GGML_TYPE_Q8_K] = {
367 .from_float = quantize_row_q8_K,
368 },
369 [GGML_TYPE_BF16] = {
370 .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_bf16,
371 .vec_dot = (ggml_vec_dot_t) ggml_vec_dot_bf16,
372 .vec_dot_type = GGML_TYPE_BF16,
373 .nrows = 1,
374 },
375 [GGML_TYPE_TQ1_0] = {
376 .from_float = quantize_row_tq1_0,
377 .vec_dot = ggml_vec_dot_tq1_0_q8_K,
378 .vec_dot_type = GGML_TYPE_Q8_K,
379 .nrows = 1,
380 },
381 [GGML_TYPE_TQ2_0] = {
382 .from_float = quantize_row_tq2_0,
383 .vec_dot = ggml_vec_dot_tq2_0_q8_K,
384 .vec_dot_type = GGML_TYPE_Q8_K,
385 .nrows = 1,
386 },
387 [GGML_TYPE_I32] = {
388 .from_float = (ggml_from_float_t) ggml_cpu_fp32_to_i32,
389 },
390};
391
392const struct ggml_type_traits_cpu * ggml_get_type_traits_cpu(enum ggml_type type) {
393 return &type_traits_cpu[type];
394}
395
396//
397// Threading defs
398//
399
400typedef pthread_t ggml_thread_t;
401
402#if defined(_WIN32)
403
404typedef CONDITION_VARIABLE ggml_cond_t;
405typedef SRWLOCK ggml_mutex_t;
406
407#define ggml_mutex_init(m) InitializeSRWLock(m)
408#define ggml_mutex_destroy(m)
409#define ggml_mutex_lock(m) AcquireSRWLockExclusive(m)
410#define ggml_mutex_unlock(m) ReleaseSRWLockExclusive(m)
411#define ggml_mutex_lock_shared(m) AcquireSRWLockShared(m)
412#define ggml_mutex_unlock_shared(m) ReleaseSRWLockShared(m)
413
414#define ggml_cond_init(c) InitializeConditionVariable(c)
415#define ggml_cond_destroy(c)
416#define ggml_cond_wait(c, m) SleepConditionVariableSRW(c, m, INFINITE, CONDITION_VARIABLE_LOCKMODE_SHARED)
417#define ggml_cond_broadcast(c) WakeAllConditionVariable(c)
418
419#define ggml_thread_create pthread_create
420#define ggml_thread_join pthread_join
421
422#else
423
424typedef pthread_cond_t ggml_cond_t;
425typedef pthread_mutex_t ggml_mutex_t;
426
427#define ggml_mutex_init(m) pthread_mutex_init(m, NULL)
428#define ggml_mutex_destroy(m) pthread_mutex_destroy(m)
429#define ggml_mutex_lock(m) pthread_mutex_lock(m)
430#define ggml_mutex_unlock(m) pthread_mutex_unlock(m)
431#define ggml_mutex_lock_shared(m) pthread_mutex_lock(m)
432#define ggml_mutex_unlock_shared(m) pthread_mutex_unlock(m)
433
434#define ggml_lock_init(x) UNUSED(x)
435#define ggml_lock_destroy(x) UNUSED(x)
436#if defined(__x86_64__) || (defined(_MSC_VER) && defined(_M_AMD64))
437#define ggml_lock_lock(x) _mm_pause()
438#else
439#define ggml_lock_lock(x) UNUSED(x)
440#endif
441#define ggml_lock_unlock(x) UNUSED(x)
442
443#define GGML_LOCK_INITIALIZER 0
444#define ggml_cond_init(c) pthread_cond_init(c, NULL)
445#define ggml_cond_destroy(c) pthread_cond_destroy(c)
446#define ggml_cond_wait(c, m) pthread_cond_wait(c, m)
447#define ggml_cond_broadcast(c) pthread_cond_broadcast(c)
448
449#define ggml_thread_create pthread_create
450#define ggml_thread_join pthread_join
451
452#endif
453
454// Threadpool def
455struct ggml_threadpool {
456 ggml_mutex_t mutex; // mutex for cond.var
457 ggml_cond_t cond; // cond.var for waiting for new work
458
459 struct ggml_cgraph * cgraph;
460 struct ggml_cplan * cplan;
461
462 // synchronization primitives
463 atomic_int n_graph; // updated when there is work to be done (i.e each graph) holds graph and active thread counts.
464 atomic_int GGML_CACHE_ALIGN n_barrier;
465 atomic_int GGML_CACHE_ALIGN n_barrier_passed;
466 atomic_int GGML_CACHE_ALIGN current_chunk; // currently processing chunk during Mat_Mul, shared between all the threads.
467
468 // these are atomic as an annotation for thread-sanitizer
469 atomic_bool stop; // Used for stopping the threadpool altogether
470 atomic_bool pause; // Used for pausing the threadpool or individual threads
471 atomic_int abort; // Used for aborting processing of a graph
472
473 struct ggml_compute_state * workers; // per thread state
474 int n_threads; // Number of threads in the pool
475 int32_t prio; // Scheduling priority
476 uint32_t poll; // Polling level (0 - no polling)
477
478 enum ggml_status ec;
479};
480
481// Per-thread state
482struct ggml_compute_state {
483#ifndef GGML_USE_OPENMP
484 ggml_thread_t thrd;
485 int last_graph;
486 bool pending;
487#endif
488 bool cpumask[GGML_MAX_N_THREADS];
489 struct ggml_threadpool * threadpool;
490 int ith;
491};
492
493// Helpers for polling loops
494#if defined(__aarch64__) && ( defined(__clang__) || defined(__GNUC__) )
495static inline void ggml_thread_cpu_relax(void) {
496 __asm__ volatile("yield" ::: "memory");
497}
498#elif defined(__x86_64__)
499static inline void ggml_thread_cpu_relax(void) {
500 _mm_pause();
501}
502#elif defined(__riscv)
503static inline void ggml_thread_cpu_relax(void) {
504 #ifdef __riscv_zihintpause
505 __asm__ __volatile__ ("pause");
506 #else
507 /* Encoding of the pause instruction */
508 __asm__ __volatile__ (".4byte 0x100000F");
509 #endif
510}
511#else
512static inline void ggml_thread_cpu_relax(void) {;}
513#endif
514
515//
516// NUMA support
517//
518
519#define GGML_NUMA_MAX_NODES 8
520#define GGML_NUMA_MAX_CPUS 512
521
522struct ggml_numa_node {
523 uint32_t cpus[GGML_NUMA_MAX_CPUS]; // hardware threads on this node
524 uint32_t n_cpus;
525};
526
527struct ggml_numa_nodes {
528 enum ggml_numa_strategy numa_strategy;
529 struct ggml_numa_node nodes[GGML_NUMA_MAX_NODES];
530 uint32_t n_nodes;
531 uint32_t total_cpus; // hardware threads on system
532 uint32_t current_node; // node on which main process is execting
533#if defined(__gnu_linux__)
534 cpu_set_t cpuset; // cpuset from numactl
535#else
536 uint32_t cpuset; // no NUMA support outside of Linux at this time. Use a portable datatype
537#endif
538};
539
540//
541// ggml state
542//
543
544struct ggml_state {
545 struct ggml_numa_nodes numa;
546};
547
548static struct ggml_state g_state = {0};
549
550void ggml_barrier(struct ggml_threadpool * tp) {
551 int n_threads = atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK;
552 if (n_threads == 1) {
553 return;
554 }
555
556#ifdef GGML_USE_OPENMP
557 #pragma omp barrier
558#else
559 int n_passed = atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed);
560
561 // enter barrier (full seq-cst fence)
562 int n_barrier = atomic_fetch_add_explicit(&tp->n_barrier, 1, memory_order_seq_cst);
563
564 if (n_barrier == (n_threads - 1)) {
565 // last thread
566 atomic_store_explicit(&tp->n_barrier, 0, memory_order_relaxed);
567
568 // exit barrier (full seq-cst fence)
569 atomic_fetch_add_explicit(&tp->n_barrier_passed, 1, memory_order_seq_cst);
570 return;
571 }
572
573 // wait for other threads
574 while (atomic_load_explicit(&tp->n_barrier_passed, memory_order_relaxed) == n_passed) {
575 ggml_thread_cpu_relax();
576 }
577
578 // exit barrier (full seq-cst fence)
579 // TSAN doesn't support standalone fence yet, we use a dummy read-modify-write instead
580 #ifdef GGML_TSAN_ENABLED
581 atomic_fetch_add_explicit(&tp->n_barrier_passed, 0, memory_order_seq_cst);
582 #else
583 atomic_thread_fence(memory_order_seq_cst);
584 #endif
585#endif
586}
587
588void ggml_threadpool_chunk_set(struct ggml_threadpool * tp, int value) {
589 atomic_store_explicit(&tp->current_chunk, value, memory_order_relaxed);
590}
591
592int ggml_threadpool_chunk_add(struct ggml_threadpool * tp, int value) {
593 return atomic_fetch_add_explicit(&tp->current_chunk, value, memory_order_relaxed);
594}
595
596#if defined(__gnu_linux__)
597static cpu_set_t ggml_get_numa_affinity(void) {
598 cpu_set_t cpuset;
599 pthread_t thread;
600 thread = pthread_self();
601 CPU_ZERO(&cpuset);
602 pthread_getaffinity_np(thread, sizeof(cpu_set_t), &cpuset);
603 return cpuset;
604}
605#else
606static uint32_t ggml_get_numa_affinity(void) {
607 return 0; // no NUMA support
608}
609#endif
610
611void ggml_numa_init(enum ggml_numa_strategy numa_flag) {
612 if (g_state.numa.n_nodes > 0) {
613 fprintf(stderr, "ggml_numa_init: NUMA already initialized\n");
614
615 return;
616 }
617
618#if defined(__gnu_linux__)
619 struct stat st;
620 char path[256];
621 int rv;
622
623 // set numa scheme
624 g_state.numa.numa_strategy = numa_flag;
625
626 GGML_PRINT_DEBUG("numa strategy %u\n",g_state.numa.numa_strategy);
627
628 g_state.numa.cpuset = ggml_get_numa_affinity();
629
630 // enumerate nodes
631 while (g_state.numa.n_nodes < GGML_NUMA_MAX_NODES) {
632 rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u", g_state.numa.n_nodes);
633 GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
634 if (stat(path, &st) != 0) { break; }
635 ++g_state.numa.n_nodes;
636 }
637
638 // enumerate CPUs
639 while (g_state.numa.total_cpus < GGML_NUMA_MAX_CPUS) {
640 rv = snprintf(path, sizeof(path), "/sys/devices/system/cpu/cpu%u", g_state.numa.total_cpus);
641 GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
642 if (stat(path, &st) != 0) { break; }
643 ++g_state.numa.total_cpus;
644 }
645
646 GGML_PRINT_DEBUG("found %u numa nodes, %u CPUs\n", g_state.numa.n_nodes, g_state.numa.total_cpus);
647
648 // figure out which node we're on
649 uint current_cpu;
650 int getcpu_ret = 0;
651#if __GLIBC__ > 2 || (__GLIBC__ == 2 && __GLIBC_MINOR__ > 33) || defined(__COSMOPOLITAN__)
652 getcpu_ret = getcpu(¤t_cpu, &g_state.numa.current_node);
653#else
654 // old glibc doesn't have a wrapper for this call. Fall back on direct syscall
655# if !defined(SYS_getcpu) && defined(SYS_get_cpu)
656# define SYS_getcpu SYS_get_cpu // some older glibc versions use this name
657# endif
658 getcpu_ret = syscall(SYS_getcpu, ¤t_cpu, &g_state.numa.current_node);
659#endif
660
661 if (g_state.numa.n_nodes < 1 || g_state.numa.total_cpus < 1 || getcpu_ret != 0) {
662 g_state.numa.n_nodes = 0;
663 return;
664 }
665
666 GGML_PRINT_DEBUG("found our process on numa node %u, CPU %u\n", g_state.numa.current_node, current_cpu);
667
668 for (uint32_t n = 0; n < g_state.numa.n_nodes; ++n) {
669 struct ggml_numa_node * node = &g_state.numa.nodes[n];
670 GGML_PRINT_DEBUG("CPUs on node %u:", n);
671 node->n_cpus = 0;
672 for (uint32_t c = 0; c < g_state.numa.total_cpus; ++c) {
673 rv = snprintf(path, sizeof(path), "/sys/devices/system/node/node%u/cpu%u", n, c);
674 GGML_ASSERT(rv > 0 && (unsigned)rv < sizeof(path));
675 if (stat(path, &st) == 0) {
676 node->cpus[node->n_cpus++] = c;
677 GGML_PRINT_DEBUG(" %u", c);
678 }
679 }
680 GGML_PRINT_DEBUG("\n");
681 }
682
683 if (ggml_is_numa()) {
684 FILE *fptr = fopen("/proc/sys/kernel/numa_balancing", "r");
685 if (fptr != NULL) {
686 char buf[42];
687 if (fgets(buf, sizeof(buf), fptr) && strncmp(buf, "0\n", sizeof(buf)) != 0) {
688 GGML_LOG_WARN("/proc/sys/kernel/numa_balancing is enabled, this has been observed to impair performance\n");
689 }
690 fclose(fptr);
691 }
692 }
693#else
694 UNUSED(numa_flag);
695 // TODO
696#endif
697}
698
699bool ggml_is_numa(void) {
700 return g_state.numa.n_nodes > 1;
701}
702
703#if defined(__ARM_ARCH)
704#if defined(__aarch64__) && defined(__ARM_FEATURE_SVE)
705#include <arm_sve.h>
706static void ggml_init_arm_arch_features(void) {
707 ggml_arm_arch_features.sve_cnt = svcntb();
708}
709#else
710static void ggml_init_arm_arch_features(void) {}
711#endif
712#endif // __ARM_ARCH
713
714#if defined(__riscv) && defined(__riscv_v_intrinsic)
715#include <riscv_vector.h>
716static void ggml_init_riscv_arch_features(void) {
717 ggml_riscv_arch_features.rvv_vlen = __riscv_vlenb();
718}
719#else
720static void ggml_init_riscv_arch_features(void) {}
721#endif
722
723struct ggml_tensor * ggml_new_i32(struct ggml_context * ctx, int32_t value) {
724 GGML_ASSERT(!ggml_get_no_alloc(ctx));
725
726 struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_I32, 1);
727
728 ggml_set_i32(result, value);
729
730 return result;
731}
732
733struct ggml_tensor * ggml_new_f32(struct ggml_context * ctx, float value) {
734 GGML_ASSERT(!ggml_get_no_alloc(ctx));
735
736 struct ggml_tensor * result = ggml_new_tensor_1d(ctx, GGML_TYPE_F32, 1);
737
738 ggml_set_f32(result, value);
739
740 return result;
741}
742
743struct ggml_tensor * ggml_set_i32 (struct ggml_tensor * tensor, int32_t value) {
744 const int n = ggml_nrows(tensor);
745 const int nc = tensor->ne[0];
746 const size_t n1 = tensor->nb[1];
747
748 char * const data = tensor->data;
749
750 switch (tensor->type) {
751 case GGML_TYPE_I8:
752 {
753 assert(tensor->nb[0] == sizeof(int8_t));
754 for (int i = 0; i < n; i++) {
755 ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
756 }
757 } break;
758 case GGML_TYPE_I16:
759 {
760 assert(tensor->nb[0] == sizeof(int16_t));
761 for (int i = 0; i < n; i++) {
762 ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
763 }
764 } break;
765 case GGML_TYPE_I32:
766 {
767 assert(tensor->nb[0] == sizeof(int32_t));
768 for (int i = 0; i < n; i++) {
769 ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
770 }
771 } break;
772 case GGML_TYPE_F16:
773 {
774 assert(tensor->nb[0] == sizeof(ggml_fp16_t));
775 for (int i = 0; i < n; i++) {
776 ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value));
777 }
778 } break;
779 case GGML_TYPE_BF16:
780 {
781 assert(tensor->nb[0] == sizeof(ggml_fp16_t));
782 for (int i = 0; i < n; i++) {
783 ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
784 }
785 } break;
786 case GGML_TYPE_F32:
787 {
788 assert(tensor->nb[0] == sizeof(float));
789 for (int i = 0; i < n; i++) {
790 ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
791 }
792 } break;
793 default:
794 {
795 GGML_ABORT("fatal error");
796 }
797 }
798
799 return tensor;
800}
801
802struct ggml_tensor * ggml_set_f32(struct ggml_tensor * tensor, float value) {
803 const int n = ggml_nrows(tensor);
804 const int nc = tensor->ne[0];
805 const size_t n1 = tensor->nb[1];
806
807 char * const data = tensor->data;
808
809 switch (tensor->type) {
810 case GGML_TYPE_I8:
811 {
812 assert(tensor->nb[0] == sizeof(int8_t));
813 for (int i = 0; i < n; i++) {
814 ggml_vec_set_i8(nc, (int8_t *)(data + i*n1), value);
815 }
816 } break;
817 case GGML_TYPE_I16:
818 {
819 assert(tensor->nb[0] == sizeof(int16_t));
820 for (int i = 0; i < n; i++) {
821 ggml_vec_set_i16(nc, (int16_t *)(data + i*n1), value);
822 }
823 } break;
824 case GGML_TYPE_I32:
825 {
826 assert(tensor->nb[0] == sizeof(int32_t));
827 for (int i = 0; i < n; i++) {
828 ggml_vec_set_i32(nc, (int32_t *)(data + i*n1), value);
829 }
830 } break;
831 case GGML_TYPE_F16:
832 {
833 assert(tensor->nb[0] == sizeof(ggml_fp16_t));
834 for (int i = 0; i < n; i++) {
835 ggml_vec_set_f16(nc, (ggml_fp16_t *)(data + i*n1), GGML_CPU_FP32_TO_FP16(value));
836 }
837 } break;
838 case GGML_TYPE_BF16:
839 {
840 assert(tensor->nb[0] == sizeof(ggml_bf16_t));
841 for (int i = 0; i < n; i++) {
842 ggml_vec_set_bf16(nc, (ggml_bf16_t *)(data + i*n1), GGML_FP32_TO_BF16(value));
843 }
844 } break;
845 case GGML_TYPE_F32:
846 {
847 assert(tensor->nb[0] == sizeof(float));
848 for (int i = 0; i < n; i++) {
849 ggml_vec_set_f32(nc, (float *)(data + i*n1), value);
850 }
851 } break;
852 default:
853 {
854 GGML_ABORT("fatal error");
855 }
856 }
857
858 return tensor;
859}
860
861int32_t ggml_get_i32_1d(const struct ggml_tensor * tensor, int i) {
862 if (!ggml_is_contiguous(tensor)) {
863 int64_t id[4] = { 0, 0, 0, 0 };
864 ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
865 return ggml_get_i32_nd(tensor, id[0], id[1], id[2], id[3]);
866 }
867 switch (tensor->type) {
868 case GGML_TYPE_I8:
869 {
870 GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
871 return ((int8_t *)(tensor->data))[i];
872 }
873 case GGML_TYPE_I16:
874 {
875 GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
876 return ((int16_t *)(tensor->data))[i];
877 }
878 case GGML_TYPE_I32:
879 {
880 GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
881 return ((int32_t *)(tensor->data))[i];
882 }
883 case GGML_TYPE_F16:
884 {
885 GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
886 return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
887 }
888 case GGML_TYPE_BF16:
889 {
890 GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
891 return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
892 }
893 case GGML_TYPE_F32:
894 {
895 GGML_ASSERT(tensor->nb[0] == sizeof(float));
896 return ((float *)(tensor->data))[i];
897 }
898 default:
899 {
900 GGML_ABORT("fatal error");
901 }
902 }
903}
904
905void ggml_set_i32_1d(const struct ggml_tensor * tensor, int i, int32_t value) {
906 if (!ggml_is_contiguous(tensor)) {
907 int64_t id[4] = { 0, 0, 0, 0 };
908 ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
909 ggml_set_i32_nd(tensor, id[0], id[1], id[2], id[3], value);
910 return;
911 }
912 switch (tensor->type) {
913 case GGML_TYPE_I8:
914 {
915 GGML_ASSERT(tensor->nb[0] == sizeof(int8_t));
916 ((int8_t *)(tensor->data))[i] = value;
917 } break;
918 case GGML_TYPE_I16:
919 {
920 GGML_ASSERT(tensor->nb[0] == sizeof(int16_t));
921 ((int16_t *)(tensor->data))[i] = value;
922 } break;
923 case GGML_TYPE_I32:
924 {
925 GGML_ASSERT(tensor->nb[0] == sizeof(int32_t));
926 ((int32_t *)(tensor->data))[i] = value;
927 } break;
928 case GGML_TYPE_F16:
929 {
930 GGML_ASSERT(tensor->nb[0] == sizeof(ggml_fp16_t));
931 ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value);
932 } break;
933 case GGML_TYPE_BF16:
934 {
935 GGML_ASSERT(tensor->nb[0] == sizeof(ggml_bf16_t));
936 ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
937 } break;
938 case GGML_TYPE_F32:
939 {
940 GGML_ASSERT(tensor->nb[0] == sizeof(float));
941 ((float *)(tensor->data))[i] = value;
942 } break;
943 default:
944 {
945 GGML_ABORT("fatal error");
946 }
947 }
948}
949
950int32_t ggml_get_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
951 void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
952 switch (tensor->type) {
953 case GGML_TYPE_I8:
954 return ((int8_t *) data)[0];
955 case GGML_TYPE_I16:
956 return ((int16_t *) data)[0];
957 case GGML_TYPE_I32:
958 return ((int32_t *) data)[0];
959 case GGML_TYPE_F16:
960 return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
961 case GGML_TYPE_BF16:
962 return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
963 case GGML_TYPE_F32:
964 return ((float *) data)[0];
965 default:
966 GGML_ABORT("fatal error");
967 }
968}
969
970void ggml_set_i32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, int32_t value) {
971 void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
972 switch (tensor->type) {
973 case GGML_TYPE_I8:
974 {
975 ((int8_t *)(data))[0] = value;
976 } break;
977 case GGML_TYPE_I16:
978 {
979 ((int16_t *)(data))[0] = value;
980 } break;
981 case GGML_TYPE_I32:
982 {
983 ((int32_t *)(data))[0] = value;
984 } break;
985 case GGML_TYPE_F16:
986 {
987 ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value);
988 } break;
989 case GGML_TYPE_BF16:
990 {
991 ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
992 } break;
993 case GGML_TYPE_F32:
994 {
995 ((float *)(data))[0] = value;
996 } break;
997 default:
998 {
999 GGML_ABORT("fatal error");
1000 }
1001 }
1002}
1003
1004float ggml_get_f32_1d(const struct ggml_tensor * tensor, int i) {
1005 if (!ggml_is_contiguous(tensor)) {
1006 int64_t id[4] = { 0, 0, 0, 0 };
1007 ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
1008 return ggml_get_f32_nd(tensor, id[0], id[1], id[2], id[3]);
1009 }
1010 switch (tensor->type) {
1011 case GGML_TYPE_I8:
1012 {
1013 return ((int8_t *)(tensor->data))[i];
1014 }
1015 case GGML_TYPE_I16:
1016 {
1017 return ((int16_t *)(tensor->data))[i];
1018 }
1019 case GGML_TYPE_I32:
1020 {
1021 return ((int32_t *)(tensor->data))[i];
1022 }
1023 case GGML_TYPE_F16:
1024 {
1025 return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *)(tensor->data))[i]);
1026 }
1027 case GGML_TYPE_BF16:
1028 {
1029 return GGML_BF16_TO_FP32(((ggml_bf16_t *)(tensor->data))[i]);
1030 }
1031 case GGML_TYPE_F32:
1032 {
1033 return ((float *)(tensor->data))[i];
1034 }
1035 default:
1036 {
1037 GGML_ABORT("fatal error");
1038 }
1039 }
1040}
1041
1042void ggml_set_f32_1d(const struct ggml_tensor * tensor, int i, float value) {
1043 if (!ggml_is_contiguous(tensor)) {
1044 int64_t id[4] = { 0, 0, 0, 0 };
1045 ggml_unravel_index(tensor, i, &id[0], &id[1], &id[2], &id[3]);
1046 ggml_set_f32_nd(tensor, id[0], id[1], id[2], id[3], value);
1047 return;
1048 }
1049 switch (tensor->type) {
1050 case GGML_TYPE_I8:
1051 {
1052 ((int8_t *)(tensor->data))[i] = value;
1053 } break;
1054 case GGML_TYPE_I16:
1055 {
1056 ((int16_t *)(tensor->data))[i] = value;
1057 } break;
1058 case GGML_TYPE_I32:
1059 {
1060 ((int32_t *)(tensor->data))[i] = value;
1061 } break;
1062 case GGML_TYPE_F16:
1063 {
1064 ((ggml_fp16_t *)(tensor->data))[i] = GGML_CPU_FP32_TO_FP16(value);
1065 } break;
1066 case GGML_TYPE_BF16:
1067 {
1068 ((ggml_bf16_t *)(tensor->data))[i] = GGML_FP32_TO_BF16(value);
1069 } break;
1070 case GGML_TYPE_F32:
1071 {
1072 ((float *)(tensor->data))[i] = value;
1073 } break;
1074 default:
1075 {
1076 GGML_ABORT("fatal error");
1077 }
1078 }
1079}
1080
1081float ggml_get_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3) {
1082 void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
1083 switch (tensor->type) {
1084 case GGML_TYPE_I8:
1085 return ((int8_t *) data)[0];
1086 case GGML_TYPE_I16:
1087 return ((int16_t *) data)[0];
1088 case GGML_TYPE_I32:
1089 return ((int32_t *) data)[0];
1090 case GGML_TYPE_F16:
1091 return GGML_CPU_FP16_TO_FP32(((ggml_fp16_t *) data)[0]);
1092 case GGML_TYPE_BF16:
1093 return GGML_BF16_TO_FP32(((ggml_bf16_t *) data)[0]);
1094 case GGML_TYPE_F32:
1095 return ((float *) data)[0];
1096 default:
1097 GGML_ABORT("fatal error");
1098 }
1099}
1100
1101void ggml_set_f32_nd(const struct ggml_tensor * tensor, int i0, int i1, int i2, int i3, float value) {
1102 void * data = (char *) tensor->data + i0*tensor->nb[0] + i1*tensor->nb[1] + i2*tensor->nb[2] + i3*tensor->nb[3];
1103 switch (tensor->type) {
1104 case GGML_TYPE_I8:
1105 {
1106 ((int8_t *)(data))[0] = value;
1107 } break;
1108 case GGML_TYPE_I16:
1109 {
1110 ((int16_t *)(data))[0] = value;
1111 } break;
1112 case GGML_TYPE_I32:
1113 {
1114 ((int32_t *)(data))[0] = value;
1115 } break;
1116 case GGML_TYPE_F16:
1117 {
1118 ((ggml_fp16_t *)(data))[0] = GGML_CPU_FP32_TO_FP16(value);
1119 } break;
1120 case GGML_TYPE_BF16:
1121 {
1122 ((ggml_bf16_t *)(data))[0] = GGML_FP32_TO_BF16(value);
1123 } break;
1124 case GGML_TYPE_F32:
1125 {
1126 ((float *)(data))[0] = value;
1127 } break;
1128 default:
1129 {
1130 GGML_ABORT("fatal error");
1131 }
1132 }
1133}
1134
1135////////////////////////////////////////////////////////////////////////////////
1136
1137// ggml_compute_forward_mul_mat
1138
1139static void ggml_compute_forward_mul_mat_one_chunk(
1140 const struct ggml_compute_params * params,
1141 struct ggml_tensor * dst,
1142 const enum ggml_type type,
1143 const int64_t num_rows_per_vec_dot,
1144 const int64_t ir0_start,
1145 const int64_t ir0_end,
1146 const int64_t ir1_start,
1147 const int64_t ir1_end) {
1148
1149 const struct ggml_tensor * src0 = dst->src[0];
1150 const struct ggml_tensor * src1 = dst->src[1];
1151
1152 GGML_TENSOR_BINARY_OP_LOCALS
1153
1154 const bool src1_cont = ggml_is_contiguous(src1);
1155
1156 ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot;
1157 enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
1158
1159 // broadcast factors
1160 const int64_t r2 = ne12 / ne02;
1161 const int64_t r3 = ne13 / ne03;
1162
1163 //printf("ir0_start = %6lld, ir0_end = %6lld, ir1_start = %6lld, ir1_end = %6lld\n", ir0_start, ir0_end, ir1_start, ir1_end);
1164
1165 // threads with no work simply yield (not sure if it helps)
1166 if (ir0_start >= ir0_end || ir1_start >= ir1_end) {
1167 return;
1168 }
1169
1170 const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
1171 const size_t row_size = ggml_row_size(vec_dot_type, ne10);
1172
1173 assert(ne12 % ne02 == 0);
1174 assert(ne13 % ne03 == 0);
1175
1176 // block-tiling attempt
1177 const int64_t blck_0 = 16;
1178 const int64_t blck_1 = 16;
1179
1180 const size_t src1_col_stride = src1_cont || src1->type != vec_dot_type ? row_size : nb11;
1181
1182 // attempt to reduce false-sharing (does not seem to make a difference)
1183 // 16 * 2, accounting for mmla kernels
1184 float tmp[32];
1185
1186 for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
1187 for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
1188 for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ir1 += num_rows_per_vec_dot) {
1189 const int64_t i13 = (ir1 / (ne12 * ne1));
1190 const int64_t i12 = (ir1 - i13 * ne12 * ne1) / ne1;
1191 const int64_t i11 = (ir1 - i13 * ne12 * ne1 - i12 * ne1);
1192
1193 // broadcast src0 into src1
1194 const int64_t i03 = i13 / r3;
1195 const int64_t i02 = i12 / r2;
1196
1197 const int64_t i1 = i11;
1198 const int64_t i2 = i12;
1199 const int64_t i3 = i13;
1200
1201 const char * src0_row = (const char*)src0->data + (0 + i02 * nb02 + i03 * nb03);
1202
1203 // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
1204 // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
1205 // the original src1 data pointer, so we should index using the indices directly
1206 // TODO: this is a bit of a hack, we should probably have a better way to handle this
1207 const char * src1_col = (const char*)wdata +
1208 (src1_cont || src1->type != vec_dot_type
1209 ? (i11 + i12 * ne11 + i13 * ne12 * ne11) * row_size
1210 : (i11 * nb11 + i12 * nb12 + i13 * nb13));
1211 float * dst_col = (float*)((char*)dst->data + (i1 * nb1 + i2 * nb2 + i3 * nb3));
1212
1213 //for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) {
1214 // vec_dot(ne00, &dst_col[ir0], src0_row + ir0*nb01, src1_col);
1215 //}
1216
1217 for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ir0 += num_rows_per_vec_dot) {
1218 vec_dot(ne00, &tmp[ir0 - iir0], (num_rows_per_vec_dot > 1 ? 16 : 0), src0_row + ir0 * nb01, (num_rows_per_vec_dot > 1 ? nb01 : 0), src1_col, (num_rows_per_vec_dot > 1 ? src1_col_stride : 0), num_rows_per_vec_dot);
1219 }
1220
1221 for (int cn = 0; cn < num_rows_per_vec_dot; ++cn) {
1222 memcpy(&dst_col[iir0 + cn * nb1 / nb0], tmp + (cn * 16), (MIN(iir0 + blck_0, ir0_end) - iir0) * sizeof(float));
1223 }
1224 }
1225 }
1226 }
1227}
1228
1229void ggml_compute_forward_mul_mat(
1230 const struct ggml_compute_params * params,
1231 struct ggml_tensor * dst) {
1232
1233 const struct ggml_tensor * src0 = dst->src[0];
1234 const struct ggml_tensor * src1 = dst->src[1];
1235
1236 GGML_TENSOR_BINARY_OP_LOCALS
1237
1238 const int ith = params->ith;
1239 const int nth = params->nth;
1240
1241 enum ggml_type const vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
1242 ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
1243 int64_t const vec_dot_num_rows = type_traits_cpu[src0->type].nrows;
1244
1245 GGML_ASSERT(ne0 == ne01);
1246 GGML_ASSERT(ne1 == ne11);
1247 GGML_ASSERT(ne2 == ne12);
1248 GGML_ASSERT(ne3 == ne13);
1249
1250 // we don't support permuted src0 or src1
1251 GGML_ASSERT(nb00 == ggml_type_size(src0->type));
1252 GGML_ASSERT(nb10 == ggml_type_size(src1->type));
1253
1254 // dst cannot be transposed or permuted
1255 GGML_ASSERT(nb0 == sizeof(float));
1256 GGML_ASSERT(nb0 <= nb1);
1257 GGML_ASSERT(nb1 <= nb2);
1258 GGML_ASSERT(nb2 <= nb3);
1259
1260 // nb01 >= nb00 - src0 is not transposed
1261 // compute by src0 rows
1262
1263 // TODO: extract to "extra_op"
1264#if GGML_USE_LLAMAFILE
1265 // broadcast factors
1266 const int64_t r2 = ne12 / ne02;
1267 const int64_t r3 = ne13 / ne03;
1268
1269 const bool src1_cont = ggml_is_contiguous(src1);
1270
1271 if (src1_cont) {
1272 for (int64_t i13 = 0; i13 < ne13; i13++)
1273 for (int64_t i12 = 0; i12 < ne12; i12++)
1274 if (!llamafile_sgemm(params,
1275 ne01, ne11, ne00/ggml_blck_size(src0->type),
1276 (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
1277 nb01/ggml_type_size(src0->type),
1278 (const char *)src1->data + i12*nb12 + i13*nb13,
1279 nb11/ggml_type_size(src1->type),
1280 (char *)dst->data + i12*nb2 + i13*nb3,
1281 nb1/ggml_type_size(dst->type),
1282 src0->type,
1283 src1->type,
1284 dst->type))
1285 goto UseGgmlGemm1;
1286 return;
1287 }
1288UseGgmlGemm1:;
1289#endif
1290
1291 if (src1->type != vec_dot_type) {
1292 char * wdata = params->wdata;
1293
1294 const size_t nbw0 = ggml_type_size(vec_dot_type);
1295 const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
1296 const size_t nbw2 = nbw1*ne11;
1297 const size_t nbw3 = nbw2*ne12;
1298
1299 assert(params->wsize >= ne13*nbw3);
1300 GGML_ASSERT(src1->type == GGML_TYPE_F32);
1301
1302 #if 0
1303 for (int64_t i13 = 0; i13 < ne13; ++i13) {
1304 for (int64_t i12 = 0; i12 < ne12; ++i12) {
1305 for (int64_t i11 = ith; i11 < ne11; i11 += nth) {
1306 from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
1307 (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
1308 ne10);
1309 }
1310 }
1311 }
1312 #else
1313 for (int64_t i13 = 0; i13 < ne13; ++i13) {
1314 for (int64_t i12 = 0; i12 < ne12; ++i12) {
1315 for (int64_t i11 = 0; i11 < ne11; ++i11) {
1316 size_t bs = ggml_blck_size(vec_dot_type);
1317 int64_t ne10_block_start = (ith * ne10/bs) / nth;
1318 int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
1319 from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
1320 (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
1321 (ne10_block_end - ne10_block_start) * bs);
1322 }
1323 }
1324 }
1325 #endif
1326 }
1327
1328 if (ith == 0) {
1329 // Every thread starts at ith, so the first unprocessed chunk is nth. This save a bit of coordination right at the start.
1330 atomic_store_explicit(¶ms->threadpool->current_chunk, nth, memory_order_relaxed);
1331 }
1332
1333 ggml_barrier(params->threadpool);
1334
1335#if GGML_USE_LLAMAFILE
1336 if (src1->type != vec_dot_type) {
1337 const void* wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
1338 const size_t row_size = ggml_row_size(vec_dot_type, ne10);
1339
1340 for (int64_t i13 = 0; i13 < ne13; i13++)
1341 for (int64_t i12 = 0; i12 < ne12; i12++)
1342 if (!llamafile_sgemm(params,
1343 ne01, ne11, ne00/ggml_blck_size(src0->type),
1344 (const char *)src0->data + i12/r2*nb02 + i13/r3*nb03,
1345 nb01/ggml_type_size(src0->type),
1346 (const char *)wdata + (i12*ne11 + i13*ne12*ne11)*row_size,
1347 row_size/ggml_type_size(vec_dot_type),
1348 (char *)dst->data + i12*nb2 + i13*nb3,
1349 nb1/ggml_type_size(dst->type),
1350 src0->type,
1351 vec_dot_type,
1352 dst->type))
1353 goto UseGgmlGemm2;
1354 return;
1355 }
1356UseGgmlGemm2:;
1357#endif
1358
1359 // This is the size of the first dimension of the result, so we can iterate that way. (see the ASSERT above, these are the same numbers)
1360 const int64_t nr0 = ne0;
1361
1362 // This is the size of the rest of the dimensions of the result
1363 const int64_t nr1 = ne1 * ne2 * ne3;
1364
1365 // Now select a reasonable chunk size.
1366 int chunk_size = 16;
1367
1368 // We need to step up the size if it's small
1369 if (nr0 == 1 || nr1 == 1) {
1370 chunk_size = 64;
1371 }
1372
1373 // distribute the work across the inner or outer loop based on which one is larger
1374 // The number of chunks in the 0/1 dim.
1375 // CEIL(nr0/chunk_size)
1376 int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
1377 int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
1378
1379 // If the chunking is poor for the number of threads on this setup, scrap the whole plan. Re-chunk it by thread.
1380 // Also, chunking by thread was measured to have perform better on NUMA systems. See https://github.com/ggml-org/llama.cpp/pull/6915
1381 // In theory, chunking should be just as useful on NUMA and non NUMA systems, but testing disagreed with that.
1382 if (nchunk0 * nchunk1 < nth * 4 || ggml_is_numa()) {
1383 // distribute the thread work across the inner or outer loop based on which one is larger
1384 nchunk0 = nr0 > nr1 ? nth : 1; // parallelize by src0 rows
1385 nchunk1 = nr0 > nr1 ? 1 : nth; // parallelize by src1 rows
1386 }
1387
1388 // The number of elements in each chunk
1389 const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
1390 const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
1391
1392 // The first chunk comes from our thread_id, the rest will get auto-assigned.
1393 int current_chunk = ith;
1394
1395 while (current_chunk < nchunk0 * nchunk1) {
1396 const int64_t ith0 = current_chunk % nchunk0;
1397 const int64_t ith1 = current_chunk / nchunk0;
1398
1399 const int64_t ir0_start = dr0 * ith0;
1400 const int64_t ir0_end = MIN(ir0_start + dr0, nr0);
1401
1402 const int64_t ir1_start = dr1 * ith1;
1403 const int64_t ir1_end = MIN(ir1_start + dr1, nr1);
1404
1405 // dot kernels can handle 1 row and col at a time, but mmla kernels can process 2 rows and cols
1406 int64_t num_rows_per_vec_dot = vec_dot_num_rows;
1407
1408 // these checks are needed to avoid crossing dim1 boundaries
1409 // can be optimized, but the logic would become more complicated, so keeping it like this for simplicity
1410 if ((nr0 % 2 != 0) || (ne11 % 2 != 0) || ((ir0_end - ir0_start) % 2 != 0) || ((ir1_end - ir1_start) % 2 != 0)) {
1411 num_rows_per_vec_dot = 1;
1412 }
1413 ggml_compute_forward_mul_mat_one_chunk(params, dst, src0->type, num_rows_per_vec_dot, ir0_start, ir0_end, ir1_start, ir1_end);
1414
1415 if (nth >= nchunk0 * nchunk1) {
1416 break;
1417 }
1418
1419 current_chunk = atomic_fetch_add_explicit(¶ms->threadpool->current_chunk, 1, memory_order_relaxed);
1420 }
1421}
1422
1423// ggml_compute_forward_mul_mat_id
1424
1425#define MMID_MATRIX_ROW(row_id, i1) matrix_rows[(row_id)*ids->ne[0]*ids->ne[1] + (i1)]
1426
1427struct mmid_row_mapping {
1428 int32_t i1;
1429 int32_t i2;
1430};
1431
1432static void ggml_compute_forward_mul_mat_id_one_chunk(
1433 struct ggml_tensor * dst,
1434 const struct ggml_tensor * src0,
1435 const struct ggml_tensor * src1,
1436 const struct ggml_tensor * ids,
1437 const int64_t cur_a,
1438 const int64_t ir0_start,
1439 const int64_t ir0_end,
1440 const int64_t ir1_start,
1441 const int64_t ir1_end,
1442 const char * src0_cur,
1443 const struct mmid_row_mapping * matrix_rows,
1444 const size_t row_size,
1445 const bool src1_cont,
1446 const void * wdata) {
1447
1448 GGML_TENSOR_BINARY_OP_LOCALS
1449
1450 const enum ggml_type type = src0->type;
1451
1452 ggml_vec_dot_t const vec_dot = type_traits_cpu[type].vec_dot;
1453 enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
1454
1455 const int64_t blck_0 = 16;
1456 const int64_t blck_1 = 16;
1457
1458 float tmp[16];
1459
1460 for (int64_t iir1 = ir1_start; iir1 < ir1_end; iir1 += blck_1) {
1461 for (int64_t iir0 = ir0_start; iir0 < ir0_end; iir0 += blck_0) {
1462 for (int64_t ir1 = iir1; ir1 < iir1 + blck_1 && ir1 < ir1_end; ++ir1) {
1463 const int64_t _i12 = ir1; // logical row index for this expert
1464
1465 struct mmid_row_mapping row_mapping = MMID_MATRIX_ROW(cur_a, _i12);
1466 const int id = row_mapping.i1; // selected expert index
1467
1468 const int64_t i11 = id % ne11;
1469 const int64_t i12 = row_mapping.i2; // row index in src1
1470
1471 const int64_t i1 = id; // selected expert index
1472 const int64_t i2 = i12; // row
1473
1474 // desc: when src1 is not a contiguous memory block we have to calculate the offset using the strides
1475 // if it is, then we have either copied the data to params->wdata and made it contiguous or we are using
1476 // the original src1 data pointer, so we should index using the indices directly
1477 // TODO: this is a bit of a hack, we should probably have a better way to handle this
1478 const char * src1_col = (const char *) wdata +
1479 (src1_cont || src1->type != vec_dot_type
1480 ? (i11 + i12*ne11)*row_size
1481 : (i11*nb11 + i12*nb12));
1482
1483 float * dst_col = (float *) ((char *) dst->data + (i1*nb1 + i2*nb2));
1484
1485 for (int64_t ir0 = iir0; ir0 < iir0 + blck_0 && ir0 < ir0_end; ++ir0) {
1486 vec_dot(ne00, &tmp[ir0 - iir0], 0, src0_cur + ir0*nb01, 0, src1_col, 0, 1);
1487 }
1488
1489 memcpy(&dst_col[iir0], tmp, (MIN(iir0 + blck_0, ir0_end) - iir0)*sizeof(float));
1490 }
1491 }
1492 }
1493}
1494
1495static void * incr_ptr_aligned(void ** p, size_t size, size_t align) {
1496
1497 void * ptr = *p;
1498 ptr = (void *) GGML_PAD((uintptr_t) ptr, align);
1499 *p = (void *) ((char *) ptr + size);
1500 return ptr;
1501}
1502
1503static void ggml_compute_forward_mul_mat_id(
1504 const struct ggml_compute_params * params,
1505 struct ggml_tensor * dst) {
1506
1507 const struct ggml_tensor * src0 = dst->src[0];
1508 const struct ggml_tensor * src1 = dst->src[1];
1509 const struct ggml_tensor * ids = dst->src[2];
1510
1511 GGML_TENSOR_BINARY_OP_LOCALS
1512
1513 const int ith = params->ith;
1514 const int nth = params->nth;
1515
1516 const enum ggml_type type = src0->type;
1517
1518 const bool src1_cont = ggml_is_contiguous(src1);
1519
1520 enum ggml_type const vec_dot_type = type_traits_cpu[type].vec_dot_type;
1521 ggml_from_float_t const from_float = type_traits_cpu[vec_dot_type].from_float;
1522
1523 // we don't support permuted src0 or src1
1524 GGML_ASSERT(nb00 == ggml_type_size(type));
1525 GGML_ASSERT(nb10 == ggml_type_size(src1->type));
1526
1527 // dst cannot be transposed or permuted
1528 GGML_ASSERT(nb0 == sizeof(float));
1529 GGML_ASSERT(nb0 <= nb1);
1530 GGML_ASSERT(nb1 <= nb2);
1531 GGML_ASSERT(nb2 <= nb3);
1532
1533 // row groups
1534 const int n_ids = ids->ne[0]; // n_expert_used
1535 const int n_as = ne02; // n_expert
1536
1537 void * wdata_cur = params->wdata;
1538
1539 if (src1->type != vec_dot_type) {
1540 incr_ptr_aligned(&wdata_cur, ggml_row_size(vec_dot_type, ggml_nelements(src1)), sizeof(int64_t));
1541 }
1542
1543 int64_t * matrix_row_counts = // [n_as]
1544 incr_ptr_aligned(&wdata_cur, n_as*sizeof(int64_t), sizeof(int64_t));
1545
1546 struct mmid_row_mapping * matrix_rows = // [n_as][ids->ne[0]*ids->ne[1]]
1547 incr_ptr_aligned(&wdata_cur, n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping), sizeof(int64_t));
1548
1549 char (*atomic_current_chunk)[CACHE_LINE_SIZE] = // [n_as]
1550 incr_ptr_aligned(&wdata_cur, CACHE_LINE_SIZE * n_as, CACHE_LINE_SIZE);
1551
1552 GGML_ASSERT(params->wsize >= (size_t)((char *) wdata_cur - (char *) params->wdata));
1553
1554 if (src1->type != vec_dot_type) {
1555 char * wdata = params->wdata;
1556
1557 const size_t nbw0 = ggml_type_size(vec_dot_type);
1558 const size_t nbw1 = ggml_row_size(vec_dot_type, ne10);
1559 const size_t nbw2 = nbw1*ne11;
1560 const size_t nbw3 = nbw2*ne12;
1561
1562 assert(params->wsize >= ne13*nbw3);
1563 GGML_ASSERT(src1->type == GGML_TYPE_F32);
1564
1565#if 0
1566 for (int64_t i13 = 0; i13 < ne13; ++i13) {
1567 for (int64_t i12 = ith; i12 < ne12; i12 += nth) {
1568 for (int64_t i11 = 0; i11 < ne11; ++i11) {
1569 from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11),
1570 (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1),
1571 ne10);
1572 }
1573 }
1574 }
1575#else
1576 for (int64_t i13 = 0; i13 < ne13; ++i13) {
1577 for (int64_t i12 = 0; i12 < ne12; ++i12) {
1578 for (int64_t i11 = 0; i11 < ne11; ++i11) {
1579 size_t bs = ggml_blck_size(vec_dot_type);
1580 int64_t ne10_block_start = (ith * ne10/bs) / nth;
1581 int64_t ne10_block_end = ((ith + 1) * ne10/bs) / nth;
1582 from_float((float *)((char *) src1->data + i13*nb13 + i12*nb12 + i11*nb11 + ne10_block_start*bs*nb10),
1583 (void *) (wdata + i13*nbw3 + i12*nbw2 + i11*nbw1 + ne10_block_start*nbw0),
1584 (ne10_block_end - ne10_block_start) * bs);
1585 }
1586 }
1587 }
1588#endif
1589 }
1590
1591 if (ith == 0) {
1592 // initialize matrix_row_counts
1593 memset(matrix_row_counts, 0, n_as*sizeof(int64_t));
1594
1595 // group rows by src0 matrix
1596 for (int64_t iid1 = 0; iid1 < ids->ne[1]; ++iid1) {
1597 for (int id = 0; id < n_ids; ++id) {
1598 const int32_t i02 = *(const int32_t *) ((const char *) ids->data + iid1*ids->nb[1] + id*ids->nb[0]);
1599
1600 assert(i02 >= 0 && i02 < n_as);
1601
1602 MMID_MATRIX_ROW(i02, matrix_row_counts[i02]) = (struct mmid_row_mapping) {id, iid1};
1603 matrix_row_counts[i02] += 1;
1604 }
1605 }
1606 }
1607
1608 // reset current_chunk
1609 for (int cur_a = ith; cur_a < n_as; cur_a += nth) {
1610 atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
1611 *current_chunk_ctr = nth;
1612 }
1613
1614 ggml_barrier(params->threadpool);
1615
1616 for (int cur_a = 0; cur_a < n_as; ++cur_a) {
1617 const int64_t cne1 = matrix_row_counts[cur_a];
1618
1619 if (cne1 == 0) {
1620 continue;
1621 }
1622
1623 const char * src0_cur = (const char *) src0->data + cur_a * nb02;
1624 const void * wdata = (src1->type == vec_dot_type) ? src1->data : params->wdata;
1625 const size_t row_size = ggml_row_size(vec_dot_type, ne10);
1626
1627 const int64_t nr0 = ne01;
1628 const int64_t nr1 = cne1;
1629
1630 int chunk_size = 16;
1631 if (nr0 == 1 || nr1 == 1) {
1632 chunk_size = 64;
1633 }
1634
1635 // disable for NUMA
1636 const bool disable_chunking = ggml_is_numa();
1637
1638 int64_t nchunk0 = (nr0 + chunk_size - 1) / chunk_size;
1639 int64_t nchunk1 = (nr1 + chunk_size - 1) / chunk_size;
1640
1641 if (nchunk0 * nchunk1 < nth * 4 || disable_chunking) {
1642 nchunk0 = nr0 > nr1 ? nth : 1;
1643 nchunk1 = nr0 > nr1 ? 1 : nth;
1644 }
1645
1646 const int64_t dr0 = (nr0 + nchunk0 - 1) / nchunk0;
1647 const int64_t dr1 = (nr1 + nchunk1 - 1) / nchunk1;
1648
1649 int current_chunk = ith;
1650
1651 atomic_int * current_chunk_ctr = (atomic_int *)(atomic_current_chunk + cur_a);
1652
1653 while (current_chunk < nchunk0 * nchunk1) {
1654 const int64_t ith0 = current_chunk % nchunk0;
1655 const int64_t ith1 = current_chunk / nchunk0;
1656
1657 const int64_t ir0_start = dr0 * ith0;
1658 const int64_t ir0_end = MIN(ir0_start + dr0, nr0);
1659
1660 const int64_t ir1_start = dr1 * ith1;
1661 const int64_t ir1_end = MIN(ir1_start + dr1, nr1);
1662
1663 ggml_compute_forward_mul_mat_id_one_chunk(
1664 dst, src0, src1, ids, cur_a,
1665 ir0_start, ir0_end, ir1_start, ir1_end,
1666 src0_cur, matrix_rows, row_size, src1_cont, wdata
1667 );
1668
1669 if (nth >= nchunk0 * nchunk1) {
1670 break;
1671 }
1672
1673 current_chunk = atomic_fetch_add_explicit(current_chunk_ctr, 1, memory_order_relaxed);
1674 }
1675 }
1676}
1677
1678/////////////////////////////////
1679
1680static void ggml_compute_forward(struct ggml_compute_params * params, struct ggml_tensor * tensor) {
1681 GGML_ASSERT(params);
1682
1683 if (tensor->op == GGML_OP_NONE || ggml_is_empty(tensor)) {
1684 return;
1685 }
1686
1687 // extra_buffer op?
1688 if (ggml_cpu_extra_compute_forward(params, tensor)) {
1689 return;
1690 }
1691
1692 switch (tensor->op) {
1693 case GGML_OP_DUP:
1694 {
1695 ggml_compute_forward_dup(params, tensor);
1696 } break;
1697 case GGML_OP_ADD:
1698 {
1699 ggml_compute_forward_add(params, tensor);
1700 } break;
1701 case GGML_OP_ADD_ID:
1702 {
1703 ggml_compute_forward_add_id(params, tensor);
1704 } break;
1705 case GGML_OP_ADD1:
1706 {
1707 ggml_compute_forward_add1(params, tensor);
1708 } break;
1709 case GGML_OP_ACC:
1710 {
1711 ggml_compute_forward_acc(params, tensor);
1712 } break;
1713 case GGML_OP_SUB:
1714 {
1715 ggml_compute_forward_sub(params, tensor);
1716 } break;
1717 case GGML_OP_MUL:
1718 {
1719 ggml_compute_forward_mul(params, tensor);
1720 } break;
1721 case GGML_OP_DIV:
1722 {
1723 ggml_compute_forward_div(params, tensor);
1724 } break;
1725 case GGML_OP_SQR:
1726 {
1727 ggml_compute_forward_sqr(params, tensor);
1728 } break;
1729 case GGML_OP_SQRT:
1730 {
1731 ggml_compute_forward_sqrt(params, tensor);
1732 } break;
1733 case GGML_OP_LOG:
1734 {
1735 ggml_compute_forward_log(params, tensor);
1736 } break;
1737 case GGML_OP_SIN:
1738 {
1739 ggml_compute_forward_sin(params, tensor);
1740 } break;
1741 case GGML_OP_COS:
1742 {
1743 ggml_compute_forward_cos(params, tensor);
1744 } break;
1745 case GGML_OP_SUM:
1746 {
1747 ggml_compute_forward_sum(params, tensor);
1748 } break;
1749 case GGML_OP_SUM_ROWS:
1750 {
1751 ggml_compute_forward_sum_rows(params, tensor);
1752 } break;
1753 case GGML_OP_CUMSUM:
1754 {
1755 ggml_compute_forward_cumsum(params, tensor);
1756 } break;
1757 case GGML_OP_MEAN:
1758 {
1759 ggml_compute_forward_mean(params, tensor);
1760 } break;
1761 case GGML_OP_ARGMAX:
1762 {
1763 ggml_compute_forward_argmax(params, tensor);
1764 } break;
1765 case GGML_OP_COUNT_EQUAL:
1766 {
1767 ggml_compute_forward_count_equal(params, tensor);
1768 } break;
1769 case GGML_OP_REPEAT:
1770 {
1771 ggml_compute_forward_repeat(params, tensor);
1772 } break;
1773 case GGML_OP_REPEAT_BACK:
1774 {
1775 ggml_compute_forward_repeat_back(params, tensor);
1776 } break;
1777 case GGML_OP_CONCAT:
1778 {
1779 ggml_compute_forward_concat(params, tensor);
1780 } break;
1781 case GGML_OP_SILU_BACK:
1782 {
1783 ggml_compute_forward_silu_back(params, tensor);
1784 } break;
1785 case GGML_OP_NORM:
1786 {
1787 ggml_compute_forward_norm(params, tensor);
1788 } break;
1789 case GGML_OP_RMS_NORM:
1790 {
1791 ggml_compute_forward_rms_norm(params, tensor);
1792 } break;
1793 case GGML_OP_RMS_NORM_BACK:
1794 {
1795 ggml_compute_forward_rms_norm_back(params, tensor);
1796 } break;
1797 case GGML_OP_GROUP_NORM:
1798 {
1799 ggml_compute_forward_group_norm(params, tensor);
1800 } break;
1801 case GGML_OP_L2_NORM:
1802 {
1803 ggml_compute_forward_l2_norm(params, tensor);
1804 } break;
1805 case GGML_OP_MUL_MAT:
1806 {
1807 ggml_compute_forward_mul_mat(params, tensor);
1808 } break;
1809 case GGML_OP_MUL_MAT_ID:
1810 {
1811 ggml_compute_forward_mul_mat_id(params, tensor);
1812 } break;
1813 case GGML_OP_OUT_PROD:
1814 {
1815 ggml_compute_forward_out_prod(params, tensor);
1816 } break;
1817 case GGML_OP_SCALE:
1818 {
1819 ggml_compute_forward_scale(params, tensor);
1820 } break;
1821 case GGML_OP_SET:
1822 {
1823 ggml_compute_forward_set(params, tensor);
1824 } break;
1825 case GGML_OP_CPY:
1826 {
1827 ggml_compute_forward_cpy(params, tensor);
1828 } break;
1829 case GGML_OP_CONT:
1830 {
1831 ggml_compute_forward_cont(params, tensor);
1832 } break;
1833 case GGML_OP_GET_ROWS:
1834 {
1835 ggml_compute_forward_get_rows(params, tensor);
1836 } break;
1837 case GGML_OP_GET_ROWS_BACK:
1838 {
1839 ggml_compute_forward_get_rows_back(params, tensor);
1840 } break;
1841 case GGML_OP_SET_ROWS:
1842 {
1843 ggml_compute_forward_set_rows(params, tensor);
1844 } break;
1845 case GGML_OP_DIAG:
1846 {
1847 ggml_compute_forward_diag(params, tensor);
1848 } break;
1849 case GGML_OP_DIAG_MASK_INF:
1850 {
1851 ggml_compute_forward_diag_mask_inf(params, tensor);
1852 } break;
1853 case GGML_OP_DIAG_MASK_ZERO:
1854 {
1855 ggml_compute_forward_diag_mask_zero(params, tensor);
1856 } break;
1857 case GGML_OP_SOFT_MAX:
1858 {
1859 ggml_compute_forward_soft_max(params, tensor);
1860 } break;
1861 case GGML_OP_SOFT_MAX_BACK:
1862 {
1863 ggml_compute_forward_soft_max_ext_back(params, tensor);
1864 } break;
1865 case GGML_OP_ROPE:
1866 {
1867 ggml_compute_forward_rope(params, tensor);
1868 } break;
1869 case GGML_OP_ROPE_BACK:
1870 {
1871 ggml_compute_forward_rope_back(params, tensor);
1872 } break;
1873 case GGML_OP_CLAMP:
1874 {
1875 ggml_compute_forward_clamp(params, tensor);
1876 } break;
1877 case GGML_OP_CONV_TRANSPOSE_1D:
1878 {
1879 ggml_compute_forward_conv_transpose_1d(params, tensor);
1880 } break;
1881 case GGML_OP_IM2COL:
1882 {
1883 ggml_compute_forward_im2col(params, tensor);
1884 } break;
1885 case GGML_OP_IM2COL_BACK:
1886 {
1887 ggml_compute_forward_im2col_back_f32(params, tensor);
1888 } break;
1889 case GGML_OP_IM2COL_3D:
1890 {
1891 ggml_compute_forward_im2col_3d(params, tensor);
1892 } break;
1893 case GGML_OP_CONV_2D:
1894 {
1895 ggml_compute_forward_conv_2d(params, tensor);
1896 } break;
1897 case GGML_OP_CONV_3D:
1898 {
1899 ggml_compute_forward_conv_3d(params, tensor);
1900 } break;
1901 case GGML_OP_CONV_2D_DW:
1902 {
1903 ggml_compute_forward_conv_2d_dw(params, tensor);
1904 } break;
1905 case GGML_OP_CONV_TRANSPOSE_2D:
1906 {
1907 ggml_compute_forward_conv_transpose_2d(params, tensor);
1908 } break;
1909 case GGML_OP_POOL_1D:
1910 {
1911 ggml_compute_forward_pool_1d(params, tensor);
1912 } break;
1913 case GGML_OP_POOL_2D:
1914 {
1915 ggml_compute_forward_pool_2d(params, tensor);
1916 } break;
1917 case GGML_OP_POOL_2D_BACK:
1918 {
1919 ggml_compute_forward_pool_2d_back(params, tensor);
1920 } break;
1921 case GGML_OP_UPSCALE:
1922 {
1923 ggml_compute_forward_upscale(params, tensor);
1924 } break;
1925 case GGML_OP_PAD:
1926 {
1927 ggml_compute_forward_pad(params, tensor);
1928 } break;
1929 case GGML_OP_PAD_REFLECT_1D:
1930 {
1931 ggml_compute_forward_pad_reflect_1d(params, tensor);
1932 } break;
1933 case GGML_OP_ROLL:
1934 {
1935 ggml_compute_forward_roll(params, tensor);
1936 } break;
1937 case GGML_OP_ARANGE:
1938 {
1939 ggml_compute_forward_arange(params, tensor);
1940 } break;
1941 case GGML_OP_TIMESTEP_EMBEDDING:
1942 {
1943 ggml_compute_forward_timestep_embedding(params, tensor);
1944 } break;
1945 case GGML_OP_ARGSORT:
1946 {
1947 ggml_compute_forward_argsort(params, tensor);
1948 } break;
1949 case GGML_OP_TOP_K:
1950 {
1951 ggml_compute_forward_top_k(params, tensor);
1952 } break;
1953 case GGML_OP_LEAKY_RELU:
1954 {
1955 ggml_compute_forward_leaky_relu(params, tensor);
1956 } break;
1957 case GGML_OP_TRI:
1958 {
1959 ggml_compute_forward_tri(params, tensor);
1960 } break;
1961 case GGML_OP_FILL:
1962 {
1963 ggml_compute_forward_fill(params, tensor);
1964 } break;
1965 case GGML_OP_FLASH_ATTN_EXT:
1966 {
1967 ggml_compute_forward_flash_attn_ext(params, tensor);
1968 } break;
1969 case GGML_OP_FLASH_ATTN_BACK:
1970 {
1971 int32_t t = ggml_get_op_params_i32(tensor, 0);
1972 GGML_ASSERT(t == 0 || t == 1);
1973 bool masked = t != 0;
1974 ggml_compute_forward_flash_attn_back(params, masked, tensor);
1975 } break;
1976 case GGML_OP_SSM_CONV:
1977 {
1978 ggml_compute_forward_ssm_conv(params, tensor);
1979 } break;
1980 case GGML_OP_SSM_SCAN:
1981 {
1982 ggml_compute_forward_ssm_scan(params, tensor);
1983 } break;
1984 case GGML_OP_WIN_PART:
1985 {
1986 ggml_compute_forward_win_part(params, tensor);
1987 } break;
1988 case GGML_OP_WIN_UNPART:
1989 {
1990 ggml_compute_forward_win_unpart(params, tensor);
1991 } break;
1992 case GGML_OP_UNARY:
1993 {
1994 ggml_compute_forward_unary(params, tensor);
1995 } break;
1996 case GGML_OP_GLU:
1997 {
1998 ggml_compute_forward_glu(params, tensor);
1999 } break;
2000 case GGML_OP_GET_REL_POS:
2001 {
2002 ggml_compute_forward_get_rel_pos(params, tensor);
2003 } break;
2004 case GGML_OP_ADD_REL_POS:
2005 {
2006 ggml_compute_forward_add_rel_pos(params, tensor);
2007 } break;
2008 case GGML_OP_RWKV_WKV6:
2009 {
2010 ggml_compute_forward_rwkv_wkv6(params, tensor);
2011 } break;
2012 case GGML_OP_GATED_LINEAR_ATTN:
2013 {
2014 ggml_compute_forward_gla(params, tensor);
2015 } break;
2016 case GGML_OP_RWKV_WKV7:
2017 {
2018 ggml_compute_forward_rwkv_wkv7(params, tensor);
2019 } break;
2020 case GGML_OP_SOLVE_TRI:
2021 {
2022 ggml_compute_forward_solve_tri(params, tensor);
2023 } break;
2024 case GGML_OP_MAP_CUSTOM1:
2025 {
2026 ggml_compute_forward_map_custom1(params, tensor);
2027 }
2028 break;
2029 case GGML_OP_MAP_CUSTOM2:
2030 {
2031 ggml_compute_forward_map_custom2(params, tensor);
2032 }
2033 break;
2034 case GGML_OP_MAP_CUSTOM3:
2035 {
2036 ggml_compute_forward_map_custom3(params, tensor);
2037 }
2038 break;
2039 case GGML_OP_CUSTOM:
2040 {
2041 ggml_compute_forward_custom(params, tensor);
2042 }
2043 break;
2044 case GGML_OP_CROSS_ENTROPY_LOSS:
2045 {
2046 ggml_compute_forward_cross_entropy_loss(params, tensor);
2047 }
2048 break;
2049 case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
2050 {
2051 ggml_compute_forward_cross_entropy_loss_back(params, tensor);
2052 }
2053 break;
2054 case GGML_OP_OPT_STEP_ADAMW:
2055 {
2056 ggml_compute_forward_opt_step_adamw(params, tensor);
2057 }
2058 break;
2059 case GGML_OP_OPT_STEP_SGD:
2060 {
2061 ggml_compute_forward_opt_step_sgd(params, tensor);
2062 }
2063 break;
2064 case GGML_OP_NONE:
2065 {
2066 // nop
2067 } break;
2068 case GGML_OP_RESHAPE:
2069 {
2070 // nop
2071 } break;
2072 case GGML_OP_PERMUTE:
2073 {
2074 // nop
2075 } break;
2076 case GGML_OP_VIEW:
2077 {
2078 // nop
2079 } break;
2080 case GGML_OP_TRANSPOSE:
2081 {
2082 // nop
2083 } break;
2084 case GGML_OP_COUNT:
2085 {
2086 GGML_ABORT("fatal error");
2087 }
2088 }
2089}
2090
2091// Android's libc implementation "bionic" does not support setting affinity
2092#if defined(__gnu_linux__)
2093static void set_numa_thread_affinity(int thread_n) {
2094 if (!ggml_is_numa()) {
2095 return;
2096 }
2097
2098 int node_num;
2099 int rv;
2100 size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
2101
2102 switch(g_state.numa.numa_strategy) {
2103 case GGML_NUMA_STRATEGY_DISTRIBUTE:
2104 // run thread on node_num thread_n / (threads per node)
2105 node_num = thread_n % g_state.numa.n_nodes;
2106 break;
2107 case GGML_NUMA_STRATEGY_ISOLATE:
2108 // run thread on current_node
2109 node_num = g_state.numa.current_node;
2110 break;
2111 case GGML_NUMA_STRATEGY_NUMACTL:
2112 // use the cpuset that numactl gave us
2113 rv = pthread_setaffinity_np(pthread_self(), setsize, &g_state.numa.cpuset);
2114 if (rv) {
2115 fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n",strerror(rv));
2116 }
2117 return;
2118 default:
2119 return;
2120 }
2121
2122 struct ggml_numa_node * node = &g_state.numa.nodes[node_num];
2123
2124 cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
2125 CPU_ZERO_S(setsize, cpus);
2126 for (size_t i = 0; i < node->n_cpus; ++i) {
2127 CPU_SET_S(node->cpus[i], setsize, cpus);
2128 }
2129
2130 rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
2131 if (rv) {
2132 fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
2133 }
2134
2135 CPU_FREE(cpus);
2136}
2137
2138static void clear_numa_thread_affinity(void) {
2139 if (!ggml_is_numa()) {
2140 return;
2141 }
2142
2143 size_t setsize = CPU_ALLOC_SIZE(g_state.numa.total_cpus);
2144
2145 cpu_set_t * cpus = CPU_ALLOC(g_state.numa.total_cpus);
2146 CPU_ZERO_S(setsize, cpus);
2147 for (unsigned i = 0; i < g_state.numa.total_cpus; ++i) {
2148 CPU_SET_S(i, setsize, cpus);
2149 }
2150
2151 int rv = pthread_setaffinity_np(pthread_self(), setsize, cpus);
2152 if (rv) {
2153 fprintf(stderr, "warning: pthread_setaffinity_np() failed: %s\n", strerror(rv));
2154 }
2155
2156 CPU_FREE(cpus);
2157}
2158#else
2159// TODO: Windows etc.
2160// (the linux implementation may also work on BSD, someone should test)
2161static void set_numa_thread_affinity(int thread_n) { UNUSED(thread_n); }
2162static void clear_numa_thread_affinity(void) {}
2163#endif
2164
2165static int ggml_get_n_tasks(struct ggml_tensor * node, int n_threads) {
2166 int n_tasks = 0;
2167
2168 if (ggml_is_empty(node)) {
2169 // no need to multi-thread a no-op
2170 n_tasks = 1;
2171 return n_tasks;
2172 }
2173
2174 switch (node->op) {
2175 case GGML_OP_CPY:
2176 case GGML_OP_DUP:
2177 case GGML_OP_CONT:
2178 case GGML_OP_ADD:
2179 case GGML_OP_ADD_ID:
2180 case GGML_OP_ADD1:
2181 case GGML_OP_ACC:
2182 case GGML_OP_CUMSUM:
2183 case GGML_OP_TRI:
2184 case GGML_OP_FILL:
2185 {
2186 n_tasks = n_threads;
2187 } break;
2188 case GGML_OP_SUB:
2189 case GGML_OP_SQR:
2190 case GGML_OP_SQRT:
2191 case GGML_OP_LOG:
2192 case GGML_OP_SIN:
2193 case GGML_OP_COS:
2194 case GGML_OP_SUM:
2195 case GGML_OP_SUM_ROWS:
2196 case GGML_OP_MEAN:
2197 case GGML_OP_ARGMAX:
2198 {
2199 n_tasks = 1;
2200 } break;
2201 case GGML_OP_COUNT_EQUAL:
2202 case GGML_OP_SOLVE_TRI:
2203 {
2204 n_tasks = n_threads;
2205 } break;
2206 case GGML_OP_REPEAT:
2207 case GGML_OP_REPEAT_BACK:
2208 case GGML_OP_LEAKY_RELU:
2209 {
2210 n_tasks = 1;
2211 } break;
2212 case GGML_OP_UNARY:
2213 switch (ggml_get_unary_op(node)) {
2214 case GGML_UNARY_OP_ABS:
2215 case GGML_UNARY_OP_SGN:
2216 case GGML_UNARY_OP_NEG:
2217 case GGML_UNARY_OP_STEP:
2218 case GGML_UNARY_OP_TANH:
2219 case GGML_UNARY_OP_ELU:
2220 case GGML_UNARY_OP_RELU:
2221 case GGML_UNARY_OP_SIGMOID:
2222 case GGML_UNARY_OP_HARDSWISH:
2223 case GGML_UNARY_OP_HARDSIGMOID:
2224 case GGML_UNARY_OP_EXP:
2225 case GGML_UNARY_OP_SOFTPLUS:
2226 case GGML_UNARY_OP_EXPM1:
2227 case GGML_UNARY_OP_FLOOR:
2228 case GGML_UNARY_OP_CEIL:
2229 case GGML_UNARY_OP_ROUND:
2230 case GGML_UNARY_OP_TRUNC:
2231 {
2232 n_tasks = 1;
2233 } break;
2234
2235 case GGML_UNARY_OP_GELU:
2236 case GGML_UNARY_OP_GELU_ERF:
2237 case GGML_UNARY_OP_GELU_QUICK:
2238 case GGML_UNARY_OP_SILU:
2239 case GGML_UNARY_OP_XIELU:
2240 {
2241 n_tasks = n_threads;
2242 } break;
2243 default:
2244 GGML_ABORT("fatal error");
2245 }
2246 break;
2247 case GGML_OP_GLU:
2248 switch (ggml_get_glu_op(node)) {
2249 case GGML_GLU_OP_REGLU:
2250 case GGML_GLU_OP_GEGLU:
2251 case GGML_GLU_OP_SWIGLU:
2252 case GGML_GLU_OP_SWIGLU_OAI:
2253 case GGML_GLU_OP_GEGLU_ERF:
2254 case GGML_GLU_OP_GEGLU_QUICK:
2255 {
2256 n_tasks = n_threads;
2257 } break;
2258 default:
2259 GGML_ABORT("fatal error");
2260 }
2261 break;
2262 case GGML_OP_SILU_BACK:
2263 case GGML_OP_MUL:
2264 case GGML_OP_DIV:
2265 case GGML_OP_NORM:
2266 case GGML_OP_RMS_NORM:
2267 case GGML_OP_RMS_NORM_BACK:
2268 case GGML_OP_L2_NORM:
2269 case GGML_OP_GROUP_NORM:
2270 case GGML_OP_CONCAT:
2271 case GGML_OP_MUL_MAT:
2272 case GGML_OP_MUL_MAT_ID:
2273 case GGML_OP_OUT_PROD:
2274 {
2275 n_tasks = n_threads;
2276 } break;
2277 case GGML_OP_GET_ROWS:
2278 case GGML_OP_SET_ROWS:
2279 {
2280 // FIXME: get_rows can use additional threads, but the cost of launching additional threads
2281 // decreases performance with GPU offloading
2282 //n_tasks = n_threads;
2283 n_tasks = 1;
2284 } break;
2285 case GGML_OP_SCALE:
2286 case GGML_OP_SET:
2287 case GGML_OP_RESHAPE:
2288 case GGML_OP_VIEW:
2289 case GGML_OP_PERMUTE:
2290 case GGML_OP_TRANSPOSE:
2291 case GGML_OP_GET_ROWS_BACK:
2292 case GGML_OP_DIAG:
2293 {
2294 n_tasks = 1;
2295 } break;
2296 case GGML_OP_DIAG_MASK_ZERO:
2297 case GGML_OP_DIAG_MASK_INF:
2298 case GGML_OP_SOFT_MAX_BACK:
2299 case GGML_OP_ROPE:
2300 case GGML_OP_ROPE_BACK:
2301 case GGML_OP_ADD_REL_POS:
2302 {
2303 n_tasks = n_threads;
2304 } break;
2305 case GGML_OP_CLAMP:
2306 {
2307 n_tasks = 1; //TODO
2308 } break;
2309 case GGML_OP_SOFT_MAX:
2310 {
2311 n_tasks = MIN(n_threads, ggml_nrows(node->src[0]));
2312 } break;
2313 case GGML_OP_IM2COL:
2314 case GGML_OP_IM2COL_BACK:
2315 case GGML_OP_IM2COL_3D:
2316 case GGML_OP_CONV_2D:
2317 case GGML_OP_CONV_3D:
2318 case GGML_OP_CONV_2D_DW:
2319 case GGML_OP_CONV_TRANSPOSE_1D:
2320 case GGML_OP_CONV_TRANSPOSE_2D:
2321 {
2322 n_tasks = n_threads;
2323 } break;
2324 case GGML_OP_POOL_1D:
2325 case GGML_OP_POOL_2D:
2326 case GGML_OP_POOL_2D_BACK:
2327 {
2328 n_tasks = 1;
2329 } break;
2330 case GGML_OP_UPSCALE:
2331 case GGML_OP_PAD:
2332 case GGML_OP_PAD_REFLECT_1D:
2333 case GGML_OP_ROLL:
2334 case GGML_OP_ARANGE:
2335 case GGML_OP_TIMESTEP_EMBEDDING:
2336 case GGML_OP_ARGSORT:
2337 case GGML_OP_TOP_K:
2338 case GGML_OP_FLASH_ATTN_EXT:
2339 case GGML_OP_FLASH_ATTN_BACK:
2340 case GGML_OP_SSM_CONV:
2341 case GGML_OP_SSM_SCAN:
2342 case GGML_OP_RWKV_WKV6:
2343 case GGML_OP_GATED_LINEAR_ATTN:
2344 case GGML_OP_RWKV_WKV7:
2345 {
2346 n_tasks = n_threads;
2347 } break;
2348 case GGML_OP_WIN_PART:
2349 case GGML_OP_WIN_UNPART:
2350 case GGML_OP_GET_REL_POS:
2351 {
2352 n_tasks = 1;
2353 } break;
2354 case GGML_OP_MAP_CUSTOM1:
2355 {
2356 struct ggml_map_custom1_op_params p;
2357 memcpy(&p, node->op_params, sizeof(p));
2358 if (p.n_tasks == GGML_N_TASKS_MAX) {
2359 n_tasks = n_threads;
2360 } else {
2361 n_tasks = MIN(p.n_tasks, n_threads);
2362 }
2363 } break;
2364 case GGML_OP_MAP_CUSTOM2:
2365 {
2366 struct ggml_map_custom2_op_params p;
2367 memcpy(&p, node->op_params, sizeof(p));
2368 if (p.n_tasks == GGML_N_TASKS_MAX) {
2369 n_tasks = n_threads;
2370 } else {
2371 n_tasks = MIN(p.n_tasks, n_threads);
2372 }
2373 } break;
2374 case GGML_OP_MAP_CUSTOM3:
2375 {
2376 struct ggml_map_custom3_op_params p;
2377 memcpy(&p, node->op_params, sizeof(p));
2378 if (p.n_tasks == GGML_N_TASKS_MAX) {
2379 n_tasks = n_threads;
2380 } else {
2381 n_tasks = MIN(p.n_tasks, n_threads);
2382 }
2383 } break;
2384 case GGML_OP_CUSTOM:
2385 {
2386 struct ggml_custom_op_params p;
2387 memcpy(&p, node->op_params, sizeof(p));
2388 if (p.n_tasks == GGML_N_TASKS_MAX) {
2389 n_tasks = n_threads;
2390 } else {
2391 n_tasks = MIN(p.n_tasks, n_threads);
2392 }
2393 } break;
2394 case GGML_OP_CROSS_ENTROPY_LOSS:
2395 case GGML_OP_CROSS_ENTROPY_LOSS_BACK:
2396 case GGML_OP_OPT_STEP_ADAMW:
2397 case GGML_OP_OPT_STEP_SGD:
2398 {
2399 n_tasks = n_threads;
2400 } break;
2401 case GGML_OP_NONE:
2402 {
2403 n_tasks = 1;
2404 } break;
2405 case GGML_OP_COUNT:
2406 {
2407 GGML_ABORT("fatal error");
2408 }
2409 default:
2410 {
2411 fprintf(stderr, "%s: op not implemented: ", __func__);
2412 if (node->op < GGML_OP_COUNT) {
2413 fprintf(stderr, "%s\n", ggml_op_name(node->op));
2414 } else {
2415 fprintf(stderr, "%d\n", node->op);
2416 }
2417 GGML_ABORT("fatal error");
2418 }
2419 }
2420
2421 assert(n_tasks > 0);
2422
2423 return n_tasks;
2424}
2425
2426static thread_ret_t ggml_graph_compute_secondary_thread(void* data);
2427
2428#if defined(_WIN32)
2429#include "windows.h"
2430
2431// TODO: support > 64 CPUs
2432static bool ggml_thread_apply_affinity(bool * mask) {
2433 HANDLE h = GetCurrentThread();
2434 uint64_t bitmask = 0ULL;
2435
2436 assert(GGML_MAX_N_THREADS >= 64);
2437
2438 for (int32_t i = 0; i < 8; i++) {
2439 int32_t idx = i * 8;
2440 uint8_t val = 0;
2441 val |= mask[idx + 0] << 0;
2442 val |= mask[idx + 1] << 1;
2443 val |= mask[idx + 2] << 2;
2444 val |= mask[idx + 3] << 3;
2445 val |= mask[idx + 4] << 4;
2446 val |= mask[idx + 5] << 5;
2447 val |= mask[idx + 6] << 6;
2448 val |= mask[idx + 7] << 7;
2449 bitmask |= (uint64_t)val << idx;
2450 }
2451
2452 for (int32_t i = 64; i < GGML_MAX_N_THREADS; i++) {
2453 if (mask[i]) {
2454 fprintf(stderr, "warn: setting thread-affinity for > 64 CPUs isn't supported on windows!\n");
2455 break;
2456 }
2457 }
2458
2459 DWORD_PTR m = (DWORD_PTR)bitmask;
2460
2461 m = SetThreadAffinityMask(h, m);
2462
2463 return m != 0;
2464}
2465
2466static bool ggml_thread_apply_priority(int32_t prio) {
2467 // Note that on Windows the Process Priority Class must be updated in order to set Thread priority.
2468 // This is up to the applications.
2469 DWORD p = THREAD_PRIORITY_NORMAL;
2470 switch (prio) {
2471 case GGML_SCHED_PRIO_LOW: p = THREAD_PRIORITY_BELOW_NORMAL; break;
2472 case GGML_SCHED_PRIO_NORMAL: p = THREAD_PRIORITY_NORMAL; break;
2473 case GGML_SCHED_PRIO_MEDIUM: p = THREAD_PRIORITY_ABOVE_NORMAL; break;
2474 case GGML_SCHED_PRIO_HIGH: p = THREAD_PRIORITY_HIGHEST; break;
2475 case GGML_SCHED_PRIO_REALTIME: p = THREAD_PRIORITY_TIME_CRITICAL; break;
2476 }
2477
2478 if (prio != GGML_SCHED_PRIO_LOW) {
2479 // Tell Windows that this thread should not be throttled (needs its own CPU core).
2480 // Newer Windows 11 versions aggresively park (offline) CPU cores and often place
2481 // all our threads onto the first 4 cores which results in terrible performance with
2482 // n_threads > 4
2483 #if _WIN32_WINNT >= 0x0602
2484 THREAD_POWER_THROTTLING_STATE t;
2485 ZeroMemory(&t, sizeof(t));
2486 t.Version = THREAD_POWER_THROTTLING_CURRENT_VERSION;
2487 t.ControlMask = THREAD_POWER_THROTTLING_EXECUTION_SPEED;
2488 t.StateMask = 0;
2489
2490 if (!SetThreadInformation(GetCurrentThread(), ThreadPowerThrottling, &t, sizeof(t))) {
2491 GGML_LOG_DEBUG("failed to disable thread power throttling %d : (%d)\n", prio, (int) GetLastError());
2492 return false;
2493 }
2494 #endif
2495 }
2496
2497 if (prio == GGML_SCHED_PRIO_NORMAL) {
2498 // Keep inherited policy/priority
2499 return true;
2500 }
2501
2502 if (!SetThreadPriority(GetCurrentThread(), p)) {
2503 fprintf(stderr, "warn: failed to set thread priority %d : (%d)\n", prio, (int) GetLastError());
2504 return false;
2505 }
2506
2507 return true;
2508}
2509
2510#elif defined(__APPLE__)
2511#include <sys/types.h>
2512#include <sys/resource.h>
2513
2514static bool ggml_thread_apply_affinity(const bool * mask) {
2515 // Not supported on Apple platforms
2516 UNUSED(mask);
2517 return true;
2518}
2519
2520static bool ggml_thread_apply_priority(int32_t prio) {
2521 struct sched_param p;
2522 int32_t policy = SCHED_OTHER;
2523 switch (prio) {
2524 // TODO: there seems to be no way to set lower prio on Apple platforms
2525 case GGML_SCHED_PRIO_LOW: policy = SCHED_OTHER; p.sched_priority = 0; break;
2526 case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break;
2527 case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break;
2528 case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break;
2529 case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break;
2530 }
2531
2532 if (prio == GGML_SCHED_PRIO_NORMAL) {
2533 // Keep inherited policy/priority
2534 return true;
2535 }
2536
2537 int32_t err = pthread_setschedparam(pthread_self(), policy, &p);
2538 if (err != 0) {
2539 fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err);
2540 return false;
2541 }
2542
2543 return true;
2544}
2545
2546#elif defined(__gnu_linux__)
2547// TODO: this may not work on BSD, to be verified
2548
2549static bool ggml_thread_apply_affinity(const bool * mask) {
2550 cpu_set_t cpuset;
2551 int err;
2552
2553 CPU_ZERO(&cpuset);
2554
2555 for (uint32_t i = 0; i < GGML_MAX_N_THREADS; i++) {
2556 if (mask[i]) {
2557 GGML_PRINT_DEBUG("Thread %lx: adding %d to cpuset\n", pthread_self(), i);
2558 CPU_SET(i, &cpuset);
2559 }
2560 }
2561
2562#ifdef __ANDROID__
2563 err = sched_setaffinity(0, sizeof(cpuset), &cpuset);
2564 if (err < 0) {
2565 err = errno;
2566 }
2567#else
2568 err = pthread_setaffinity_np(pthread_self(), sizeof(cpuset), &cpuset);
2569#endif
2570 if (err != 0) {
2571 fprintf(stderr, "warn: failed to set affinity mask 0x%llx : %s (%d)\n", (unsigned long long)mask, strerror(err), err);
2572 return false;
2573 }
2574
2575 return true;
2576}
2577
2578static bool ggml_thread_apply_priority(int32_t prio) {
2579 struct sched_param p;
2580 int32_t policy = SCHED_OTHER;
2581 switch (prio) {
2582 case GGML_SCHED_PRIO_LOW: policy = SCHED_BATCH; p.sched_priority = 0; break;
2583 case GGML_SCHED_PRIO_NORMAL: policy = SCHED_OTHER; p.sched_priority = 0; break;
2584 case GGML_SCHED_PRIO_MEDIUM: policy = SCHED_FIFO; p.sched_priority = 40; break;
2585 case GGML_SCHED_PRIO_HIGH: policy = SCHED_FIFO; p.sched_priority = 80; break;
2586 case GGML_SCHED_PRIO_REALTIME: policy = SCHED_FIFO; p.sched_priority = 90; break;
2587 }
2588
2589 if (prio == GGML_SCHED_PRIO_NORMAL) {
2590 // Keep inherited policy/priority
2591 return true;
2592 }
2593
2594 int32_t err = pthread_setschedparam(pthread_self(), policy, &p);
2595 if (err != 0) {
2596 fprintf(stderr, "warn: failed to set thread priority %d : %s (%d)\n", prio, strerror(err), err);
2597 return false;
2598 }
2599
2600 return true;
2601}
2602
2603#else // unsupported platforms
2604
2605static bool ggml_thread_apply_affinity(const bool * mask) {
2606 UNUSED(mask);
2607 return true;
2608}
2609
2610static bool ggml_thread_apply_priority(int32_t prio) {
2611 UNUSED(prio);
2612 return true;
2613}
2614
2615#endif
2616
2617static bool ggml_thread_cpumask_is_valid(const bool * mask) {
2618 for (int i = 0; i < GGML_MAX_N_THREADS; i++) {
2619 if (mask[i]) { return true; }
2620 }
2621 return false;
2622}
2623
2624static void ggml_thread_cpumask_next(const bool * global_mask, bool * local_mask, bool strict, int32_t* iter) {
2625 if (!strict) {
2626 memcpy(local_mask, global_mask, GGML_MAX_N_THREADS);
2627 return;
2628 } else {
2629 memset(local_mask, 0, GGML_MAX_N_THREADS);
2630 int32_t base_idx = *iter;
2631 for (int32_t i = 0; i < GGML_MAX_N_THREADS; i++) {
2632 int32_t idx = base_idx + i;
2633 if (idx >= GGML_MAX_N_THREADS) {
2634 // Just a cheaper modulo
2635 idx -= GGML_MAX_N_THREADS;
2636 }
2637 if (global_mask[idx]) {
2638 local_mask[idx] = 1;
2639 *iter = idx + 1;
2640 return;
2641 }
2642 }
2643 }
2644}
2645
2646void ggml_threadpool_free(struct ggml_threadpool* threadpool) {
2647 if (!threadpool) return;
2648
2649 const int n_threads = threadpool->n_threads;
2650
2651#ifndef GGML_USE_OPENMP
2652 struct ggml_compute_state* workers = threadpool->workers;
2653
2654 ggml_mutex_lock(&threadpool->mutex);
2655
2656 threadpool->stop = true;
2657 threadpool->pause = false;
2658
2659 ggml_cond_broadcast(&threadpool->cond);
2660 ggml_mutex_unlock(&threadpool->mutex);
2661
2662 for (int j = 1; j < n_threads; j++) {
2663 int32_t rc = ggml_thread_join(workers[j].thrd, NULL);
2664 GGML_ASSERT(rc == GGML_EXIT_SUCCESS || rc == GGML_EXIT_ABORTED);
2665 UNUSED(rc);
2666 }
2667
2668 ggml_mutex_destroy(&threadpool->mutex);
2669 ggml_cond_destroy(&threadpool->cond);
2670#endif // GGML_USE_OPENMP
2671
2672 const size_t workers_size = sizeof(struct ggml_compute_state) * n_threads;
2673 ggml_aligned_free(threadpool->workers, workers_size);
2674 ggml_aligned_free(threadpool, sizeof(struct ggml_threadpool));
2675}
2676
2677#ifndef GGML_USE_OPENMP
2678// pause/resume must be called under mutex
2679static void ggml_threadpool_pause_locked(struct ggml_threadpool * threadpool) {
2680 GGML_PRINT_DEBUG("Pausing threadpool\n");
2681 threadpool->pause = true;
2682 ggml_cond_broadcast(&threadpool->cond);
2683}
2684
2685static void ggml_threadpool_resume_locked(struct ggml_threadpool * threadpool) {
2686 GGML_PRINT_DEBUG("Resuming threadpool\n");
2687 threadpool->pause = false;
2688 ggml_cond_broadcast(&threadpool->cond);
2689}
2690#endif
2691
2692void ggml_threadpool_pause(struct ggml_threadpool * threadpool) {
2693#ifndef GGML_USE_OPENMP
2694 ggml_mutex_lock(&threadpool->mutex);
2695 if (!threadpool->pause) {
2696 ggml_threadpool_pause_locked(threadpool);
2697 }
2698 ggml_mutex_unlock(&threadpool->mutex);
2699#else
2700 UNUSED(threadpool);
2701#endif
2702}
2703
2704void ggml_threadpool_resume(struct ggml_threadpool * threadpool) {
2705#ifndef GGML_USE_OPENMP
2706 ggml_mutex_lock(&threadpool->mutex);
2707 if (threadpool->pause) {
2708 ggml_threadpool_resume_locked(threadpool);
2709 }
2710 ggml_mutex_unlock(&threadpool->mutex);
2711#else
2712 UNUSED(threadpool);
2713#endif
2714}
2715
2716struct ggml_cplan ggml_graph_plan(
2717 const struct ggml_cgraph * cgraph,
2718 int n_threads,
2719 struct ggml_threadpool * threadpool) {
2720
2721 if (threadpool == NULL) {
2722 //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool : n_threads %d\n", n_threads);
2723 }
2724 if (n_threads <= 0) {
2725 n_threads = threadpool ? threadpool->n_threads : GGML_DEFAULT_N_THREADS;
2726 }
2727
2728#if defined(__EMSCRIPTEN__) && !defined(__EMSCRIPTEN_PTHREADS__)
2729 // Emscripten without pthreads support can only use a single thread
2730 n_threads = 1;
2731#endif
2732
2733 size_t work_size = 0;
2734
2735 struct ggml_cplan cplan;
2736 memset(&cplan, 0, sizeof(struct ggml_cplan));
2737
2738 int max_tasks = 1;
2739
2740 // thread scheduling for the different operations + work buffer size estimation
2741 for (int i = 0; i < cgraph->n_nodes; i++) {
2742 struct ggml_tensor * node = cgraph->nodes[i];
2743
2744 const int n_tasks = ggml_get_n_tasks(node, n_threads);
2745
2746 max_tasks = MAX(max_tasks, n_tasks);
2747
2748 size_t cur = 0;
2749
2750 if (!ggml_cpu_extra_work_size(n_threads, node, &cur)) {
2751 switch (node->op) {
2752 case GGML_OP_CPY:
2753 case GGML_OP_DUP:
2754 {
2755 if (ggml_is_quantized(node->type) ||
2756 // F16 -> BF16 and BF16 -> F16 copies go through intermediate F32
2757 (node->src[0]->type == GGML_TYPE_F16 && node->src[1] && node->src[1]->type == GGML_TYPE_BF16) ||
2758 (node->src[0]->type == GGML_TYPE_BF16 && node->src[1] && node->src[1]->type == GGML_TYPE_F16) ||
2759 // conversion between F32 and I32
2760 (node->src[0]->type == GGML_TYPE_F32 && node->src[1] && node->src[1]->type == GGML_TYPE_I32) ||
2761 (node->src[0]->type == GGML_TYPE_I32 && node->src[1] && node->src[1]->type == GGML_TYPE_F32)) {
2762 cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
2763 }
2764 } break;
2765 case GGML_OP_ADD:
2766 case GGML_OP_ADD_ID:
2767 case GGML_OP_ADD1:
2768 {
2769 if (ggml_is_quantized(node->src[0]->type)) {
2770 cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
2771 }
2772 } break;
2773 case GGML_OP_ACC:
2774 {
2775 if (ggml_is_quantized(node->src[0]->type)) {
2776 cur = ggml_type_size(GGML_TYPE_F32) * node->src[1]->ne[0] * n_tasks;
2777 }
2778 } break;
2779 case GGML_OP_COUNT_EQUAL:
2780 {
2781 cur = ggml_type_size(node->type)*n_tasks;
2782 } break;
2783 case GGML_OP_MUL_MAT:
2784 {
2785 const enum ggml_type vec_dot_type = type_traits_cpu[node->src[0]->type].vec_dot_type;
2786
2787 if (node->src[1]->type != vec_dot_type) {
2788 cur = ggml_row_size(vec_dot_type, ggml_nelements(node->src[1]));
2789 }
2790 } break;
2791 case GGML_OP_MUL_MAT_ID:
2792 {
2793 cur = 0;
2794 const struct ggml_tensor * src0 = node->src[0];
2795 const struct ggml_tensor * src1 = node->src[1];
2796 const struct ggml_tensor * ids = node->src[2];
2797 const enum ggml_type vec_dot_type = type_traits_cpu[src0->type].vec_dot_type;
2798 const int n_as = src0->ne[2];
2799 // src1
2800 if (src1->type != vec_dot_type) {
2801 cur += ggml_row_size(vec_dot_type, ggml_nelements(src1)) + sizeof(int64_t);
2802 }
2803 // matrix_row_counts
2804 cur += n_as * sizeof(int64_t) + sizeof(int64_t);
2805 // matrix_rows
2806 cur += n_as*ids->ne[0]*ids->ne[1]*sizeof(struct mmid_row_mapping) + sizeof(int64_t);
2807 // atomic_current_chunk
2808 cur += CACHE_LINE_SIZE*n_as + CACHE_LINE_SIZE;
2809 } break;
2810 case GGML_OP_OUT_PROD:
2811 {
2812 if (ggml_is_quantized(node->src[0]->type)) {
2813 cur = ggml_type_size(GGML_TYPE_F32) * node->src[0]->ne[0] * n_tasks;
2814 }
2815 } break;
2816 case GGML_OP_SOFT_MAX:
2817 case GGML_OP_ROPE:
2818 case GGML_OP_ROPE_BACK:
2819 {
2820 cur = ggml_type_size(GGML_TYPE_F32) * node->ne[0] * n_tasks;
2821 } break;
2822 case GGML_OP_CONV_TRANSPOSE_1D:
2823 {
2824 GGML_ASSERT(node->src[0]->ne[3] == 1);
2825 GGML_ASSERT(node->src[1]->ne[2] == 1);
2826 GGML_ASSERT(node->src[1]->ne[3] == 1);
2827
2828 const int64_t ne00 = node->src[0]->ne[0]; // K
2829 const int64_t ne01 = node->src[0]->ne[1]; // Cout
2830 const int64_t ne02 = node->src[0]->ne[2]; // Cin
2831 const int64_t ne10 = node->src[1]->ne[0]; // L
2832 const int64_t ne11 = node->src[1]->ne[1]; // Cin
2833
2834 if ((node->src[0]->type == GGML_TYPE_F16 ||
2835 node->src[0]->type == GGML_TYPE_BF16) &&
2836 node->src[1]->type == GGML_TYPE_F32) {
2837 cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02;
2838 cur += sizeof(ggml_fp16_t)*ne10*ne11;
2839 } else if (node->src[0]->type == GGML_TYPE_F32 &&
2840 node->src[1]->type == GGML_TYPE_F32) {
2841 cur += sizeof(float)*ne00*ne01*ne02;
2842 cur += sizeof(float)*ne10*ne11;
2843 } else {
2844 GGML_ABORT("fatal error");
2845 }
2846 } break;
2847 case GGML_OP_CONV_2D:
2848 case GGML_OP_CONV_3D:
2849 {
2850 cur = GGML_IM2COL_WORK_SIZE;
2851 } break;
2852 case GGML_OP_CONV_TRANSPOSE_2D:
2853 {
2854 const int64_t ne00 = node->src[0]->ne[0]; // W
2855 const int64_t ne01 = node->src[0]->ne[1]; // H
2856 const int64_t ne02 = node->src[0]->ne[2]; // Channels Out
2857 const int64_t ne03 = node->src[0]->ne[3]; // Channels In
2858
2859 const int64_t ne10 = node->src[1]->ne[0]; // W
2860 const int64_t ne11 = node->src[1]->ne[1]; // H
2861 const int64_t ne12 = node->src[1]->ne[2]; // Channels In
2862
2863 cur += sizeof(ggml_fp16_t)*ne00*ne01*ne02*ne03;
2864 cur += sizeof(ggml_fp16_t)*ne10*ne11*ne12;
2865 } break;
2866 case GGML_OP_TOP_K:
2867 {
2868 cur += sizeof(int32_t)*node->src[0]->ne[0]*n_tasks;
2869 } break;
2870 case GGML_OP_FLASH_ATTN_EXT:
2871 {
2872 const int64_t neq2 = node->src[0]->ne[2]; // number of query heads
2873 const int64_t DK = node->src[1]->ne[0];
2874 const int64_t DV = node->src[2]->ne[0];
2875
2876 // Tiled flash attention scratch (tile sizes defined in common.h)
2877 // Per-thread: Q_q + KQ + mask + VKQ32 + V32 + padding
2878 size_t prefill = sizeof(float)*(GGML_FA_TILE_Q*DK + 2*GGML_FA_TILE_Q*GGML_FA_TILE_KV + GGML_FA_TILE_Q*DV + GGML_FA_TILE_KV*DV)*n_tasks;
2879
2880 // Decode path: n_kv_chunks = n_tasks (one chunk per thread)
2881 // Per-thread: VKQ accmulator (DV), partial M, partial S + intra-thread scratch for V, Q and VKQ
2882 size_t n_chunks = n_tasks;
2883 size_t decode = sizeof(float)*(neq2*n_chunks*(2+DV) + n_tasks*(DK + 2*DV));
2884
2885 cur += MAX(prefill, decode);
2886 } break;
2887 case GGML_OP_FLASH_ATTN_BACK:
2888 {
2889 const int64_t D = node->src[0]->ne[0];
2890 const int64_t ne11 = ggml_up(node->src[1]->ne[1], GGML_SOFT_MAX_UNROLL);
2891 const int64_t mxDn = MAX(D, ne11) * 2; // *2 because of S and SM in ggml_compute_forward_flash_attn_back
2892 if (node->src[1]->type == GGML_TYPE_F32) {
2893 cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
2894 cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
2895 } else if (node->src[1]->type == GGML_TYPE_F16) {
2896 cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
2897 cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
2898 } else if (node->src[1]->type == GGML_TYPE_BF16) {
2899 cur = sizeof(float)*mxDn*n_tasks; // TODO: this can become (n_tasks-1)
2900 cur += sizeof(float)*mxDn*n_tasks; // this is overestimated by x2
2901 }
2902 } break;
2903
2904 case GGML_OP_CROSS_ENTROPY_LOSS:
2905 {
2906 cur = ggml_type_size(node->type)*(n_tasks + node->src[0]->ne[0]*n_tasks);
2907 } break;
2908 case GGML_OP_COUNT:
2909 {
2910 GGML_ABORT("fatal error");
2911 }
2912 default:
2913 break;
2914 }
2915 }
2916
2917 work_size = MAX(work_size, cur);
2918 }
2919
2920 if (work_size > 0) {
2921 work_size += CACHE_LINE_SIZE*(n_threads);
2922 }
2923
2924 cplan.threadpool = threadpool;
2925 cplan.n_threads = MIN(max_tasks, n_threads);
2926 cplan.work_size = work_size;
2927 cplan.work_data = NULL;
2928
2929 return cplan;
2930}
2931
2932static thread_ret_t ggml_graph_compute_thread(void * data) {
2933 struct ggml_compute_state * state = (struct ggml_compute_state *) data;
2934 struct ggml_threadpool * tp = state->threadpool;
2935
2936 const struct ggml_cgraph * cgraph = tp->cgraph;
2937 const struct ggml_cplan * cplan = tp->cplan;
2938
2939 set_numa_thread_affinity(state->ith);
2940
2941 struct ggml_compute_params params = {
2942 /*.ith =*/ state->ith,
2943 /*.nth =*/ atomic_load_explicit(&tp->n_graph, memory_order_relaxed) & GGML_THREADPOOL_N_THREADS_MASK,
2944 /*.wsize =*/ cplan->work_size,
2945 /*.wdata =*/ cplan->work_data,
2946 /*.threadpool =*/ tp,
2947 /*.use_ref =*/ cplan->use_ref,
2948 };
2949
2950 GGML_PRINT_DEBUG("thread #%d compute-start cplan %p last-graph %d \n", state->ith, cplan, state->last_graph);
2951
2952 for (int node_n = 0; node_n < cgraph->n_nodes && atomic_load_explicit(&tp->abort, memory_order_relaxed) != node_n; node_n++) {
2953 struct ggml_tensor * node = cgraph->nodes[node_n];
2954
2955 if (ggml_op_is_empty(node->op)) {
2956 // skip NOPs
2957 continue;
2958 }
2959
2960 if ((node->flags & GGML_TENSOR_FLAG_COMPUTE) == 0) {
2961 continue;
2962 }
2963
2964 ggml_compute_forward(¶ms, node);
2965
2966 if (state->ith == 0 && cplan->abort_callback &&
2967 cplan->abort_callback(cplan->abort_callback_data)) {
2968 atomic_store_explicit(&tp->abort, node_n + 1, memory_order_relaxed);
2969 tp->ec = GGML_STATUS_ABORTED;
2970 }
2971
2972 if (node_n + 1 < cgraph->n_nodes) {
2973 ggml_barrier(state->threadpool);
2974 }
2975 }
2976
2977 GGML_PRINT_DEBUG("thread #%d compute-done cplan %p last-graph %d \n", state->ith, cplan, state->last_graph);
2978
2979 ggml_barrier(state->threadpool);
2980
2981 return 0;
2982}
2983
2984#ifndef GGML_USE_OPENMP
2985
2986// check if thread is ready to proceed (exit from polling or sleeping)
2987// returns true if loops should exit, sets state->pending to indicate new work
2988static inline bool ggml_graph_compute_thread_ready(struct ggml_compute_state * state) {
2989 struct ggml_threadpool * threadpool = state->threadpool;
2990
2991 if (state->pending || threadpool->stop || threadpool->pause) { return true; }
2992
2993 // check for new graph/work
2994 int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed);
2995 int n_threads = n_graph & GGML_THREADPOOL_N_THREADS_MASK;
2996 if (n_graph != state->last_graph) {
2997 state->pending = (state->ith < n_threads);
2998 state->last_graph = n_graph;
2999 return true;
3000 }
3001
3002 return false;
3003}
3004
3005// sync thread state after polling
3006static inline void ggml_graph_compute_thread_sync(struct ggml_compute_state * state) {
3007 // TSAN doesn't support standalone fence yet, we use a dummy read-modify-write instead
3008 #ifdef GGML_TSAN_ENABLED
3009 atomic_fetch_add_explicit(&state->threadpool->n_graph, 0, memory_order_seq_cst);
3010 #else
3011 atomic_thread_fence(memory_order_seq_cst);
3012 #endif
3013 UNUSED(state);
3014}
3015
3016static inline bool ggml_graph_compute_poll_for_work(struct ggml_compute_state * state) {
3017 struct ggml_threadpool * threadpool = state->threadpool;
3018
3019 // This seems to make 0 ... 100 a decent range for polling level across modern processors.
3020 // Perhaps, we can adjust it dynamically based on load and things.
3021 const uint64_t n_rounds = 1024UL * 128 * threadpool->poll;
3022
3023 for (uint64_t i=0; !ggml_graph_compute_thread_ready(state) && i < n_rounds; i++) {
3024 // No new work. Keep polling.
3025 ggml_thread_cpu_relax();
3026 }
3027
3028 return state->pending;
3029}
3030
3031static inline bool ggml_graph_compute_check_for_work(struct ggml_compute_state * state) {
3032 struct ggml_threadpool * threadpool = state->threadpool;
3033
3034 if (ggml_graph_compute_poll_for_work(state)) {
3035 ggml_graph_compute_thread_sync(state);
3036 return state->pending;
3037 }
3038
3039 ggml_mutex_lock_shared(&threadpool->mutex);
3040 while (!ggml_graph_compute_thread_ready(state)) {
3041 // No new work. Wait for the signal.
3042 GGML_PRINT_DEBUG("thread #%d waiting for work (sleeping)\n", state->ith);
3043 ggml_cond_wait(&threadpool->cond, &threadpool->mutex);
3044 }
3045 ggml_mutex_unlock_shared(&threadpool->mutex);
3046
3047 return state->pending;
3048}
3049
3050static thread_ret_t ggml_graph_compute_secondary_thread(void* data) {
3051 struct ggml_compute_state * state = (struct ggml_compute_state *) data;
3052 struct ggml_threadpool * threadpool = state->threadpool;
3053
3054 ggml_thread_apply_priority(threadpool->prio);
3055 if (ggml_thread_cpumask_is_valid(state->cpumask)) {
3056 ggml_thread_apply_affinity(state->cpumask);
3057 }
3058
3059 while (true) {
3060 // Check if we need to sleep
3061 while (threadpool->pause) {
3062 GGML_PRINT_DEBUG("thread #%d inside pause loop\n", state->ith);
3063 ggml_mutex_lock_shared(&threadpool->mutex);
3064 if (threadpool->pause) {
3065 ggml_cond_wait(&threadpool->cond, &threadpool->mutex);
3066 }
3067 GGML_PRINT_DEBUG("thread #%d resuming after wait\n", state->ith);
3068 ggml_mutex_unlock_shared(&threadpool->mutex);
3069 }
3070
3071 // This needs to be checked for after the cond_wait
3072 if (threadpool->stop) break;
3073
3074 // Check if there is new work
3075 // The main thread is the only one that can dispatch new work
3076
3077 ggml_graph_compute_check_for_work(state);
3078 if (state->pending) {
3079 state->pending = false;
3080 ggml_graph_compute_thread(state);
3081 }
3082 }
3083
3084 return (thread_ret_t) 0;
3085}
3086
3087// Start processing new graph
3088static void ggml_graph_compute_kickoff(struct ggml_threadpool * threadpool, int n_threads)
3089{
3090 // Always take the mutex here because the worker threads are doing hybrid poll/wait
3091
3092 ggml_mutex_lock(&threadpool->mutex);
3093
3094 // Update the number of active threads and the graph count
3095 int n_graph = atomic_load_explicit(&threadpool->n_graph, memory_order_relaxed) >> GGML_THREADPOOL_N_THREADS_BITS;
3096 n_graph = ((n_graph + 1) << GGML_THREADPOOL_N_THREADS_BITS) | (n_threads & GGML_THREADPOOL_N_THREADS_MASK);
3097
3098 GGML_PRINT_DEBUG("compute-kickoff: n_threads %d n_graph %d\n", n_threads, n_graph);
3099
3100 // Indicate the graph is ready to be processed
3101 // We need the full seq-cst fence here because of the polling threads (used in thread_sync)
3102 atomic_store_explicit(&threadpool->n_graph, n_graph, memory_order_seq_cst);
3103
3104 if (threadpool->pause) {
3105 // Update main thread prio and affinity to match the threadpool settings
3106 ggml_thread_apply_priority(threadpool->prio);
3107 if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) {
3108 ggml_thread_apply_affinity(threadpool->workers[0].cpumask);
3109 }
3110
3111 // resume does cond broadcast
3112 ggml_threadpool_resume_locked(threadpool);
3113 } else {
3114 ggml_cond_broadcast(&threadpool->cond);
3115 }
3116
3117 ggml_mutex_unlock(&threadpool->mutex);
3118}
3119
3120#endif // GGML_USE_OPENMP
3121
3122static struct ggml_threadpool * ggml_threadpool_new_impl(
3123 struct ggml_threadpool_params * tpp,
3124 struct ggml_cgraph * cgraph,
3125 struct ggml_cplan * cplan) {
3126
3127 struct ggml_threadpool * threadpool =
3128 ggml_aligned_malloc(sizeof(struct ggml_threadpool));
3129 {
3130 threadpool->cgraph = cgraph;
3131 threadpool->cplan = cplan;
3132 threadpool->n_graph = 0;
3133 threadpool->n_barrier = 0;
3134 threadpool->n_barrier_passed = 0;
3135 threadpool->current_chunk = 0;
3136 threadpool->stop = false;
3137 threadpool->pause = tpp->paused;
3138 threadpool->abort = -1;
3139 threadpool->workers = NULL;
3140 threadpool->n_threads = tpp->n_threads;
3141 threadpool->poll = tpp->poll;
3142 threadpool->prio = tpp->prio;
3143 threadpool->ec = GGML_STATUS_SUCCESS;
3144 }
3145
3146 // Allocate and init workers state
3147 const size_t workers_size = sizeof(struct ggml_compute_state) * tpp->n_threads;
3148 struct ggml_compute_state * workers = ggml_aligned_malloc(workers_size);
3149
3150 memset(workers, 0, workers_size);
3151 for (int j = 0; j < tpp->n_threads; j++) {
3152 workers[j].threadpool = threadpool;
3153 workers[j].ith = j;
3154 }
3155
3156 threadpool->workers = workers;
3157
3158#ifdef GGML_USE_OPENMP
3159 int32_t cpumask_iter = 0;
3160
3161 // Compute CPU masks for each thread
3162 for (int j = 0; j < tpp->n_threads; j++) {
3163 ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter);
3164 }
3165#else // GGML_USE_OPENMP
3166 ggml_mutex_init(&threadpool->mutex);
3167 ggml_cond_init(&threadpool->cond);
3168
3169 // Spin the threads for all workers, and update CPU placements.
3170 // Place the main thread last (towards the higher numbered CPU cores).
3171
3172 int32_t cpumask_iter = 0;
3173
3174 for (int j = 1; j < tpp->n_threads; j++) {
3175 ggml_thread_cpumask_next(tpp->cpumask, workers[j].cpumask, tpp->strict_cpu, &cpumask_iter);
3176
3177 int32_t rc = ggml_thread_create(&workers[j].thrd, NULL, ggml_graph_compute_secondary_thread, &workers[j]);
3178 GGML_ASSERT(rc == 0);
3179 }
3180
3181 ggml_thread_cpumask_next(tpp->cpumask, workers[0].cpumask, tpp->strict_cpu, &cpumask_iter);
3182
3183 if (!threadpool->pause) {
3184 // Update main thread prio and affinity at the start, otherwise we'll do it in resume
3185 ggml_thread_apply_priority(threadpool->prio);
3186 if (ggml_thread_cpumask_is_valid(threadpool->workers[0].cpumask)) {
3187 ggml_thread_apply_affinity(threadpool->workers[0].cpumask);
3188 }
3189 }
3190#endif // GGML_USE_OPENMP
3191
3192 return threadpool;
3193}
3194
3195struct ggml_threadpool * ggml_threadpool_new(struct ggml_threadpool_params * tpp) {
3196 return ggml_threadpool_new_impl(tpp, NULL, NULL);
3197}
3198
3199enum ggml_status ggml_graph_compute(struct ggml_cgraph * cgraph, struct ggml_cplan * cplan) {
3200 ggml_cpu_init();
3201
3202 GGML_ASSERT(cplan);
3203 GGML_ASSERT(cplan->n_threads > 0);
3204 GGML_ASSERT(cplan->work_size == 0 || cplan->work_data != NULL);
3205
3206 int n_threads = cplan->n_threads;
3207 struct ggml_threadpool * threadpool = cplan->threadpool;
3208
3209 bool disposable_threadpool = false;
3210
3211 if (threadpool == NULL) {
3212 //GGML_PRINT_DEBUG("Threadpool is not specified. Will create a disposable threadpool : n_threads %d\n", n_threads);
3213 disposable_threadpool = true;
3214
3215 struct ggml_threadpool_params ttp = ggml_threadpool_params_default(n_threads);
3216 threadpool = ggml_threadpool_new_impl(&ttp, cgraph, cplan);
3217 } else {
3218 // Reset some of the parameters that need resetting
3219 // No worker threads should be accessing the parameters below at this stage
3220 threadpool->cgraph = cgraph;
3221 threadpool->cplan = cplan;
3222 threadpool->current_chunk = 0;
3223 threadpool->abort = -1;
3224 threadpool->ec = GGML_STATUS_SUCCESS;
3225 }
3226
3227#ifdef GGML_USE_OPENMP
3228 if (n_threads > 1) {
3229 #pragma omp parallel num_threads(n_threads)
3230 {
3231 #pragma omp single
3232 {
3233 // update the number of threads from the actual number of threads that we got from OpenMP
3234 n_threads = omp_get_num_threads();
3235 atomic_store_explicit(&threadpool->n_graph, n_threads, memory_order_relaxed);
3236 }
3237
3238 // Apply thread CPU mask and priority
3239 int ith = omp_get_thread_num();
3240
3241 ggml_thread_apply_priority(threadpool->prio);
3242 if (ggml_thread_cpumask_is_valid(threadpool->workers[ith].cpumask)) {
3243 ggml_thread_apply_affinity(threadpool->workers[ith].cpumask);
3244 }
3245 ggml_graph_compute_thread(&threadpool->workers[ith]);
3246 }
3247 } else {
3248 atomic_store_explicit(&threadpool->n_graph, 1, memory_order_relaxed);
3249 ggml_graph_compute_thread(&threadpool->workers[0]);
3250 }
3251#else
3252 if (n_threads > threadpool->n_threads) {
3253 GGML_LOG_WARN("cplan requested more threads (%d) than available (%d)\n", n_threads, threadpool->n_threads);
3254 n_threads = threadpool->n_threads;
3255 }
3256
3257 // Kick all threads to start the new graph
3258 ggml_graph_compute_kickoff(threadpool, n_threads);
3259
3260 // This is a work thread too
3261 ggml_graph_compute_thread(&threadpool->workers[0]);
3262#endif
3263
3264 // don't leave affinity set on the main thread
3265 clear_numa_thread_affinity();
3266
3267 enum ggml_status ret = threadpool->ec;
3268
3269 if (disposable_threadpool) {
3270 ggml_threadpool_free(threadpool);
3271 }
3272
3273 return ret;
3274}
3275
3276enum ggml_status ggml_graph_compute_with_ctx(struct ggml_context * ctx, struct ggml_cgraph * cgraph, int n_threads) {
3277 struct ggml_cplan cplan = ggml_graph_plan(cgraph, n_threads, NULL);
3278
3279 cplan.work_data = (uint8_t *)ggml_new_buffer(ctx, cplan.work_size);
3280
3281 return ggml_graph_compute(cgraph, &cplan);
3282}
3283
3284void ggml_cpu_fp32_to_fp32(const float * x, float * y, int64_t n) {
3285 memcpy(y, x, n * sizeof(float));
3286}
3287
3288void ggml_cpu_fp32_to_fp16(const float * x, ggml_fp16_t * y, int64_t n) {
3289 int64_t i = 0;
3290#if defined(__F16C__)
3291#if defined(__AVX512F__)
3292 for (; i + 15 < n; i += 16) {
3293 __m512 x_vec = _mm512_loadu_ps(x + i);
3294 __m256i y_vec = _mm512_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
3295 _mm256_storeu_si256((__m256i *)(y + i), y_vec);
3296 }
3297#endif
3298 for (; i + 7 < n; i += 8) {
3299 __m256 x_vec = _mm256_loadu_ps(x + i);
3300 __m128i y_vec = _mm256_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
3301 _mm_storeu_si128((__m128i *)(y + i), y_vec);
3302 }
3303 for (; i + 3 < n; i += 4) {
3304 __m128 x_vec = _mm_loadu_ps(x + i);
3305 __m128i y_vec = _mm_cvtps_ph(x_vec, _MM_FROUND_TO_NEAREST_INT);
3306 _mm_storel_epi64((__m128i *)(y + i), y_vec);
3307 }
3308#elif defined(__riscv_zvfh)
3309 for (int vl; i < n; i += vl) {
3310 vl = __riscv_vsetvl_e32m2(n - i);
3311 vfloat32m2_t vx = __riscv_vle32_v_f32m2(&x[i], vl);
3312 vfloat16m1_t vy = __riscv_vfncvt_f_f_w_f16m1(vx, vl);
3313 __riscv_vse16_v_f16m1((_Float16 *)&y[i], vy, vl);
3314 }
3315#endif
3316 for (; i < n; ++i) {
3317 y[i] = GGML_CPU_FP32_TO_FP16(x[i]);
3318 }
3319}
3320
3321void ggml_cpu_fp16_to_fp32(const ggml_fp16_t * x, float * y, int64_t n) {
3322 int64_t i = 0;
3323#if defined(__F16C__)
3324#if defined(__AVX512F__)
3325 for (; i + 15 < n; i += 16) {
3326 __m256i x_vec = _mm256_loadu_si256((const __m256i *)(x + i));
3327 __m512 y_vec = _mm512_cvtph_ps(x_vec);
3328 _mm512_storeu_ps(y + i, y_vec);
3329 }
3330#endif
3331 for (; i + 7 < n; i += 8) {
3332 __m128i x_vec = _mm_loadu_si128((const __m128i *)(x + i));
3333 __m256 y_vec = _mm256_cvtph_ps(x_vec);
3334 _mm256_storeu_ps(y + i, y_vec);
3335 }
3336 for (; i + 3 < n; i += 4) {
3337 __m128i x_vec = _mm_loadl_epi64((const __m128i *)(x + i));
3338 __m128 y_vec = _mm_cvtph_ps(x_vec);
3339 _mm_storeu_ps(y + i, y_vec);
3340 }
3341
3342#elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfhmin)
3343 // calculate step size
3344 const int epr = __riscv_vsetvlmax_e16m2();
3345 const int step = epr * 2;
3346 const int np = (n & ~(step - 1));
3347
3348 // unroll by 2
3349 for (; i < np; i += step) {
3350 vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, epr);
3351 vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, epr);
3352 __riscv_vse32_v_f32m4(y + i, ay0, epr);
3353
3354 vfloat16m2_t ax1 = __riscv_vle16_v_f16m2((const _Float16*)x + i + epr, epr);
3355 vfloat32m4_t ay1 = __riscv_vfwcvt_f_f_v_f32m4(ax1, epr);
3356 __riscv_vse32_v_f32m4(y + i + epr, ay1, epr);
3357 }
3358
3359 // leftovers
3360 int vl;
3361 for (i = np; i < n; i += vl) {
3362 vl = __riscv_vsetvl_e16m2(n - i);
3363 vfloat16m2_t ax0 = __riscv_vle16_v_f16m2((const _Float16*)x + i, vl);
3364 vfloat32m4_t ay0 = __riscv_vfwcvt_f_f_v_f32m4(ax0, vl);
3365 __riscv_vse32_v_f32m4(y + i, ay0, vl);
3366 }
3367
3368#endif
3369
3370 for (; i < n; ++i) {
3371 y[i] = GGML_CPU_FP16_TO_FP32(x[i]);
3372 }
3373}
3374
3375void ggml_cpu_fp32_to_bf16(const float * x, ggml_bf16_t * y, int64_t n) {
3376 int64_t i = 0;
3377 for (; i < n; ++i) {
3378 y[i] = GGML_FP32_TO_BF16(x[i]);
3379 }
3380}
3381
3382void ggml_cpu_fp32_to_i32(const float * x, int32_t * y, int64_t n) {
3383 int64_t i = 0;
3384 for (; i < n; ++i) {
3385 y[i] = x[i];
3386 }
3387}
3388
3389void ggml_cpu_bf16_to_fp32(const ggml_bf16_t * x, float * y, int64_t n) {
3390 int64_t i = 0;
3391#if defined(__AVX2__)
3392#if defined(__AVX512F__)
3393 for (; i + 15 < n; i += 16) {
3394 _mm512_storeu_ps(y + i,
3395 _mm512_castsi512_ps(
3396 _mm512_slli_epi32(
3397 _mm512_cvtepu16_epi32(
3398 _mm256_loadu_si256(
3399 (const __m256i *)(x + i))),
3400 16)));
3401 }
3402#endif
3403 for (; i + 7 < n; i += 8) {
3404 _mm256_storeu_ps(y + i,
3405 _mm256_castsi256_ps(
3406 _mm256_slli_epi32(
3407 _mm256_cvtepu16_epi32(
3408 _mm_loadu_si128(
3409 (const __m128i *)(x + i))),
3410 16)));
3411 }
3412#elif defined(__riscv_v_intrinsic) && defined(__riscv_zvfbfmin)
3413 // calculate step size
3414 const int epr = __riscv_vsetvlmax_e16m2();
3415 const int step = epr * 2;
3416 const int np = (n & ~(step - 1));
3417
3418 // unroll by 2
3419 for (; i < np; i += step) {
3420 vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, epr);
3421 vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, epr);
3422 __riscv_vse32_v_f32m4(y + i, ay0, epr);
3423
3424 vbfloat16m2_t ax1 = __riscv_vle16_v_bf16m2((const __bf16*)x + i + epr, epr);
3425 vfloat32m4_t ay1 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax1, epr);
3426 __riscv_vse32_v_f32m4(y + i + epr, ay1, epr);
3427 }
3428
3429 // leftovers
3430 int vl;
3431 for (i = np; i < n; i += vl) {
3432 vl = __riscv_vsetvl_e16m2(n - i);
3433 vbfloat16m2_t ax0 = __riscv_vle16_v_bf16m2((const __bf16*)x + i, vl);
3434 vfloat32m4_t ay0 = __riscv_vfwcvtbf16_f_f_v_f32m4(ax0, vl);
3435 __riscv_vse32_v_f32m4(y + i, ay0, vl);
3436 }
3437#endif
3438 for (; i < n; i++) {
3439 y[i] = GGML_BF16_TO_FP32(x[i]);
3440 }
3441}
3442
3443int ggml_cpu_has_avx(void) {
3444#if defined(__AVX__)
3445 return 1;
3446#else
3447 return 0;
3448#endif
3449}
3450
3451int ggml_cpu_has_avx_vnni(void) {
3452#if defined(__AVXVNNI__)
3453 return 1;
3454#else
3455 return 0;
3456#endif
3457}
3458
3459int ggml_cpu_has_avx2(void) {
3460#if defined(__AVX2__)
3461 return 1;
3462#else
3463 return 0;
3464#endif
3465}
3466
3467int ggml_cpu_has_avx512(void) {
3468#if defined(__AVX512F__)
3469 return 1;
3470#else
3471 return 0;
3472#endif
3473}
3474
3475int ggml_cpu_has_avx512_vbmi(void) {
3476#if defined(__AVX512VBMI__)
3477 return 1;
3478#else
3479 return 0;
3480#endif
3481}
3482
3483int ggml_cpu_has_avx512_vnni(void) {
3484#if defined(__AVX512VNNI__)
3485 return 1;
3486#else
3487 return 0;
3488#endif
3489}
3490
3491int ggml_cpu_has_avx512_bf16(void) {
3492#if defined(__AVX512BF16__)
3493 return 1;
3494#else
3495 return 0;
3496#endif
3497}
3498
3499int ggml_cpu_has_amx_int8(void) {
3500#if defined(__AMX_INT8__)
3501 return 1;
3502#else
3503 return 0;
3504#endif
3505}
3506
3507int ggml_cpu_has_bmi2(void) {
3508#if defined(__BMI2__)
3509 return 1;
3510#else
3511 return 0;
3512#endif
3513}
3514
3515int ggml_cpu_has_fma(void) {
3516#if defined(__FMA__)
3517 return 1;
3518#else
3519 return 0;
3520#endif
3521}
3522
3523int ggml_cpu_has_arm_fma(void) {
3524#if defined(__ARM_FEATURE_FMA)
3525 return 1;
3526#else
3527 return 0;
3528#endif
3529}
3530
3531int ggml_cpu_has_riscv_v(void) {
3532#if defined(__riscv_v_intrinsic)
3533 return 1;
3534#else
3535 return 0;
3536#endif
3537}
3538
3539int ggml_cpu_get_rvv_vlen(void) {
3540#if defined(__riscv) && defined(__riscv_v_intrinsic)
3541 return ggml_riscv_arch_features.rvv_vlen;
3542#else
3543 return 0;
3544#endif
3545}
3546
3547int ggml_cpu_has_f16c(void) {
3548#if defined(__F16C__)
3549 return 1;
3550#else
3551 return 0;
3552#endif
3553}
3554
3555int ggml_cpu_has_fp16_va(void) {
3556#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC)
3557 return 1;
3558#else
3559 return 0;
3560#endif
3561}
3562
3563int ggml_cpu_has_wasm_simd(void) {
3564#if defined(__wasm_simd128__)
3565 return 1;
3566#else
3567 return 0;
3568#endif
3569}
3570
3571int ggml_cpu_has_llamafile(void) {
3572#if defined(GGML_USE_LLAMAFILE)
3573 return 1;
3574#else
3575 return 0;
3576#endif
3577}
3578
3579int ggml_cpu_has_sse3(void) {
3580#if defined(__SSE3__)
3581 return 1;
3582#else
3583 return 0;
3584#endif
3585}
3586
3587int ggml_cpu_has_ssse3(void) {
3588#if defined(__SSSE3__)
3589 return 1;
3590#else
3591 return 0;
3592#endif
3593}
3594
3595int ggml_cpu_has_vsx(void) {
3596#if defined(__POWER9_VECTOR__)
3597 return 1;
3598#else
3599 return 0;
3600#endif
3601}
3602
3603int ggml_cpu_has_vxe(void) {
3604#if defined(__VXE__) || defined(__VXE2__)
3605 return 1;
3606#else
3607 return 0;
3608#endif
3609}
3610
3611int ggml_cpu_has_neon(void) {
3612#if defined(__ARM_ARCH) && defined(__ARM_NEON)
3613 return 1;
3614#else
3615 return 0;
3616#endif
3617}
3618
3619int ggml_cpu_has_dotprod(void) {
3620#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_DOTPROD)
3621 return 1;
3622#else
3623 return 0;
3624#endif
3625}
3626
3627int ggml_cpu_has_sve(void) {
3628#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE)
3629 return 1;
3630#else
3631 return 0;
3632#endif
3633}
3634
3635int ggml_cpu_has_matmul_int8(void) {
3636#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_MATMUL_INT8)
3637 return 1;
3638#else
3639 return 0;
3640#endif
3641}
3642
3643int ggml_cpu_get_sve_cnt(void) {
3644#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SVE)
3645 return ggml_arm_arch_features.sve_cnt;
3646#else
3647 return 0;
3648#endif
3649}
3650
3651int ggml_cpu_has_sme(void) {
3652#if defined(__ARM_ARCH) && defined(__ARM_FEATURE_SME)
3653 return 1;
3654#else
3655 return 0;
3656#endif
3657}
3658
3659void ggml_cpu_init(void) {
3660 // needed to initialize ggml_time
3661 {
3662 struct ggml_init_params params = { 0, NULL, false };
3663 struct ggml_context * ctx = ggml_init(params);
3664 ggml_free(ctx);
3665 }
3666
3667 ggml_critical_section_start();
3668
3669 static bool is_first_call = true;
3670
3671 if (is_first_call) {
3672 // initialize GELU, Quick GELU, SILU and EXP F32 tables
3673 {
3674 const uint64_t t_start = ggml_time_us(); UNUSED(t_start);
3675
3676 for (int i = 0; i < (1 << 16); ++i) {
3677 union {
3678 uint16_t u16;
3679 ggml_fp16_t fp16;
3680 } u = {i};
3681 float f = GGML_COMPUTE_FP16_TO_FP32(u.fp16);
3682 ggml_table_f32_f16[i] = f;
3683 ggml_table_gelu_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_f32(f));
3684 ggml_table_gelu_quick_f16[i] = GGML_CPU_FP32_TO_FP16(ggml_gelu_quick_f32(f));
3685 }
3686
3687 // initialize E8M0 half table (256 entries)
3688 for (int i = 0; i < (1 << 8); ++i) {
3689 ggml_table_f32_e8m0_half[i] = GGML_E8M0_TO_FP32_HALF(i);
3690 }
3691
3692 const uint64_t t_end = ggml_time_us(); UNUSED(t_end);
3693
3694 GGML_PRINT_DEBUG("%s: GELU, Quick GELU, SILU and EXP tables initialized in %f ms\n", __func__, (t_end - t_start)/1000.0);
3695
3696#ifdef GGML_USE_OPENMP
3697 //if (!getenv("OMP_WAIT_POLICY")) {
3698 // // set the wait policy to active, so that OpenMP threads don't sleep
3699 // setenv("OMP_WAIT_POLICY", "active", 0)
3700 //}
3701
3702 if (!getenv("KMP_BLOCKTIME")) {
3703 // set the time to wait before sleeping a thread
3704 // this is less aggressive than setting the wait policy to active, but should achieve similar results in most cases
3705#ifdef _WIN32
3706 _putenv_s("KMP_BLOCKTIME", "200"); // 200ms
3707#else
3708 setenv("KMP_BLOCKTIME", "200", 0); // 200ms
3709#endif
3710 }
3711#endif
3712 }
3713
3714#if defined(__ARM_ARCH)
3715 ggml_init_arm_arch_features();
3716#endif
3717
3718#if defined(__riscv)
3719 ggml_init_riscv_arch_features();
3720#endif
3721
3722 is_first_call = false;
3723 }
3724
3725 ggml_critical_section_end();
3726}