#include "mean.cuh"
#include "reduce_rows.cuh"

#ifdef GGML_CUDA_USE_CUB
#include <cub/cub.cuh>
using namespace cub;
#endif // GGML_CUDA_USE_CUB

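// Finalize the mean on the device: a single thread divides the reduced sum
// by the element count, avoiding a round-trip to the host.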
template <typename T> __global__ void divide_by_count(T * result, size_t count) {
    *result /= static_cast<T>(count);
}

void ggml_cuda_op_mean(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
    const ggml_tensor * src0 = dst->src[0];
    const float * src0_d = (const float *) src0->data;
    float * dst_d = (float *) dst->data;
    cudaStream_t stream = ctx.stream();

    GGML_ASSERT(src0->type == GGML_TYPE_F32);
    GGML_ASSERT(dst->type == GGML_TYPE_F32);
    GGML_ASSERT(ggml_is_contiguous(src0));

    const int64_t ncols = src0->ne[0];
    const int64_t nrows = ggml_nrows(src0);

// Special case for reducing single-row tensors (vectors)
#ifdef GGML_CUDA_USE_CUB
#ifdef USE_CUDA_GRAPH
    cudaStreamCaptureStatus iscapturing;
    CUDA_CHECK(cudaStreamIsCapturing(stream, &iscapturing));
#endif // USE_CUDA_GRAPH
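    // The CUB path below issues extra kernel launches (reduction + divide);
    // CUDA graph replay amortizes that overhead, which motivates the lower
    // column threshold when graphs are in use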
    if ((nrows == 1) &&
#ifdef USE_CUDA_GRAPH
        // CUDA graphs are effectively disabled for this context
        // (no graph instance exists and we are not capturing, or graphs are
        // explicitly enabled): the extra launches are not amortized, so
        // require the higher column threshold
        (((ncols > 65536) &&
          (((!ctx.any_cuda_graph_has_instance()) && (iscapturing == cudaStreamCaptureStatusNone)) ||
           ctx.any_cuda_graph_enabled())) ||
         // CUDA graphs are active: launches are recorded once and replayed,
         // so the lower column threshold already pays off
         ((ncols > 32768) &&
          !(((!ctx.any_cuda_graph_has_instance()) && (iscapturing == cudaStreamCaptureStatusNone)) ||
            ctx.any_cuda_graph_enabled())))) {
#else
        (ncols > 65536)) {
#endif // USE_CUDA_GRAPH
        // Single row - use device-wide reduction
        size_t tmp_size = 0;
        ggml_cuda_pool & pool = ctx.pool();

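        // CUB two-phase pattern: the first call, with a null temp-storage
        // pointer, only writes the required scratch size into tmp_size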
        DeviceReduce::Sum(nullptr, tmp_size, src0_d, dst_d, ncols, stream);

        ggml_cuda_pool_alloc<uint8_t> tmp_alloc(pool, tmp_size);
        DeviceReduce::Sum(tmp_alloc.ptr, tmp_size, src0_d, dst_d, ncols, stream);

        // Divide the sum by ncols on-device to get the mean (avoids a host sync)
        divide_by_count<float><<<1, 1, 0, stream>>>(dst_d, ncols);
        return;
    }
#endif // GGML_CUDA_USE_CUB

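    // General case: one thread block per row; reduce_rows_f32 with norm=true
    // computes the mean of each row within its block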
    const dim3 block_nums(nrows, 1, 1);

    const int id = ggml_cuda_get_device();
    const int nsm = ggml_cuda_info().devices[id].nsm;

    // Heuristic for block size selection to optimize occupancy.
    // See discussion in: https://github.com/ggml-org/llama.cpp/pull/15132
    if ((nrows / nsm) < 2) {
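        // Fewer than two rows per SM: use large blocks so each row exposes
        // enough threads to keep the device busy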
        const dim3 block_dims(512, 1, 1);
        reduce_rows_f32</*norm=*/true><<<block_nums, block_dims, 0, stream>>>(src0_d, dst_d, ncols);
    } else {
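        // Enough rows to occupy all SMs: smaller blocks (narrower still for
        // short rows) cut per-block reduction overhead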
        const dim3 block_dims(ncols < 1024 ? 32 : 128, 1, 1);
        reduce_rows_f32</*norm=*/true><<<block_nums, block_dims, 0, stream>>>(src0_d, dst_d, ncols);
    }
}