author     Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
committer  Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
commit     b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree       211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/ggml/src/ggml-cuda/sum.cu
Engage!
Diffstat (limited to 'llama.cpp/ggml/src/ggml-cuda/sum.cu')
-rw-r--r--  llama.cpp/ggml/src/ggml-cuda/sum.cu  41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/llama.cpp/ggml/src/ggml-cuda/sum.cu b/llama.cpp/ggml/src/ggml-cuda/sum.cu
new file mode 100644
index 0000000..c56257b
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-cuda/sum.cu
@@ -0,0 +1,41 @@
+#include "sum.cuh"
+#include "sumrows.cuh"
+
+#ifdef GGML_CUDA_USE_CUB
+#include <cub/cub.cuh>
+using namespace cub;
+#endif // GGML_CUDA_USE_CUB
+
+#include <cstdint>
+
+void sum_f32_cuda(ggml_cuda_pool & pool, const float * x, float * dst, const int64_t ne, cudaStream_t stream) {
+#ifdef GGML_CUDA_USE_CUB
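+    // CUB's DeviceReduce::Sum uses the usual two-call pattern: the first call
+    // passes a null temp-storage pointer so CUB only reports the scratch size
+    // it needs, and the second call performs the actual reduction.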
+ size_t tmp_size = 0;
+ DeviceReduce::Sum(nullptr, tmp_size, x, dst, ne, stream);
+ ggml_cuda_pool_alloc<uint8_t> tmp_alloc(pool, tmp_size);
+ DeviceReduce::Sum(tmp_alloc.ptr, tmp_size, x, dst, ne, stream);
+#else
+ // Use (inefficient) sum_rows implementation as a fallback.
+    // For AMD there is rocPRIM, which could be used as a drop-in replacement via hipCUB, but this would require moving from C++11 to C++14.
+ sum_rows_f32_cuda(x, dst, ne, 1, stream);
+ GGML_UNUSED(pool);
+#endif // GGML_CUDA_USE_CUB
+}
+
+void ggml_cuda_op_sum(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
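+    // Both the CUB path and the sum_rows fallback read the tensor as one flat
+    // array of ne floats, hence the contiguous-allocation requirement below.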
+ GGML_ASSERT(ggml_is_contiguously_allocated(src0));
+
+ const float * src0_d = (const float *) src0->data;
+ float * dst_d = (float *) dst->data;
+
+ const int64_t ne = ggml_nelements(src0);
+
+ ggml_cuda_pool & pool = ctx.pool();
+ cudaStream_t stream = ctx.stream();
+
+ sum_f32_cuda(pool, src0_d, dst_d, ne, stream);
+}
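
For context, the two-call temp-storage idiom used in sum_f32_cuda above can be exercised outside ggml. The standalone sketch below is illustrative only: the helper name device_sum is an assumption (not part of llama.cpp), and plain cudaMalloc stands in for the ggml_cuda_pool. It sums four floats with cub::DeviceReduce::Sum on the default stream.

#include <cub/cub.cuh>
#include <cstdint>
#include <cstdio>

// Illustrative helper (assumed name, not llama.cpp code): sums ne floats on the device.
static void device_sum(const float * d_in, float * d_out, int64_t ne, cudaStream_t stream) {
    void * d_tmp    = nullptr;
    size_t tmp_size = 0;
    // First call: null scratch pointer, so CUB only writes the required size into tmp_size.
    cub::DeviceReduce::Sum(d_tmp, tmp_size, d_in, d_out, ne, stream);
    cudaMalloc(&d_tmp, tmp_size);
    // Second call: same arguments with real scratch memory; performs the reduction.
    cub::DeviceReduce::Sum(d_tmp, tmp_size, d_in, d_out, ne, stream);
    cudaFree(d_tmp);
}

int main() {
    const float h_in[4] = {1.0f, 2.0f, 3.0f, 4.0f};
    float * d_in = nullptr, * d_out = nullptr, h_out = 0.0f;
    cudaMalloc(&d_in, sizeof(h_in));
    cudaMalloc(&d_out, sizeof(float));
    cudaMemcpy(d_in, h_in, sizeof(h_in), cudaMemcpyHostToDevice);
    device_sum(d_in, d_out, 4, 0); // 0 = default stream
    cudaMemcpy(&h_out, d_out, sizeof(float), cudaMemcpyDeviceToHost);
    printf("sum = %f\n", h_out);  // expected: 10.000000
    cudaFree(d_in);
    cudaFree(d_out);
    return 0;
}

Since CUB ships with the CUDA Toolkit from 11.0 onward, this builds with nvcc alone and needs no extra dependencies.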