author     Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
committer  Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
commit     b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree       211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu
Engage!
Diffstat (limited to 'llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu')
-rw-r--r--  llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu  150
1 file changed, 150 insertions(+), 0 deletions(-)
diff --git a/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu b/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu
new file mode 100644
index 0000000..6d5ea70
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-cuda/ssm-conv.cu
@@ -0,0 +1,150 @@
+#include "ssm-conv.cuh"
+
+template <size_t split_d_inner, size_t d_conv>
+static __global__ void ssm_conv_f32(const float * __restrict__ src0, const float * __restrict__ src1,
+ const int src0_nb0, const int src0_nb1, const int src0_nb2, const int src1_nb1,
+ float * __restrict__ dst, const int dst_nb0, const int dst_nb1, const int dst_nb2,
+ const int64_t n_t) {
+ GGML_UNUSED(src0_nb0);
+ const int tid = threadIdx.x;
+ const int bidx = blockIdx.x;
+ const int bidy = blockIdx.y;
+
+ const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1);
+ const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1);
+ float * y_block = (float *) ((char *) dst + bidx * dst_nb2 + bidy * split_d_inner * dst_nb0);
+
+ const int stride_x = src0_nb1 / sizeof(float);
+ const int stride_w = src1_nb1 / sizeof(float);
+ const int stride_y = dst_nb1 / sizeof(float);
+
+ float x[d_conv] = { 0.0f };
+ float w[d_conv] = { 0.0f };
+
+#pragma unroll
+ for (size_t j = 0; j < d_conv; j++) {
+ w[j] = w_block[tid * stride_w + j];
+ }
+
+ for (int64_t i = 0; i < n_t; i++) {
+ float sumf = 0.0f;
+
+ if (i == 0) {
+ for (size_t j = 0; j < d_conv; j++) {
+ x[j] = x_block[tid * stride_x + j];
+ }
+ } else {
+ x[(i - 1) % d_conv] = x_block[tid * stride_x + i + d_conv - 1];
+ }
+
+#pragma unroll
+ for (size_t j = 0; j < d_conv; j++) {
+ sumf += x[(i + j) % d_conv] * w[j];
+ }
+ y_block[i * stride_y + tid] = sumf;
+ }
+}
+
+template <size_t split_d_inner, size_t d_conv, int64_t split_n_t>
+static __global__ void ssm_conv_long_token_f32(const float * __restrict__ src0, const float * __restrict__ src1,
+ const int src0_nb0, const int src0_nb1, const int src0_nb2,
+ const int src1_nb1, float * __restrict__ dst, const int dst_nb0,
+ const int dst_nb1, const int dst_nb2, const int64_t n_t) {
+ const int tid = threadIdx.x;
+ const int bidx = blockIdx.x;
+ const int bidy = blockIdx.y;
+ const int bidz = blockIdx.z;
+
+ const float * x_block = (const float *) ((const char *) src0 + bidx * src0_nb2 + bidy * split_d_inner * src0_nb1 +
+ bidz * split_n_t * src0_nb0);
+ const float * w_block = (const float *) ((const char *) src1 + bidy * split_d_inner * src1_nb1);
+ float * y_block =
+ (float *) ((char *) dst + bidx * dst_nb2 + bidz * split_n_t * dst_nb1 + bidy * split_d_inner * dst_nb0);
+
+ const int stride_x = src0_nb1 / sizeof(float);
+ const int stride_w = src1_nb1 / sizeof(float);
+ const int stride_y = dst_nb1 / sizeof(float);
+
+ float x[d_conv] = { 0.0f };
+ float w[d_conv] = { 0.0f };
+
+#pragma unroll
+ for (size_t j = 0; j < d_conv; j++) {
+ w[j] = w_block[tid * stride_w + j];
+ }
+
+#pragma unroll
+ for (int64_t i = 0; i < split_n_t; i++) {
+ if (bidz * split_n_t + i < n_t) {
+ float sumf = 0.0f;
+
+ if (i == 0) {
+ for (size_t j = 0; j < d_conv; j++) {
+ x[j] = x_block[tid * stride_x + j];
+ }
+ } else {
+ x[(i - 1) % d_conv] = x_block[tid * stride_x + i + d_conv - 1];
+ }
+
+#pragma unroll
+ for (size_t j = 0; j < d_conv; j++) {
+ sumf += x[(i + j) % d_conv] * w[j];
+ }
+ y_block[i * stride_y + tid] = sumf;
+ }
+ }
+}
+
+static void ssm_conv_f32_cuda(const float * src0, const float * src1, const int src0_nb0, const int src0_nb1,
+ const int src0_nb2, const int src1_nb1, float * dst, const int dst_nb0, const int dst_nb1,
+ const int dst_nb2, const int64_t nc, const int64_t nr, const int64_t n_t,
+ const int64_t n_s, cudaStream_t stream) {
+ const int threads = 128;
+ GGML_ASSERT(nr % threads == 0);
+
+ auto launch_kernel = [&](auto NC) {
+ constexpr int kNC = decltype(NC)::value;
+ if (n_t <= 32) {
+ const dim3 blocks(n_s, (nr + threads - 1) / threads, 1);
+ ssm_conv_f32<threads, kNC><<<blocks, threads, 0, stream>>>(src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1,
+ dst, dst_nb0, dst_nb1, dst_nb2, n_t);
+ } else {
+ const int64_t split_n_t = 32;
+ dim3 blocks(n_s, (nr + threads - 1) / threads, (n_t + split_n_t - 1) / split_n_t);
+ ssm_conv_long_token_f32<threads, kNC, split_n_t><<<blocks, threads, 0, stream>>>(
+ src0, src1, src0_nb0, src0_nb1, src0_nb2, src1_nb1, dst, dst_nb0, dst_nb1, dst_nb2, n_t);
+ }
+ };
+
+ switch (nc) {
+ case 3: launch_kernel(std::integral_constant<int, 3>{}); break;
+ case 4: launch_kernel(std::integral_constant<int, 4>{}); break;
+ case 9: launch_kernel(std::integral_constant<int, 9>{}); break;
+ default: GGML_ABORT("Only support kernel sizes 3, 4, 9 right now.");
+ }
+}
+
+void ggml_cuda_op_ssm_conv(ggml_backend_cuda_context & ctx, ggml_tensor * dst) {
+ const struct ggml_tensor * src0 = dst->src[0]; // conv_x
+ const struct ggml_tensor * src1 = dst->src[1]; // conv1d.weight
+
+ const int64_t nc = src1->ne[0]; // d_conv
+ const int64_t nr = src0->ne[1]; // d_inner
+ const int64_t n_t = dst->ne[1]; // tokens per sequence
+ const int64_t n_s = dst->ne[2]; // number of sequences in the batch
+
+ GGML_ASSERT(dst->ne[0] == nr);
+ GGML_ASSERT(src0->nb[0] == sizeof(float));
+ GGML_ASSERT(src1->nb[0] == sizeof(float));
+ GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float));
+
+ const float * src0_d = (const float *) src0->data;
+ const float * src1_d = (const float *) src1->data;
+ float * dst_d = (float *) dst->data;
+ cudaStream_t stream = ctx.stream();
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ ssm_conv_f32_cuda(src0_d, src1_d, src0->nb[0], src0->nb[1], src0->nb[2], src1->nb[1], dst_d, dst->nb[0], dst->nb[1],
+ dst->nb[2], nc, nr, n_t, n_s, stream);
+}
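
Note: the kernels above assign one channel per thread. Each thread keeps the last d_conv input samples in registers as a small ring buffer, loads one new sample per token, and writes the dot product of that window with the per-channel filter. Below is a minimal CPU reference for the same depthwise causal convolution, useful as a sanity check against the GPU output; the flat array layouts and the ssm_conv_ref name are assumptions of this sketch, not part of the ggml API or this commit.

#include <cstdio>
#include <vector>

// Hypothetical CPU reference for the operation computed by ssm_conv_f32.
// x: [d_inner, n_t + d_conv - 1]  input per channel, already padded with the conv state
// w: [d_inner, d_conv]            per-channel filter (conv1d.weight)
// y: [n_t, d_inner]               output, one row per token
static void ssm_conv_ref(const std::vector<float> & x, const std::vector<float> & w,
                         std::vector<float> & y, int d_inner, int d_conv, int n_t) {
    for (int c = 0; c < d_inner; ++c) {          // one GPU thread corresponds to one channel c
        for (int i = 0; i < n_t; ++i) {          // token loop
            float sumf = 0.0f;
            for (int j = 0; j < d_conv; ++j) {   // window of d_conv samples ending at token i
                sumf += x[c * (n_t + d_conv - 1) + i + j] * w[c * d_conv + j];
            }
            y[i * d_inner + c] = sumf;
        }
    }
}

int main() {
    const int d_inner = 2, d_conv = 3, n_t = 4;
    std::vector<float> x(d_inner * (n_t + d_conv - 1), 1.0f);
    std::vector<float> w(d_inner * d_conv, 0.5f);
    std::vector<float> y(n_t * d_inner, 0.0f);
    ssm_conv_ref(x, w, y, d_inner, d_conv, n_t);
    printf("y[0] = %f\n", y[0]);                 // all-ones input, 0.5 weights: expect d_conv * 0.5 = 1.5
    return 0;
}

With all-ones input and constant 0.5 weights every output equals d_conv * 0.5, which gives a quick check that the ring-buffer indexing in the CUDA kernels reproduces the plain sliding-window convolution.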