#include <sycl/sycl.hpp>

#include "common.hpp"

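// Gated linear attention forward pass in fp32. Each head carries a
// HEAD_SIZE x HEAD_SIZE recurrent state across the tokens of its sequence.
// Per token, with j over key channels and i over value channels:
//   state[j][i] = state[j][i] * td[j] + k[j] * v[i]
//   y[i]        = scale * sum_j r[j] * state[j][i]
// One work-group processes one (batch, head) pair; work-item i owns column i
// of the state, i.e. one output channel.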
template <u_int HEAD_SIZE>
static void gated_linear_attn_f32_kernel(const dpct::queue_ptr stream, u_int B, u_int T, u_int C, u_int H, float scale,
                                         const float * k, const float * v, const float * r, const float * td,
                                         const float * s, float * dst) {
    const u_int head_size = HEAD_SIZE;
    const u_int state_size = C * head_size;
    const u_int n_seq_tokens = T / B;
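    // one work-group per (batch, head) pair, one work-item per head channel (C / H == head_size)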
    sycl::range<1> block_dims((C / H));
    sycl::range<1> grid_dims((B * H));
    stream->submit([&](sycl::handler & cgh) {
        /* local memory accessors */
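        // k, r and td for the current token are staged in local memory so that
        // every work-item can read all head_size values of them in the inner loop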
        auto _k = sycl::local_accessor<float, 1>(sycl::range<1>(head_size), cgh);
        auto _r = sycl::local_accessor<float, 1>(sycl::range<1>(head_size), cgh);
        auto _td = sycl::local_accessor<float, 1>(sycl::range<1>(head_size), cgh);

        cgh.parallel_for(sycl::nd_range<1>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<1> item) {
            u_int tid = item.get_local_id(0);
            u_int bid = item.get_group(0);

            u_int batch_i = bid / H;
            u_int head_i = bid % H;

            float state[head_size];

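            // load this work-item's column of the head's state into registers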
#pragma unroll
            for (u_int i = 0; i < head_size; i++) {
                state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid];
            }

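            // walk this sequence token by token; t indexes this work-item's
            // channel (head_i, tid) in the [T, C] tensors, advancing by C per token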
            for (u_int t = batch_i * n_seq_tokens * C + head_i * head_size + tid;
                 t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) {

                item.barrier(sycl::access::fence_space::local_space);  // sync threads
                _k[tid] = k[t];
                _r[tid] = r[t];
                _td[tid] = td[t];
                item.barrier(sycl::access::fence_space::local_space);  // sync threads

                const float _v = v[t];
                float y = 0;

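                // process the head in float4 chunks: decay the state by td, add
                // the k * v outer-product contribution, and accumulate r . state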
                for (u_int j = 0; j < head_size; j += 4) {
                    const sycl::float4 & k = (sycl::float4 &) (_k[j]);
                    const sycl::float4 & r = (sycl::float4 &) (_r[j]);
                    const sycl::float4 & td = (sycl::float4 &) (_td[j]);
                    sycl::float4 & s = (sycl::float4 &) (state[j]);
                    sycl::float4 kv;

                    kv.x() = k.x() * _v;
                    kv.y() = k.y() * _v;
                    kv.z() = k.z() * _v;
                    kv.w() = k.w() * _v;

                    s.x() = s.x() * td.x() + kv.x();
                    s.y() = s.y() * td.y() + kv.y();
                    s.z() = s.z() * td.z() + kv.z();
                    s.w() = s.w() * td.w() + kv.w();

                    y += r.x() * s.x();
                    y += r.y() * s.y();
                    y += r.z() * s.z();
                    y += r.w() * s.w();
                }
                dst[t] = y * scale;
            }
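
            // write the updated state back; it is stored after the T * C output elements of dst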
#pragma unroll
            for (u_int i = 0; i < head_size; i++) {
                dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i];
            }
        });
    });
}

void ggml_sycl_op_gated_linear_attn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
    scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/5);
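    // src0 = k, src1 = v, src2 = r, src3 = td, src4 = s (initial per-sequence state)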
    const float * k_d = static_cast<const float *>(dst->src[0]->data);
    const float * v_d = static_cast<const float *>(dst->src[1]->data);
    const float * r_d = static_cast<const float *>(dst->src[2]->data);
    const float * td_d = static_cast<const float *>(dst->src[3]->data);
    const float * s_d = static_cast<const float *>(dst->src[4]->data);

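    // B: sequences in the batch, T: tokens, C: channels, H: heads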
    const int64_t B = dst->src[4]->ne[1];
    const int64_t T = dst->src[0]->ne[2];
    const int64_t C = dst->ne[0];
    const int64_t H = dst->src[0]->ne[1];

    dpct::queue_ptr stream = ctx.stream();
    GGML_ASSERT(dst->src[4]->type == GGML_TYPE_F32);
    GGML_ASSERT(C % H == 0);
    GGML_ASSERT(C / H == 64 || C / H == 128);

    float scale;
    memcpy(&scale, dst->op_params, sizeof(float));

    float * dst_d = (float *) dst->data;

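    // dispatch on the head size so the kernel's per-work-item state array gets a compile-time bound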
    if (C / H == 64) {
        gated_linear_attn_f32_kernel<64>(stream, B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d);
    } else {
        gated_linear_attn_f32_kernel<128>(stream, B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d);
    }
}