#pragma OPENCL EXTENSION cl_khr_fp16 : enable

// Accumulation is always done in fp32, independent of the storage types below.
#define ACC_TYPE float
#define ACC_TYPE4 float4
// Storage types: Q and O are fp32; K/V and the mask are fp16 and are widened
// to fp32 on load via the CONVERT_* macros.
#define Q_DATA_TYPE4 float4
#define KV_DATA_TYPE4 half4
#define O_DATA_TYPE4 float4
#define MASK_DATA_TYPE half
// Storage -> accumulator conversions (identity where storage is already fp32).
#define CONVERT_Q_ACC4(x) (x)
#define CONVERT_KV_ACC4(x) convert_float4(x)
#define CONVERT_O_DATA4(x) (x)

// Head dimensions in units of 4-wide vectors. DK (K head dim) and DV (V head
// dim) are injected at program-build time and are assumed divisible by 4.
#define DK_VEC (DK/4)
#define DV_VEC (DV/4)
// Work-group size of the tiled kernel: one work-item per query row in a tile.
// BLOCK_M / BLOCK_N (tile heights for Q and K/V) are also build-time defines.
#define WG_SIZE (BLOCK_M)
// Work-group size of the single-query (n_q == 1) kernel.
#define Q1_WG_SIZE 64
17
// Per-head ALiBi slope. Returns 1.0f (bias disabled) when max_bias <= 0.
// The first n_head_log2 heads use base m0 with exponents 1..n_head_log2;
// the remaining heads use base m1 with odd exponents 1, 3, 5, ...
inline float get_alibi_slope(
    const float max_bias, const uint h, const uint n_head_log2, const float m0, const float m1
) {
    if (max_bias <= 0.0f) {
        return 1.0f;
    }

    const int  in_first_group = h < n_head_log2;
    const float base     = in_first_group ? m0 : m1;
    const int   exponent = in_first_group ? (int)(h + 1) : 2*(int)(h - n_head_log2) + 1;

    return pow(base, exponent);
}
// Tiled flash attention, fp32 Q/O with fp16 K/V.
//
// Each work-group processes BLOCK_M query rows of one (batch, head) pair:
// work-item `tid` owns query row `block_q_idx * BLOCK_M + tid`, holding its
// running softmax state (row max m_i, normalizer l_i) and output accumulator
// in registers, while BLOCK_N-row K/V tiles are staged cooperatively in local
// memory. The softmax is computed online: when a new tile raises the row max,
// previous contributions are rescaled by exp(m_old - m_new), so a single pass
// over the kv sequence suffices.
//
// Supports GQA (n_head_kv < n_head), causal masking, an additive fp16 mask
// with ALiBi slope, logit softcapping, and per-head attention sinks (an extra
// logit that joins the softmax denominator but contributes no value vector).
__kernel void flash_attn_f32_f16(
    const global void * q_void, ulong q_offset,
    const global void * k_void, ulong k_offset,
    const global void * v_void, ulong v_offset,
    global void * o_void, ulong o_offset,
    const float scale,
    const int n_q,
    const int n_kv,
    const int is_causal,
    const int n_head,
    const ulong q_nb1, const ulong q_nb2, const ulong q_nb3,
    const ulong k_nb1, const ulong k_nb2, const ulong k_nb3,
    const ulong v_nb1, const ulong v_nb2, const ulong v_nb3,
    const ulong o_nb1, const ulong o_nb2, const ulong o_nb3,
    const float max_bias,
    const float m0,
    const float m1,
    const int n_head_log2,
    const float logit_softcap,
    const int n_head_kv,
    const global void* mask_void,
    const ulong mask_offset,
    const ulong mask_nb1,
    const ulong mask_nb2,
    const ulong mask_nb3,
    const int mask_ne2,
    const int mask_ne3,
    const global void* sinks_void,
    const ulong sinks_offset
) {
    const int tid = get_local_id(0);
    const int block_q_idx = get_group_id(0);
    const int head_batch_idx = get_global_id(1);

    // Query row owned by this work-item; may be >= n_q for the last block.
    const int my_query_row = block_q_idx * BLOCK_M + tid;

    const int batch_idx = head_batch_idx / n_head;
    const int head_idx = head_batch_idx % n_head;

    // GQA: several query heads share one kv head.
    const int gqa_ratio = n_head / n_head_kv;
    const int head_kv_idx = head_idx / gqa_ratio;

    const global char* q_base = (const global char*)q_void + q_offset;
    const global char* k_base = (const global char*)k_void + k_offset;
    const global char* v_base = (const global char*)v_void + v_offset;
    global char* o_base = (global char*)o_void + o_offset;

    // Mask head/batch dims broadcast via modulo.
    const global char* mask_base = NULL;
    if (mask_void != NULL) {
        const int mask_head_idx = head_idx % mask_ne2;
        const int mask_batch_idx = batch_idx % mask_ne3;
        mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2;
    }

    // Load this row's Q vector into registers (skipped for out-of-range rows,
    // which still participate in tile loads and barriers below).
    ACC_TYPE4 q_priv[DK_VEC];
    if (my_query_row < n_q) {
        const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2 + my_query_row * q_nb1;
        const global Q_DATA_TYPE4* q_ptr = (const global Q_DATA_TYPE4*)(q_base + q_row_offset);
        #pragma unroll
        for (int i = 0; i < DK_VEC; ++i) {
            q_priv[i] = CONVERT_Q_ACC4(q_ptr[i]);
        }
    }

    // Online-softmax state: output accumulator, running max, running sum.
    ACC_TYPE4 o_acc[DV_VEC];
    #pragma unroll
    for (int i = 0; i < DV_VEC; ++i) {
        o_acc[i] = (ACC_TYPE4)(0.0f);
    }
    ACC_TYPE m_i = -INFINITY;
    ACC_TYPE l_i = 0.0f;

    float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1);

    __local KV_DATA_TYPE4 l_k[BLOCK_N][DK_VEC];
    __local KV_DATA_TYPE4 l_v[BLOCK_N][DV_VEC];

    for (int k_start = 0; k_start < n_kv; k_start += BLOCK_N) {
        // Cooperative load of the K tile (rows past n_kv are left untouched;
        // their scores are forced to -INF below).
        for (int i = tid; i < BLOCK_N * DK_VEC; i += WG_SIZE) {
            const int row = i / DK_VEC;
            const int col = i % DK_VEC;
            const int k_row_idx = k_start + row;
            if (k_row_idx < n_kv) {
                const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_row_idx * k_nb1;
                l_k[row][col] = ((__global KV_DATA_TYPE4*)(k_base + k_row_offset))[col];
            }
        }
        // Cooperative load of the V tile.
        for (int i = tid; i < BLOCK_N * DV_VEC; i += WG_SIZE) {
            const int row = i / DV_VEC;
            const int col = i % DV_VEC;
            const int v_row_idx = k_start + row;
            if (v_row_idx < n_kv) {
                const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + v_row_idx * v_nb1;
                l_v[row][col] = ((__global KV_DATA_TYPE4*)(v_base + v_row_offset))[col];
            }
        }
        // Tile fully staged before anyone reads it.
        barrier(CLK_LOCAL_MEM_FENCE);

        // Out-of-range rows skip the compute but must NOT skip the trailing
        // barrier, so the compute is guarded by an `if` rather than `continue`.
        if (my_query_row < n_q) {
            // Two kv rows per step to expose more ILP; BLOCK_N must be even.
            for (int j = 0; j < BLOCK_N; j += 2) {
                const int k_row0 = k_start + j;
                const int k_row1 = k_start + j + 1;

                ACC_TYPE4 dot_acc0 = (ACC_TYPE4)(0.0f);
                ACC_TYPE4 dot_acc1 = (ACC_TYPE4)(0.0f);
                #pragma unroll
                for (int k = 0; k < DK_VEC; k++) {
                    dot_acc0 = mad(q_priv[k], CONVERT_KV_ACC4(l_k[j][k]), dot_acc0);
                    dot_acc1 = mad(q_priv[k], CONVERT_KV_ACC4(l_k[j+1][k]), dot_acc1);
                }
                ACC_TYPE score0 = (dot_acc0.s0 + dot_acc0.s1 + dot_acc0.s2 + dot_acc0.s3) * scale;
                ACC_TYPE score1 = (dot_acc1.s0 + dot_acc1.s1 + dot_acc1.s2 + dot_acc1.s3) * scale;

                // Causal: row r attends to kv positions <= n_kv - n_q + r.
                if (is_causal) {
                    if (k_row0 > (n_kv - n_q + my_query_row)) score0 = -INFINITY;
                    if (k_row1 > (n_kv - n_q + my_query_row)) score1 = -INFINITY;
                }

                // Kill contributions from unloaded tail rows of the last tile.
                if (k_row0 >= n_kv) score0 = -INFINITY;
                if (k_row1 >= n_kv) score1 = -INFINITY;

                if (mask_base != NULL) {
                    const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base + my_query_row * mask_nb1);
                    if (k_row0 < n_kv) score0 += slope * (ACC_TYPE)mask_ptr[k_row0];
                    if (k_row1 < n_kv) score1 += slope * (ACC_TYPE)mask_ptr[k_row1];
                }

                if (logit_softcap > 0.0f) {
                    score0 = logit_softcap * tanh(score0 / logit_softcap);
                    score1 = logit_softcap * tanh(score1 / logit_softcap);
                }

                // Online softmax update: rescale history by exp(m_i - m_new).
                const ACC_TYPE m_new = max(m_i, max(score0, score1));
                const ACC_TYPE p0 = exp(score0 - m_new);
                const ACC_TYPE p1 = exp(score1 - m_new);
                const ACC_TYPE scale_prev = exp(m_i - m_new);

                #pragma unroll
                for (int i = 0; i < DV_VEC; ++i) {
                    o_acc[i] = o_acc[i] * scale_prev + p0 * CONVERT_KV_ACC4(l_v[j][i]) + p1 * CONVERT_KV_ACC4(l_v[j+1][i]);
                }
                l_i = l_i * scale_prev + p0 + p1;
                m_i = m_new;
            }
        }

        // BUGFIX: the next iteration overwrites l_k/l_v; without this
        // work-group-wide barrier those writes race with the reads above.
        // (The previous `continue`-based flow had no such barrier.)
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    if (my_query_row < n_q) {
        // Fold in the attention sink: a per-head logit with no value vector.
        if (sinks_void != NULL) {
            const global ACC_TYPE* sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset);
            const ACC_TYPE m_sink = sinks_ptr[head_idx];
            const ACC_TYPE m_final = max(m_i, m_sink);

            const ACC_TYPE scale_o = exp(m_i - m_final);
            #pragma unroll
            for (int i = 0; i < DV_VEC; ++i) {
                o_acc[i] *= scale_o;
            }

            l_i = l_i * exp(m_i - m_final) + exp(m_sink - m_final);
        }

        // Note: O uses nb2 for the row stride and nb1 for the head stride
        // (permuted layout relative to Q).
        const ulong o_row_offset = batch_idx * o_nb3 + my_query_row * o_nb2 + head_idx * o_nb1;
        global O_DATA_TYPE4 *o_row = (global O_DATA_TYPE4 *)(o_base + o_row_offset);
        if (l_i > 0.0f) {
            const ACC_TYPE l_inv = 1.0f / l_i;
            #pragma unroll
            for (int i = 0; i < DV_VEC; ++i) {
                o_row[i] = CONVERT_O_DATA4(o_acc[i] * l_inv);
            }
        } else {
            // Fully-masked row: emit zeros instead of 0/0 NaNs.
            #pragma unroll
            for (int i = 0; i < DV_VEC; ++i) {
                o_row[i] = (O_DATA_TYPE4)(0.0f);
            }
        }
    }
}
209
// Specialized flash attention for the single-query case (n_q == 1), e.g.
// autoregressive decode. One work-group of Q1_WG_SIZE work-items handles ONE
// (batch, head) pair, striping the kv sequence across work-items. Two passes:
//   pass 1: each work-item computes a partial max of its scores; a local-
//           memory tree reduction yields the global max m_final.
//   pass 2: scores are recomputed and accumulated as exp(score - m_final);
//           the normalizer and output vectors are then tree-reduced.
// Recomputing scores avoids storing them and sidesteps the online-rescaling
// bookkeeping of the multi-row kernel while staying numerically stable.
// No causal masking is needed: a single query row attends to all kv rows.
__kernel void flash_attn_f32_f16_q1(
    const global void * q_void, ulong q_offset,
    const global void * k_void, ulong k_offset,
    const global void * v_void, ulong v_offset,
    global void * o_void, ulong o_offset,
    const float scale,
    const int n_q,
    const int n_kv,
    const int is_causal,
    const int n_head,
    const ulong q_nb1, const ulong q_nb2, const ulong q_nb3,
    const ulong k_nb1, const ulong k_nb2, const ulong k_nb3,
    const ulong v_nb1, const ulong v_nb2, const ulong v_nb3,
    const ulong o_nb1, const ulong o_nb2, const ulong o_nb3,
    const float max_bias,
    const float m0,
    const float m1,
    const int n_head_log2,
    const float logit_softcap,
    const int n_head_kv,
    const global void* mask_void,
    const ulong mask_offset,
    const ulong mask_nb1,
    const ulong mask_nb2,
    const ulong mask_nb3,
    const int mask_ne2,
    const int mask_ne3,
    const global void* sinks_void,
    const ulong sinks_offset
) {
    const int tid = get_local_id(0);
    const int head_batch_idx = get_global_id(1);

    const int batch_idx = head_batch_idx / n_head;
    const int head_idx = head_batch_idx % n_head;

    // GQA: several query heads share one kv head.
    const int gqa_ratio = n_head / n_head_kv;
    const int head_kv_idx = head_idx / gqa_ratio;

    const global char* q_base = (const global char*)q_void + q_offset;
    const global char* k_base = (const global char*)k_void + k_offset;
    const global char* v_base = (const global char*)v_void + v_offset;
    global char* o_base = (global char*)o_void + o_offset;

    // Mask head/batch dims broadcast via modulo.
    const global char* mask_base = NULL;
    if (mask_void != NULL) {
        const int mask_head_idx = head_idx % mask_ne2;
        const int mask_batch_idx = batch_idx % mask_ne3;
        mask_base = (const global char*)mask_void + mask_offset + mask_batch_idx * mask_nb3 + mask_head_idx * mask_nb2;
    }

    // Every work-item keeps the full (single) Q row in registers; row index
    // is 0, so no q_nb1 term.
    ACC_TYPE4 q_priv[DK_VEC];
    const ulong q_row_offset = batch_idx * q_nb3 + head_idx * q_nb2;
    const global Q_DATA_TYPE4* q_ptr = (const global Q_DATA_TYPE4*)(q_base + q_row_offset);
    #pragma unroll
    for (int i = 0; i < DK_VEC; ++i) {
        q_priv[i] = CONVERT_Q_ACC4(q_ptr[i]);
    }

    float slope = get_alibi_slope(max_bias, head_idx, n_head_log2, m0, m1);

    const global ACC_TYPE* sinks_ptr = NULL;
    if (sinks_void != NULL) {
        sinks_ptr = (const global ACC_TYPE*)((const global char*)sinks_void + sinks_offset);
    }

    // Pass 1: partial max over this work-item's stripe of kv rows. Seeding
    // with the sink logit folds it into the global max for free.
    ACC_TYPE m_i = (sinks_ptr != NULL) ? sinks_ptr[head_idx] : -INFINITY;
    for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) {
        const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1;
        const global KV_DATA_TYPE4* k_ptr = (const global KV_DATA_TYPE4*)(k_base + k_row_offset);
        ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f);
        #pragma unroll
        for (int k = 0; k < DK_VEC; k++) {
            dot_acc = mad(q_priv[k], CONVERT_KV_ACC4(k_ptr[k]), dot_acc);
        }
        ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale;
        if (mask_base != NULL) {
            // Single query row => mask row 0; indexed directly by kv position.
            const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base);
            score += slope * (ACC_TYPE)mask_ptr[k_idx];
        }
        if (logit_softcap > 0.0f) {
            score = logit_softcap * tanh(score / logit_softcap);
        }
        m_i = max(m_i, score);
    }

    // Tree-reduce the per-work-item maxima to the global max m_final.
    // The barrier sits outside the `if (tid < s)` so all work-items hit it.
    __local ACC_TYPE local_m[Q1_WG_SIZE];
    local_m[tid] = m_i;
    barrier(CLK_LOCAL_MEM_FENCE);
    #pragma unroll
    for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) local_m[tid] = max(local_m[tid], local_m[tid + s]);
        barrier(CLK_LOCAL_MEM_FENCE);
    }
    const ACC_TYPE m_final = local_m[0];

    ACC_TYPE4 o_acc[DV_VEC];
    #pragma unroll
    for (int i = 0; i < DV_VEC; ++i) o_acc[i] = (ACC_TYPE4)(0.0f);
    ACC_TYPE l_i = 0.0f;

    // Pass 2: recompute each score, weight V rows by exp(score - m_final),
    // and accumulate partial normalizer l_i and partial output o_acc.
    for (int k_idx = tid; k_idx < n_kv; k_idx += Q1_WG_SIZE) {
        const ulong k_row_offset = batch_idx * k_nb3 + head_kv_idx * k_nb2 + k_idx * k_nb1;
        const ulong v_row_offset = batch_idx * v_nb3 + head_kv_idx * v_nb2 + k_idx * v_nb1;
        const global KV_DATA_TYPE4* k_ptr = (const global KV_DATA_TYPE4*)(k_base + k_row_offset);
        const global KV_DATA_TYPE4* v_ptr = (const global KV_DATA_TYPE4*)(v_base + v_row_offset);
        ACC_TYPE4 dot_acc = (ACC_TYPE4)(0.0f);
        #pragma unroll
        for (int k = 0; k < DK_VEC; k++) {
            dot_acc = mad(q_priv[k], CONVERT_KV_ACC4(k_ptr[k]), dot_acc);
        }
        ACC_TYPE score = (dot_acc.s0 + dot_acc.s1 + dot_acc.s2 + dot_acc.s3) * scale;
        if (mask_base != NULL) {
            const global MASK_DATA_TYPE* mask_ptr = (const global MASK_DATA_TYPE*)(mask_base);
            score += slope * (ACC_TYPE)mask_ptr[k_idx];
        }
        if (logit_softcap > 0.0f) {
            score = logit_softcap * tanh(score / logit_softcap);
        }
        const ACC_TYPE p = exp(score - m_final);
        l_i += p;
        #pragma unroll
        for (int i = 0; i < DV_VEC; i++) {
            o_acc[i] = mad(p, CONVERT_KV_ACC4(v_ptr[i]), o_acc[i]);
        }
    }

    // Tree-reduce the normalizer.
    __local ACC_TYPE local_l[Q1_WG_SIZE];
    __local ACC_TYPE4 local_o_comp[Q1_WG_SIZE];
    local_l[tid] = l_i;
    barrier(CLK_LOCAL_MEM_FENCE);
    #pragma unroll
    for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) {
        if (tid < s) local_l[tid] += local_l[tid + s];
        barrier(CLK_LOCAL_MEM_FENCE);
    }

    // O layout is permuted relative to Q: nb1 is the head stride (row 0).
    const ulong o_row_offset = batch_idx * o_nb3 + head_idx * o_nb1;
    global O_DATA_TYPE4 *o_row = (global O_DATA_TYPE4 *)(o_base + o_row_offset);
    ACC_TYPE l_final = local_l[0];

    // The sink contributes exp(m_sink - m_final) to the denominator only.
    if (sinks_ptr != NULL) {
        l_final += exp(sinks_ptr[head_idx] - m_final);
    }

    // l_final is uniform across the work-group, so branching on it keeps the
    // barriers below non-divergent.
    if (l_final > 0.0f) {
        const ACC_TYPE l_inv = 1.0f / l_final;
        // Reduce one output vector component at a time through local memory;
        // only work-item 0 writes the normalized result.
        for (int i = 0; i < DV_VEC; i++) {
            local_o_comp[tid] = o_acc[i];
            barrier(CLK_LOCAL_MEM_FENCE);
            #pragma unroll
            for (int s = Q1_WG_SIZE / 2; s > 0; s >>= 1) {
                if (tid < s) local_o_comp[tid] += local_o_comp[tid + s];
                barrier(CLK_LOCAL_MEM_FENCE);
            }
            if (tid == 0) {
                o_row[i] = CONVERT_O_DATA4(local_o_comp[0] * l_inv);
            }
        }
    } else if (tid == 0) {
        // Fully-masked / empty sequence: emit zeros instead of 0/0 NaNs.
        #pragma unroll
        for (int i = 0; i < DV_VEC; ++i) o_row[i] = (O_DATA_TYPE4)(0.0f);
    }
}