#include "quantize.cuh"
#include <cstdint>

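// Quantize a tensor of floats to q8_1: one thread per value, with each contiguous
// group of QK8_1 = 32 values forming one block_q8_1 (32 int8 quants plus a half2
// holding the scale d and the sum of the block's float values). The scale maps the
// block's max. abs. value onto 127:
//     d = amax/127, q_i = roundf(x_i/d)
// e.g. amax = 2.54 gives d = 0.02, so x_i = 1.27 quantizes to roundf(63.5) = 64.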
__launch_bounds__(CUDA_QUANTIZE_BLOCK_SIZE, 1)
static __global__ void quantize_q8_1(
        const float * __restrict__ x, void * __restrict__ vy,
        const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03,
        const int64_t ne0, const uint32_t ne1, const uint3 ne2) {
    const int64_t i0 = (int64_t)blockDim.x*blockIdx.x + threadIdx.x;

    if (i0 >= ne0) {
        return;
    }

    const int64_t i3 = fastdiv(blockIdx.z, ne2);
    const int64_t i2 = blockIdx.z - i3*ne2.z;
    const int64_t i1 = blockIdx.y;

    const int64_t & i00 = i0;
    const int64_t & i01 = i1;
    const int64_t & i02 = i2;
    const int64_t & i03 = i3;

    const int64_t i_cont = ((i3*ne2.z + i2) * ne1 + i1) * ne0 + i0;

    block_q8_1 * y = (block_q8_1 *) vy;

    const int64_t ib  = i_cont / QK8_1; // block index
    const int64_t iqs = i_cont % QK8_1; // quant index

    const float xi = i0 < ne00 ? x[i03*s03 + i02*s02 + i01*s01 + i00] : 0.0f;
    float amax = fabsf(xi);
    float sum  = xi;

    amax = warp_reduce_max<QK8_1>(amax);
    sum  = warp_reduce_sum<QK8_1>(sum);

    const float  d = amax / 127.0f;
    const int8_t q = amax == 0.0f ? 0 : roundf(xi / d);

    y[ib].qs[iqs] = q;

    if (iqs > 0) {
        return;
    }

    y[ib].ds = make_half2(d, sum);
}

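// Compute the shared E8M0 scale (a biased power-of-two exponent, scale = 2^(biased-127))
// for a block with max. abs. value amax, chosen so that amax/scale lands in the
// representable range of FP4 E2M1 (max. unbiased exponent 2, max. magnitude 6.0).
// e.g. amax = 8.0 -> log2 = 3 -> shared exponent 3-2 = 1 -> biased byte 128, scale 2.0.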
__device__ __forceinline__ uint8_t compute_e8m0_scale(float amax) {
    if (!(amax > 0.0f)) {
        return 0;
    }

    // FP4 E2M1: max exponent (unbiased) is 2.
    constexpr int FP4_E2M1_EMAX = 2;

    const float e = log2f(amax);

    // "even" -> round-to-nearest integer, ties-to-even
    const int e_int = __float2int_rn(e);

    const int shared_exp = e_int - FP4_E2M1_EMAX;

    int biased = shared_exp + 127;

    biased = max(biased, 0);
    biased = min(biased, 254);

    return static_cast<uint8_t>(biased);
}

// Quantize values into the interleaved-nibble layout in which mxfp4 data is stored:
// a block a0-a31 is packed as bytes a0a16, a1a17, ..., a15a31 (low nibble a_i, high nibble a_i+16).
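// A block_fp4_mmq holds 8 MXFP4 blocks of 32 values, i.e. 256 values, split into 4
// quads of 64 values. Each warp quantizes one quad (2 scale blocks of 32) and stores
// the quad's two E8M0 scale bytes packed into a single element of d4.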
static __global__ void quantize_mmq_mxfp4(const float * __restrict__ x,
                                          const int32_t * __restrict__ ids,
                                          void * __restrict__ vy,
                                          const int64_t ne00,
                                          const int64_t s01,
                                          const int64_t s02,
                                          const int64_t s03,
                                          const int64_t ne0,
                                          const int ne1,
                                          const int ne2) {
    constexpr int vals_per_scale = 32;
    constexpr int vals_per_warp  = 2 * vals_per_scale; // Each warp processes 2 blocks of 32 = 64 values

    const int warp_id    = threadIdx.y;
    const int lane_id_32 = threadIdx.x;

    const int nwarps = blockDim.y;

    const int64_t warp_start_offset = (blockIdx.y * nwarps + warp_id) * vals_per_warp;

    if (warp_start_offset >= ne0) {
        return;
    }

    const int64_t i1 = blockIdx.x;
    const int64_t i2 = blockIdx.z % ne2;
    const int64_t i3 = blockIdx.z / ne2;

    const int64_t i01 = ids ? ids[i1] : i1;
    const int64_t i02 = i2;
    const int64_t i03 = i3;

    block_fp4_mmq * y = (block_fp4_mmq *) vy;

    const int64_t block_fp4_mmq_size = 8 * QK_MXFP4; // 256 values
    const int64_t ib0                = blockIdx.z * ((int64_t) ne1 * (ne0 / block_fp4_mmq_size));
    const int64_t ib                 = ib0 + (warp_start_offset / block_fp4_mmq_size) * ne1 + blockIdx.x;
    const int64_t quad_idx_in_block  = (warp_start_offset % block_fp4_mmq_size) / vals_per_warp;

    const int group_id      = lane_id_32 / 4;
    const int lane_in_group = lane_id_32 % 4;
    const int base          = group_id * 2;
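    // Lanes form 8 groups of 4: the shuffles below gather the group's pair of values
    // from lanes base/base+1 and their interleave partners from lanes base+16/base+17,
    // and lane 0 of each group writes one char2 (4 packed FP4 nibbles) per scale block.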
    char2 * yqs2 = (char2 *) y[ib].qs;

    const int64_t base_pos = i03 * s03 + i02 * s02 + i01 * s01;

    uint8_t scales[2];

#pragma unroll
    for (int b = 0; b < 2; ++b) {
        const int64_t i0 = warp_start_offset + b * vals_per_scale + lane_id_32;
        const float   xi = (i0 < ne00) ? x[base_pos + i0] : 0.0f;

        float amax = fabsf(xi);
#pragma unroll
        for (int mask = 16; mask > 0; mask >>= 1) {
            amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, mask, WARP_SIZE));
        }

        const uint8_t e = compute_e8m0_scale(amax);
        scales[b] = e;
        const float inv_s = (amax == 0.0f) ? 0.0f : __frcp_rn(ggml_cuda_e8m0_to_fp32(e));

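        // Two conversion paths: from CUDA 12.8 the __nv_fp4x4_e2m1 type converts four
        // floats to packed FP4 E2M1 directly; older toolkits use the LUT fallback below.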
#if CUDART_VERSION >= 12080
        const float scaled_val = xi * inv_s;

        const float val0 = __shfl_sync(0xFFFFFFFF, scaled_val, base,      WARP_SIZE);
        const float val1 = __shfl_sync(0xFFFFFFFF, scaled_val, base + 16, WARP_SIZE);
        const float val2 = __shfl_sync(0xFFFFFFFF, scaled_val, base + 1,  WARP_SIZE);
        const float val3 = __shfl_sync(0xFFFFFFFF, scaled_val, base + 17, WARP_SIZE);

        if (lane_in_group == 0) {
            __nv_fp4x4_e2m1 fp4_packed(make_float4(val0, val1, val2, val3));

            yqs2[quad_idx_in_block * 16 + b * 8 + group_id] = *(char2 *) &fp4_packed;
        }
#else
        // Fallback: manual FP4 conversion using LUT
        const uint8_t q_val = ggml_cuda_float_to_fp4_e2m1(xi, inv_s);

        const uint8_t q_lo_0 = __shfl_sync(0xFFFFFFFF, q_val, base,      WARP_SIZE);
        const uint8_t q_lo_1 = __shfl_sync(0xFFFFFFFF, q_val, base + 1,  WARP_SIZE);
        const uint8_t q_hi_0 = __shfl_sync(0xFFFFFFFF, q_val, base + 16, WARP_SIZE);
        const uint8_t q_hi_1 = __shfl_sync(0xFFFFFFFF, q_val, base + 17, WARP_SIZE);

        if (lane_in_group == 0) {
            char2 q;
            q.x = (q_hi_0 << 4) | q_lo_0;
            q.y = (q_hi_1 << 4) | q_lo_1;
            yqs2[quad_idx_in_block * 16 + b * 8 + group_id] = q;
        }
#endif // CUDART_VERSION >= 12080
    }

    if (lane_id_32 == 0) {
        // Store 2 scales packed into 1 uint32
        y[ib].d4[quad_idx_in_block] = (scales[1] << 8) | scales[0];
    }
}

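// Quantize to the MMQ q8_1 layout: each thread handles 4 consecutive floats and each
// block_q8_1_mmq covers 4*QK8_1 = 128 values. ds_layout selects the scale/sum storage:
//     D4:   one float scale per 32 values, no partial sums.
//     DS4:  one half2 (scale, partial sum) per 32 values.
//     D2S6: one half scale per 64 values plus one half partial sum per 16 values,
//           stored for the first 96 values only.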
template <mmq_q8_1_ds_layout ds_layout>
static __global__ void quantize_mmq_q8_1(
    const float * __restrict__ x, const int32_t * __restrict__ ids, void * __restrict__ vy,
    const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03,
    const int64_t ne0, const int ne1, const int ne2) {

    constexpr int vals_per_scale = ds_layout == MMQ_Q8_1_DS_LAYOUT_D2S6 ? 64 : 32;
    constexpr int vals_per_sum   = ds_layout == MMQ_Q8_1_DS_LAYOUT_D2S6 ? 16 : 32;

    const int64_t i0 = ((int64_t)blockDim.x*blockIdx.y + threadIdx.x)*4;

    if (i0 >= ne0) {
        return;
    }

    const int64_t i1 = blockIdx.x;
    const int64_t i2 = blockIdx.z % ne2;
    const int64_t i3 = blockIdx.z / ne2;

    const int64_t i00 = i0;
    const int64_t i01 = ids ? ids[i1] : i1;
    const int64_t i02 = i2;
    const int64_t i03 = i3;

    const float4 * x4 = (const float4 *) x;

    block_q8_1_mmq * y = (block_q8_1_mmq *) vy;

    const int64_t ib0 = blockIdx.z*((int64_t)gridDim.x*gridDim.y*blockDim.x/QK8_1); // first block of channel
    const int64_t ib  = ib0 + (i0 / (4*QK8_1))*ne1 + blockIdx.x;                    // block index in channel
    const int64_t iqs = i0 % (4*QK8_1);                                             // quant index in block
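    // gridDim.x == ne1 and gridDim.y*blockDim.x threads cover ne0/4 float4 loads, so
    // gridDim.x*gridDim.y*blockDim.x/QK8_1 equals the number of block_q8_1_mmq elements
    // per channel (128 values each), with ne0 padded up to the launch grid.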

    // Load 4 floats per thread and calculate max. abs. value between them:
    const float4 xi = i0 < ne00 ? x4[(i03*s03 + i02*s02 + i01*s01 + i00)/4] : make_float4(0.0f, 0.0f, 0.0f, 0.0f);
    float amax = fabsf(xi.x);
    amax = fmaxf(amax, fabsf(xi.y));
    amax = fmaxf(amax, fabsf(xi.z));
    amax = fmaxf(amax, fabsf(xi.w));

    // Exchange max. abs. value between vals_per_scale/4 threads.
#pragma unroll
    for (int offset = vals_per_scale/8; offset > 0; offset >>= 1) {
        amax = fmaxf(amax, __shfl_xor_sync(0xFFFFFFFF, amax, offset, WARP_SIZE));
    }

    float sum;
    if (ds_layout != MMQ_Q8_1_DS_LAYOUT_D4) {
        sum = xi.x + xi.y + xi.z + xi.w;

        // Calculate sums across vals_per_sum/4 threads.
#pragma unroll
        for (int offset = vals_per_sum/8; offset > 0; offset >>= 1) {
            sum += __shfl_xor_sync(0xFFFFFFFF, sum, offset, WARP_SIZE);
        }
    }

    const float d_inv = 127.0f / amax;
    char4 q;
    q.x = roundf(xi.x*d_inv);
    q.y = roundf(xi.y*d_inv);
    q.z = roundf(xi.z*d_inv);
    q.w = roundf(xi.w*d_inv);
    // Write back 4 int8 values as a single 32 bit value for better memory bandwidth:
    char4 * yqs4 = (char4 *) y[ib].qs;
    yqs4[iqs/4] = q;

    if (ds_layout == MMQ_Q8_1_DS_LAYOUT_D2S6) {
        if (iqs % 16 != 0 || iqs >= 96) {
            return;
        }

        y[ib].d2s6[2 + iqs/16] = sum;

        if (iqs % 64 != 0) {
            return;
        }

        const float d = 1.0f / d_inv;

        y[ib].d2s6[iqs/64] = d;

        return;
    }

    if (iqs % 32 != 0) {
        return;
    }

    const float d = 1.0f / d_inv;

    if (ds_layout == MMQ_Q8_1_DS_LAYOUT_DS4) {
        y[ib].ds4[iqs/32] = make_half2(d, sum);
    } else {
        y[ib].d4[iqs/32] = d;
    }
}

void quantize_row_q8_1_cuda(
    const float * x, const int32_t * ids, void * vy, const ggml_type type_src0,
    const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03,
    const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, cudaStream_t stream) {
    GGML_ASSERT(!ids);
    GGML_ASSERT(ne0 % QK8_1 == 0);

    const uint3 ne2_fastdiv = init_fastdiv_values(ne2);

    const int64_t block_num_x = (ne0 + CUDA_QUANTIZE_BLOCK_SIZE - 1) / CUDA_QUANTIZE_BLOCK_SIZE;
    const dim3 num_blocks(block_num_x, ne1, ne2*ne3);
    const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE, 1, 1);
    quantize_q8_1<<<num_blocks, block_size, 0, stream>>>(x, vy, ne00, s01, s02, s03, ne0, ne1, ne2_fastdiv);
    GGML_UNUSED(type_src0);
}

void quantize_mmq_q8_1_cuda(
    const float * x, const int32_t * ids, void * vy, const ggml_type type_src0,
    const int64_t ne00, const int64_t s01, const int64_t s02, const int64_t s03,
    const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3, cudaStream_t stream) {
    GGML_ASSERT(ne00 % 4 == 0);
    GGML_ASSERT(ne0 % (4*QK8_1) == 0);

    // ne1 tends to assume the highest values, therefore use it as the "x" dimension of the CUDA grid:
    const int64_t block_num_y = (ne0 + 4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ - 1) / (4*CUDA_QUANTIZE_BLOCK_SIZE_MMQ);
    const dim3 num_blocks(ne1, block_num_y, ne2*ne3);
    const dim3 block_size(CUDA_QUANTIZE_BLOCK_SIZE_MMQ, 1, 1);
    switch (mmq_get_q8_1_ds_layout(type_src0)) {
        case MMQ_Q8_1_DS_LAYOUT_D4:
            quantize_mmq_q8_1<MMQ_Q8_1_DS_LAYOUT_D4>
                <<<num_blocks, block_size, 0, stream>>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2);
            break;
        case MMQ_Q8_1_DS_LAYOUT_DS4:
            quantize_mmq_q8_1<MMQ_Q8_1_DS_LAYOUT_DS4>
                <<<num_blocks, block_size, 0, stream>>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2);
            break;
        case MMQ_Q8_1_DS_LAYOUT_D2S6:
            quantize_mmq_q8_1<MMQ_Q8_1_DS_LAYOUT_D2S6>
                <<<num_blocks, block_size, 0, stream>>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2);
            break;
        default:
            GGML_ABORT("fatal error");
            break;
    }
}

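// Launch: 8 warps per block, each warp quantizing 2*QK_MXFP4 = 64 values, i.e. 512
// values of ne0 per CUDA block; ne1 maps to blockIdx.x (indirected through ids when
// present) and ne2*ne3 to blockIdx.z.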
void quantize_mmq_mxfp4_cuda(const float * x,
                             const int32_t * ids,
                             void * vy,
                             [[maybe_unused]] const ggml_type type_src0,
                             const int64_t ne00,
                             const int64_t s01,
                             const int64_t s02,
                             const int64_t s03,
                             const int64_t ne0,
                             const int64_t ne1,
                             const int64_t ne2,
                             const int64_t ne3,
                             cudaStream_t stream) {
    GGML_ASSERT(ne0 % (2 * QK_MXFP4) == 0);

    constexpr int nwarps         = 8;
    constexpr int vals_per_warp  = 2 * QK_MXFP4;
    constexpr int vals_per_block = nwarps * vals_per_warp;

    const int64_t block_num_y = (ne0 + vals_per_block - 1) / vals_per_block;
    const dim3 num_blocks(ne1, block_num_y, ne2 * ne3);
    const dim3 block_size(WARP_SIZE, nwarps, 1);

    quantize_mmq_mxfp4<<<num_blocks, block_size, 0, stream>>>(x, ids, vy, ne00, s01, s02, s03, ne0, ne1, ne2);
}