#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.glsl"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

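// Shared caches for the per-sub-block scales (sccache1) and mins (sccache2)
// of the superblock currently in flight. Each 16-thread group owns one [16]
// slice; the leading [2] dimension double-buffers the cache so a single
// barrier per iteration is enough (no second barrier before overwriting).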
shared FLOAT_TYPE sccache1[2][BLOCK_SIZE/16][16];
shared FLOAT_TYPE sccache2[2][BLOCK_SIZE/16][16];

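// Per-thread partial results, reduced across the workgroup at the end, and
// the double-buffer selector that is toggled once per superblock iteration.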
FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
uint csel = 0;

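// Accumulates the contribution of superblock i (QUANT_K values of each row)
// into temp for all NUM_COLS columns and num_rows consecutive rows. With
// all_threads == false, i may be out of range for some threads and every
// access is bounds-checked against num_blocks_per_row.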
void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint v_im, const uint ix, const uint q_offset, const uint y_offset, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows, const bool all_threads) {
    const uint y_idx = i * QUANT_K + y_offset;

    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const uint ib0 = a_offset / QUANT_K + (first_row+n)*num_blocks_per_row;
        csel ^= 1;

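        // The 16 threads of the group cooperatively load this superblock's 16
        // scale bytes: the low nibble is the 4-bit sub-block scale, the high
        // nibble the 4-bit sub-block min (ggml's block_q2_K scales[] encoding).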
        if (!all_threads) { // when we don't have enough blocks to use all threads
            if (i < num_blocks_per_row) {
                const uint32_t scale = uint32_t(data_a[ib0 + i].scales[itid]);
                sccache1[csel][ix][itid] = FLOAT_TYPE(scale & 0xF);
                sccache2[csel][ix][itid] = FLOAT_TYPE((scale >> 4) & 0xF);
            }
            barrier();

            if (i >= num_blocks_per_row)
                continue;
        } else {
            const uint32_t scale = uint32_t(data_a[ib0 + i].scales[itid]);
            sccache1[csel][ix][itid] = FLOAT_TYPE(scale & 0xF);
            sccache2[csel][ix][itid] = FLOAT_TYPE((scale >> 4) & 0xF);
            barrier();
        }

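        // Fetch the thread's four quant bytes (two adjacent byte pairs, 16
        // bytes apart) as one u32, then split out the four 2-bit planes with
        // unpack8: qs_u32_0[k] is bits 0-1 of byte k, qs_u32_2[k] bits 2-3, etc.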
        const uint32_t qs_u32 = uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qs[q_offset / 2 + 8]) << 16);
        const vec4 qs_u32_0 = vec4(unpack8(qs_u32 & 0x03030303));
        const vec4 qs_u32_2 = vec4(unpack8((qs_u32 >> 2) & 0x03030303));
        const vec4 qs_u32_4 = vec4(unpack8((qs_u32 >> 4) & 0x03030303));
        const vec4 qs_u32_6 = vec4(unpack8((qs_u32 >> 6) & 0x03030303));

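        // Superblock fp16 scale pair: dm.x scales the sub-block scales, dm.y
        // (dmin) scales the sub-block mins, i.e. x = dm.x*sc*q - dm.y*m. The
        // two terms are accumulated separately below and combined at the end.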
        const FLOAT_TYPE_VEC2 dm = FLOAT_TYPE_VEC2(data_a[ib0 + i].dm);

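        // For each output column, load the 16 values of B this thread needs:
        // eight vec2 pairs spaced 16 elements apart within the 128-element
        // half of the superblock selected by v_im.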
        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
            vec2 b0   = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 +  0]);
            vec2 b16  = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 +  8]);
            vec2 b32  = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]);
            vec2 b48  = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]);
            vec2 b64  = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]);
            vec2 b80  = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]);
            vec2 b96  = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]);
            vec2 b112 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]);

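            // sum1 accumulates y * sc * q, sum2 accumulates y * m; the final
            // fma below forms dm.x*sum1 - dm.y*sum2 per the Q2_K dequantization.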
            FLOAT_TYPE sum1 = FLOAT_TYPE(0.0);
            FLOAT_TYPE sum2 = FLOAT_TYPE(0.0);
            [[unroll]] for (int l = 0; l < 2; ++l) {
                sum1 = fma(FLOAT_TYPE(b0[l]),   sccache1[csel][ix][    8*v_im] * qs_u32_0[l  ],
                       fma(FLOAT_TYPE(b16[l]),  sccache1[csel][ix][1 + 8*v_im] * qs_u32_0[l+2],
                       fma(FLOAT_TYPE(b32[l]),  sccache1[csel][ix][2 + 8*v_im] * qs_u32_2[l  ],
                       fma(FLOAT_TYPE(b48[l]),  sccache1[csel][ix][3 + 8*v_im] * qs_u32_2[l+2],
                       fma(FLOAT_TYPE(b64[l]),  sccache1[csel][ix][4 + 8*v_im] * qs_u32_4[l  ],
                       fma(FLOAT_TYPE(b80[l]),  sccache1[csel][ix][5 + 8*v_im] * qs_u32_4[l+2],
                       fma(FLOAT_TYPE(b96[l]),  sccache1[csel][ix][6 + 8*v_im] * qs_u32_6[l  ],
                       fma(FLOAT_TYPE(b112[l]), sccache1[csel][ix][7 + 8*v_im] * qs_u32_6[l+2], sum1))))))));
                sum2 = fma(FLOAT_TYPE(b0[l]),   sccache2[csel][ix][    8*v_im],
                       fma(FLOAT_TYPE(b16[l]),  sccache2[csel][ix][1 + 8*v_im],
                       fma(FLOAT_TYPE(b32[l]),  sccache2[csel][ix][2 + 8*v_im],
                       fma(FLOAT_TYPE(b48[l]),  sccache2[csel][ix][3 + 8*v_im],
                       fma(FLOAT_TYPE(b64[l]),  sccache2[csel][ix][4 + 8*v_im],
                       fma(FLOAT_TYPE(b80[l]),  sccache2[csel][ix][5 + 8*v_im],
                       fma(FLOAT_TYPE(b96[l]),  sccache2[csel][ix][6 + 8*v_im],
                       fma(FLOAT_TYPE(b112[l]), sccache2[csel][ix][7 + 8*v_im], sum2))))))));
            }
            temp[j][n] = fma(dm.x, sum1, fma(-dm.y, sum2, temp[j][n]));
        }
    }
}

void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // 16 threads are used to process each block
    const uint it_size = gl_WorkGroupSize.x/16;
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid%16; // 0...15
    const uint ix = tid/16;

    const uint v_im = itid/8; // 0 or 1. 0 computes 0..., 1 computes 128...
    const uint v_in = itid - 8*v_im; // 0...7

    const uint l0 = 2*v_in; // 0...14, even
    const uint q_offset = 32*v_im + l0;
    const uint y_offset = 128*v_im + l0;

    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

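    // First cover the blocks that every thread group can process without
    // bounds checks, then one final bounds-checked pass for the remainder.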
    const uint nbr_par_th = num_blocks_per_row%it_size;
    const uint nbr_all_th = num_blocks_per_row - nbr_par_th;
    uint i0 = 0;
    [[unroll]] for (; i0 < nbr_all_th; i0 += it_size)
        calc_superblock(a_offset, b_offset, itid, v_im, ix, q_offset, y_offset, i0 + ix, num_blocks_per_row, first_row, num_rows, true);
    calc_superblock(a_offset, b_offset, itid, v_im, ix, q_offset, y_offset, i0 + ix, num_blocks_per_row, first_row, num_rows, false);

    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

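// Each workgroup handles NUM_ROWS rows; workgroups are distributed over the
// x and z grid dimensions, and the last (partial) group of rows is clamped.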
void main() {
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}