#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.glsl"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

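// Shared cache of unpacked 6-bit Q3_K scales, one slot per 16-thread group.
// The first index is a double buffer (selected by csel): each loop iteration
// writes the buffer the previous iteration was not reading, so no second
// barrier is needed after the reads.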
shared FLOAT_TYPE sccache[2][BLOCK_SIZE/16][2][8];

FLOAT_TYPE temp[NUM_COLS][NUM_ROWS]; // per-thread partial dot products
uint csel = 0;                       // selects the active sccache buffer

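// Accumulates one Q3_K superblock (QUANT_K values) of row-times-column dot
// products for each of num_rows rows into temp. Each 16-thread group handles
// one superblock: the 6-bit scales (low 4 bits in scales[0..7], high 2 bits
// in scales[8..11], biased by -32) are staged in shared memory, then the
// 3-bit quants are decoded from their 2-bit planes in qs plus the high bit
// in hmask.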
void calc_superblock(const uint a_offset, const uint b_offset, const uint ix, const uint itid8, const uint v_im, const uint v_im4, const uint v_in, const uint32_t hm_m[4], const uint q_offset, const uint y_offset, const uint s_shift, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows, const bool all_threads) {
    const uint y_idx = i * QUANT_K + y_offset;

    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const uint ib0 = a_offset + (first_row+n)*num_blocks_per_row;
        csel ^= 1; // flip the scale-cache buffer for this iteration

        if (!all_threads) { // tail pass: not every 16-thread group has a block left
            if (i < num_blocks_per_row)
                sccache[csel][ix][v_im][itid8] = FLOAT_TYPE(int8_t(((data_a[ib0+i].scales[itid8] >> v_im4) & 0xF) | (((data_a[ib0+i].scales[itid8%4+8] >> s_shift) & 3) << 4)) - 32);
            barrier(); // kept outside the bounds check so control flow stays uniform

            if (i >= num_blocks_per_row)
                continue;
        }

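        // hmask holds each quant's high (3rd) bit. Inverting it makes a set
        // bit mark values that need Q3_K's -4 offset; hmk_0..hmk_3 then expand
        // to 4.0 where that offset applies (per byte: isolate the bit, shift
        // it down to bit 2) and 0.0 elsewhere, one vec4 per 2-bit plane of qs.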
        const uint32_t hmk = ~(uint32_t(data_a_packed16[ib0 + i].hmask[v_in]) | (uint32_t(data_a_packed16[ib0 + i].hmask[v_in + 8]) << 16));
        const vec4 hmk_0 = vec4(unpack8(((hmk & hm_m[0]) >> (    v_im4)) << 2));
        const vec4 hmk_1 = vec4(unpack8(((hmk & hm_m[1]) >> (1 + v_im4)) << 2));
        const vec4 hmk_2 = vec4(unpack8(((hmk & hm_m[2]) >> (2 + v_im4)) << 2));
        const vec4 hmk_3 = vec4(unpack8(((hmk & hm_m[3]) >> (3 + v_im4)) << 2));

        // low 2 bits: gather qs bytes 0, 1, 16, 17 (relative to q_offset) into
        // one word, then split out the four 2-bit planes, four values at a time
        uint32_t qs_u32 = uint32_t(data_a[ib0 + i].qs[q_offset]) | (uint32_t(data_a[ib0 + i].qs[q_offset + 1]) << 8);
        qs_u32 |= (uint32_t(data_a[ib0 + i].qs[q_offset + 16]) | (uint32_t(data_a[ib0 + i].qs[q_offset + 17]) << 8)) << 16;
        const vec4 qs_u32_0 = vec4(unpack8(qs_u32 & 0x03030303));
        const vec4 qs_u32_2 = vec4(unpack8((qs_u32 >> 2) & 0x03030303));
        const vec4 qs_u32_4 = vec4(unpack8((qs_u32 >> 4) & 0x03030303));
        const vec4 qs_u32_6 = vec4(unpack8((qs_u32 >> 6) & 0x03030303));

        if (all_threads) { // fast path: every thread has a block, no bounds check
            sccache[csel][ix][v_im][itid8] = FLOAT_TYPE(int8_t(((data_a[ib0+i].scales[itid8] >> v_im4) & 0xF) | (((data_a[ib0+i].scales[itid8%4+8] >> s_shift) & 3) << 4)) - 32);
            barrier();
        }

        const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); // superblock scale

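        // data_b_v2 views B as vec2s, so each load below fetches two
        // consecutive y values: bK holds y[y_idx + K] and y[y_idx + K + 1].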
        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
            vec2 b0 =   vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 +  0]);
            vec2 b16 =  vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 +  8]);
            vec2 b32 =  vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 16]);
            vec2 b48 =  vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 24]);
            vec2 b64 =  vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 32]);
            vec2 b80 =  vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 40]);
            vec2 b96 =  vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 48]);
            vec2 b112 = vec2(data_b_v2[(j*p.batch_stride_b + b_offset + y_idx) / 2 + 56]);

            FLOAT_TYPE sum = FLOAT_TYPE(0.0);
            // chain of fmas: (b value * cached scale) * dequantized 3-bit quant
            [[unroll]] for (int l = 0; l < 2; ++l) {
                sum = fma(FLOAT_TYPE(  b0[l]) * sccache[csel][ix][v_im][0], qs_u32_0[l  ] - hmk_0[l  ],
                      fma(FLOAT_TYPE( b16[l]) * sccache[csel][ix][v_im][1], qs_u32_0[l+2] - hmk_0[l+2],
                      fma(FLOAT_TYPE( b32[l]) * sccache[csel][ix][v_im][2], qs_u32_2[l  ] - hmk_1[l  ],
                      fma(FLOAT_TYPE( b48[l]) * sccache[csel][ix][v_im][3], qs_u32_2[l+2] - hmk_1[l+2],
                      fma(FLOAT_TYPE( b64[l]) * sccache[csel][ix][v_im][4], qs_u32_4[l  ] - hmk_2[l  ],
                      fma(FLOAT_TYPE( b80[l]) * sccache[csel][ix][v_im][5], qs_u32_4[l+2] - hmk_2[l+2],
                      fma(FLOAT_TYPE( b96[l]) * sccache[csel][ix][v_im][6], qs_u32_6[l  ] - hmk_3[l  ],
                      fma(FLOAT_TYPE(b112[l]) * sccache[csel][ix][v_im][7], qs_u32_6[l+2] - hmk_3[l+2], sum))))))));
            }
            temp[j][n] = fma(d, sum, temp[j][n]);
        }
    }
}

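// Computes NUM_COLS x num_rows results for output rows
// [first_row, first_row + num_rows) and reduces them across the workgroup.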
void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // each superblock is processed by a group of 16 threads
    const uint it_size = gl_WorkGroupSize.x/16; // superblocks handled per iteration
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid%16;                   // 0...15, lane within the group
    const uint ix = tid/16;                     // which group this thread belongs to
    const uint itid8 = itid%8;

    const uint v_im = itid/8;                               // 0 or 1: which 128-value half of the superblock this thread computes
    const uint v_im4 = v_im*4;
    const uint v_in = itid - 8*v_im;                        // 0...7

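    // per-byte masks isolating, for this thread's half of the superblock, the
    // hmask bit that belongs to each of the four 2-bit planes of qs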
    const uint32_t m = 0x01010101 << (4 * v_im);
    uint32_t hm_m[4];
    [[unroll]] for (uint j = 0; j < 4; ++j)
        hm_m[j] = m << j;

    const uint l0 = 2*v_in;                                 // 0...15
    const uint q_offset = 32*v_im + l0;
    const uint y_offset = 128*v_im + l0;

    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

    const uint s_shift = v_im4 + 2*(itid8/4); // selects this thread's 2 high scale bits within scales[8..11]

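    // process full batches of it_size superblocks with every thread active,
    // then the remainder with a bounds check; the final call still runs on
    // all threads so the barrier inside calc_superblock stays uniform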
    const uint nbr_par_th = num_blocks_per_row%it_size;
    const uint nbr_all_th = num_blocks_per_row - nbr_par_th;
    uint i0 = 0;
    [[unroll]] for (; i0 < nbr_all_th; i0 += it_size)
        calc_superblock(a_offset, b_offset, ix, itid8, v_im, v_im4, v_in, hm_m, q_offset, y_offset, s_shift, i0 + ix, num_blocks_per_row, first_row, num_rows, true);
    calc_superblock(a_offset, b_offset, ix, itid8, v_im, v_im4, v_in, hm_m, q_offset, y_offset, s_shift, i0 + ix, num_blocks_per_row, first_row, num_rows, false);

    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

void main() {
    // flatten the x and z dispatch dimensions into a single row index
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}