#version 450

#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

#include "mul_mat_vec_base.glsl"

layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

// Double-buffered shared cache for the 16 sub-block scales of the Q6_K
// superblock each group of 16 threads is processing. Two buffers let a
// single barrier per iteration suffice: csel flips between them, so one
// iteration's scales are not overwritten while they may still be read.
shared FLOAT_TYPE sccache[2][BLOCK_SIZE/16][16];

// Per-invocation partial dot products, reduced across the workgroup at the end.
FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
uint csel = 0;

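// Process one Q6_K superblock (QUANT_K values) for up to num_rows rows of A:
// stage the 16 sub-block scales in shared memory, unpack the 6-bit quants
// (low 4 bits from ql, high 2 bits from qh), and accumulate the dot products
// against the corresponding slice of each column of B into temp.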
void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint ix, const uint ql_offset, const uint qh_offset, const uint s_offset, const uint y_offset, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows, const bool all_threads) {
    const uint y_idx = i * QUANT_K + y_offset;

    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const uint ib0 = a_offset + (first_row+n)*num_blocks_per_row;
        csel ^= 1; // switch scale-cache buffers for this row

        if (!all_threads) { // when we don't have enough blocks to use all threads
            if (i < num_blocks_per_row)
                sccache[csel][ix][itid] = FLOAT_TYPE(data_a[ib0 + i].scales[itid]);
            barrier();

            // threads without a block in range only took part in the barrier
            if (i >= num_blocks_per_row)
                continue;
        }

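        // Reassemble the 6-bit quants, four bytes at a time: ql holds the
        // low 4 bits of each value and qh packs four 2-bit high parts per
        // byte. The masks below pick one 2-bit field out of every qh byte
        // and the shifts move it to bits 4..5, where it is OR'd onto the
        // matching ql nibble.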
        const uint32_t ql0_u32 =  uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 1]) << 16);
        const uint32_t ql32_u32 = uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 16]) | (uint32_t(data_a_packed16[ib0 + i].ql[ql_offset / 2 + 17]) << 16);

        const uint32_t ql0_u32_lo4 = ql0_u32 & 0x0F0F0F0F;
        const uint32_t ql0_u32_hi4 = (ql0_u32 >> 4) & 0x0F0F0F0F;
        const uint32_t ql32_u32_lo4 = ql32_u32 & 0x0F0F0F0F;
        const uint32_t ql32_u32_hi4 = (ql32_u32 >> 4) & 0x0F0F0F0F;

        const uint32_t qh_u32 = uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2]) | (uint32_t(data_a_packed16[ib0 + i].qh[qh_offset / 2 + 1]) << 16);
        const uint32_t qh0_u32 = (qh_u32 & 0x03030303) << 4;
        const uint32_t qh2_u32 = (qh_u32 & 0x0C0C0C0C) << 2;
        const uint32_t qh4_u32 = (qh_u32 & 0x30303030);
        const uint32_t qh6_u32 = (qh_u32 & 0xC0C0C0C0) >> 2;

        const uint32_t q0_u32 = ql0_u32_lo4  | qh0_u32;
        const uint32_t q1_u32 = ql32_u32_lo4 | qh2_u32;
        const uint32_t q2_u32 = ql0_u32_hi4  | qh4_u32;
        const uint32_t q3_u32 = ql32_u32_hi4 | qh6_u32;

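        // Widen each packed byte to float and remove the Q6_K zero point:
        // stored values are unsigned 6-bit (0..63), centered by subtracting 32.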
        const vec4 q0 = vec4(unpack8(q0_u32)) - 32;
        const vec4 q1 = vec4(unpack8(q1_u32)) - 32;
        const vec4 q2 = vec4(unpack8(q2_u32)) - 32;
        const vec4 q3 = vec4(unpack8(q3_u32)) - 32;

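        // Fast path: every thread has a block in range, so the scales can be
        // cached without the bounds checks used above.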
        if (all_threads) {
            sccache[csel][ix][itid] = FLOAT_TYPE(data_a[ib0 + i].scales[itid]);
            barrier();
        }

        const FLOAT_TYPE d = FLOAT_TYPE(data_a[ib0 + i].d); // superblock scale

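        // Each thread covers 4 consecutive values in each of four 32-value
        // groups of its 128-value half of the superblock, so B is fetched as
        // four vec4s spaced 32 floats apart.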
        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
            vec4 by0  = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4     ]);
            vec4 by32 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 +  8]);
            vec4 by64 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 16]);
            vec4 by96 = vec4(data_b_v4[(j*p.batch_stride_b + b_offset + y_idx) / 4 + 24]);

            FLOAT_TYPE sum[4] = {0, 0, 0, 0};
            [[unroll]] for (uint l = 0; l < 4; ++l) {
                sum[0] = fma(FLOAT_TYPE(by0[l]), q0[l], sum[0]);
                sum[1] = fma(FLOAT_TYPE(by32[l]), q1[l], sum[1]);
                sum[2] = fma(FLOAT_TYPE(by64[l]), q2[l], sum[2]);
                sum[3] = fma(FLOAT_TYPE(by96[l]), q3[l], sum[3]);
            }
            // Apply the four sub-block scales and the superblock scale d in a
            // single fma chain, accumulating into the running total.
            temp[j][n] = fma(fma(sum[0], sccache[csel][ix][s_offset], fma(sum[1], sccache[csel][ix][s_offset + 2], fma(sum[2], sccache[csel][ix][s_offset + 4], sum[3] * sccache[csel][ix][s_offset + 6]))), d, temp[j][n]);
        }
    }
}

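// Compute num_rows consecutive output rows starting at first_row, walking
// all superblocks of each row and reducing the partial sums at the end.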
void compute_outputs(const uint first_row, const uint num_rows) {
    uint a_offset, b_offset, d_offset;
    get_offsets(a_offset, b_offset, d_offset);

    const uint num_blocks_per_row = p.ncols / QUANT_K;

    // 16 threads are used to process each superblock
    const uint it_size = gl_WorkGroupSize.x/16;
    const uint tid = gl_LocalInvocationID.x;
    const uint itid = tid%16;  // 0...15
    const uint ix = tid/16;

    const uint v_im = itid/8;                               // 0 or 1: lower or upper 128 values of the superblock
    const uint v_in = itid - 8*v_im;                        // 0...7

    const uint l0 = 4 * v_in;                               // 0, 4, 8, ..., 28
    const uint is = v_in / 4;                               // 0 or 1

    const uint ql_offset = 64*v_im + l0;   // byte offset into ql (low 4 bits)
    const uint qh_offset = 32*v_im + l0;   // byte offset into qh (high 2 bits)
    const uint s_offset  =  8*v_im + is;   // first of the four sub-block scale indices
    const uint y_offset = 128*v_im + l0;   // float offset into the matching B values

    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
            temp[j][i] = FLOAT_TYPE(0);
        }
    }

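    // Process the blocks in chunks of it_size with every thread in range
    // (all_threads == true), then handle the remainder with a final
    // bounds-checked call.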
    const uint nbr_par_th = num_blocks_per_row%it_size;
    const uint nbr_all_th = num_blocks_per_row - nbr_par_th;
    uint i0 = 0;
    [[unroll]] for (; i0 < nbr_all_th; i0 += it_size)
        calc_superblock(a_offset, b_offset, itid, ix, ql_offset, qh_offset, s_offset, y_offset, i0 + ix, num_blocks_per_row, first_row, num_rows, true);
    calc_superblock(a_offset, b_offset, itid, ix, ql_offset, qh_offset, s_offset, y_offset, i0 + ix, num_blocks_per_row, first_row, num_rows, false);

    reduce_result(temp, d_offset, first_row, num_rows, tid);
}

void main() {
    // Linearize the x and z workgroup indices; each workgroup produces up to
    // NUM_ROWS rows of the output.
    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);

    // do NUM_ROWS at a time, unless there aren't enough remaining rows
    if (first_row + NUM_ROWS <= p.stride_d) {
        compute_outputs(first_row, NUM_ROWS);
    } else {
        if (first_row >= p.stride_d) {
            return;
        }
        compute_outputs(first_row, p.stride_d - first_row);
    }
}