#version 450
#extension GL_EXT_shader_explicit_arithmetic_types_int32 : require

// Shared scaffolding: push-constant block `p`, buffer bindings (data_a, data_b_v4, ...),
// FLOAT_TYPE/QUANT_K/NUM_COLS/NUM_ROWS macros, get_offsets(), reduce_result(),
// init_iq_shmem() and the iq2xs_grid codebook all come from this include.
#include "mul_mat_vec_base.glsl"

// Workgroup x-size is a specialization constant (constant_id = 0); y and z are 1.
layout(local_size_x_id = 0, local_size_y = 1, local_size_z = 1) in;

// Per-invocation partial dot products: one accumulator per (B column, A row) pair,
// combined across the workgroup by reduce_result() at the end.
FLOAT_TYPE temp[NUM_COLS][NUM_ROWS];
  9
// Accumulate the contribution of one quantized superblock (block `i` of each of
// `num_rows` consecutive A rows) into temp[][]. 16 threads cooperate on each
// block: thread `itid` (0..15) handles 16 dequantized values of the block,
// split into two groups of 8 (the l = 0..1 loops).
//   a_offset/b_offset:  base offsets into the A (quantized) and B buffers
//   itid:               lane within the 16-thread group, 0..15
//   i:                  block index along the row
//   num_blocks_per_row: stride (in blocks) between the same block of adjacent rows
//   first_row/num_rows: which A rows this workgroup is producing
void calc_superblock(const uint a_offset, const uint b_offset, const uint itid, const uint i, const uint num_blocks_per_row, const uint first_row, const uint num_rows) {
    // Starting element of this thread's slice within the B column.
    const uint y_idx = i * QUANT_K + 16 * itid;
    // Two adjacent threads share one scales[] byte: even itid reads the low
    // nibble, odd itid the high nibble.
    const uint nibble_shift = 4 * (itid & 1);
    const uint ib32 = itid / 2; // 0..7
    uint ibi = a_offset + first_row * num_blocks_per_row + i;
    // Precompute db multiplication factors
    float db_vals[NUM_ROWS];
    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        const float d = float(data_a[ibi].d);
        const uint scale_raw = data_a[ibi].scales[ib32];
        const uint scale = (scale_raw >> nibble_shift) & 0xF;
        // Merge constant calculations d * (0.5 + scale) * 0.25 = d*0.125 + d*scale*0.25
        db_vals[n] = d * (0.125f + float(scale) * 0.25f);
        ibi += num_blocks_per_row;
    }
    // Rewind to the first row's block and do the main accumulation pass.
    ibi = a_offset + first_row * num_blocks_per_row + i;
    [[unroll]] for (uint n = 0; n < num_rows; ++n) {
        // Preload grid and sign data for all l values
        vec4 grid0_vals[2], grid1_vals[2];
        uint sign_vals[2], sign7_vals[2];
        [[unroll]] for (uint l = 0; l < 2; ++l) {
            const uint qs = data_a[ibi].qs[2 * itid + l];
            // Bits 9..15 of qs: explicit sign flags for the first 7 of the 8 values.
            sign_vals[l] = qs >> 9;
            // Popcount of the 7 explicit bits; its parity (bit 0) supplies the
            // 8th sign (see the sign7 & 1 test below).
            sign7_vals[l] = bitCount(sign_vals[l]);
            // Low 9 bits of qs index the codebook; each entry yields 8 packed
            // magnitudes, unpacked here into two vec4s.
            const uvec2 grid_data = iq2xs_grid[qs & 511];
            grid0_vals[l] = vec4(unpack8(grid_data.x));
            grid1_vals[l] = vec4(unpack8(grid_data.y));
        }
        // Preload B data for all j columns (reduce repeated index calculations)
        [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
            FLOAT_TYPE sum = FLOAT_TYPE(0.0);
            [[unroll]] for (uint l = 0; l < 2; ++l) {
                const uint sign = sign_vals[l];
                const uint sign7 = sign7_vals[l];
                const vec4 grid0 = grid0_vals[l];
                const vec4 grid1 = grid1_vals[l];
                // Precompute indices
                // Divide by 4: data_b_v4 is a vec4-packed view of B, so one
                // index covers 4 scalar elements.
                const uint b_idx = (j * p.batch_stride_b + b_offset + y_idx) / 4 + 2 * l;
                const vec4 b0 = vec4(data_b_v4[b_idx + 0]);
                const vec4 b4 = vec4(data_b_v4[b_idx + 1]);
                // 8-wide signed dot product: each grid magnitude is negated when
                // its sign bit is set; the last lane uses the parity bit.
                sum +=
                    fma(FLOAT_TYPE(b0.x), FLOAT_TYPE((sign &   1) != 0 ? -grid0.x : grid0.x),
                    fma(FLOAT_TYPE(b0.y), FLOAT_TYPE((sign &   2) != 0 ? -grid0.y : grid0.y),
                    fma(FLOAT_TYPE(b0.z), FLOAT_TYPE((sign &   4) != 0 ? -grid0.z : grid0.z),
                    fma(FLOAT_TYPE(b0.w), FLOAT_TYPE((sign &   8) != 0 ? -grid0.w : grid0.w),
                    fma(FLOAT_TYPE(b4.x), FLOAT_TYPE((sign &  16) != 0 ? -grid1.x : grid1.x),
                    fma(FLOAT_TYPE(b4.y), FLOAT_TYPE((sign &  32) != 0 ? -grid1.y : grid1.y),
                    fma(FLOAT_TYPE(b4.z), FLOAT_TYPE((sign &  64) != 0 ? -grid1.z : grid1.z),
                    fma(FLOAT_TYPE(b4.w), FLOAT_TYPE((sign7 &  1) != 0 ? -grid1.w : grid1.w),
                    FLOAT_TYPE(0.0)))))))));
            }
            // Scale by the per-row dequantization factor and accumulate.
            temp[j][n] = fma(FLOAT_TYPE(db_vals[n]), sum, temp[j][n]);
        }
        ibi += num_blocks_per_row;
    }
}
 66
 67void compute_outputs(const uint32_t first_row, const uint32_t num_rows) {
 68    uint a_offset, b_offset, d_offset;
 69    get_offsets(a_offset, b_offset, d_offset);
 70
 71    const uint num_blocks_per_row = p.ncols / QUANT_K;
 72
 73    // 16 threads are used to process each block
 74    const uint blocks_per_wg = gl_WorkGroupSize.x/16;
 75    const uint tid = gl_LocalInvocationID.x;
 76    const uint itid = tid % 16;  // 0...15
 77    const uint ix = tid / 16;
 78
 79    [[unroll]] for (uint j = 0; j < NUM_COLS; ++j) {
 80        [[unroll]] for (uint i = 0; i < NUM_ROWS; ++i) {
 81            temp[j][i] = FLOAT_TYPE(0);
 82        }
 83    }
 84
 85    [[unroll]] for (uint i = ix; i < num_blocks_per_row; i += blocks_per_wg)
 86        calc_superblock(a_offset, b_offset, itid, i, num_blocks_per_row, first_row, num_rows);
 87
 88    reduce_result(temp, d_offset, first_row, num_rows, tid);
 89}
 90
 91void main() {
 92    const uint first_row = NUM_ROWS * (gl_WorkGroupID.x + gl_NumWorkGroups.x * gl_WorkGroupID.z);
 93
 94    init_iq_shmem(gl_WorkGroupSize);
 95
 96    // do NUM_ROWS at a time, unless there aren't enough remaining rows
 97    if (first_row + NUM_ROWS <= p.stride_d) {
 98        compute_outputs(first_row, NUM_ROWS);
 99    } else {
100        if (first_row >= p.stride_d) {
101            return;
102        }
103        compute_outputs(first_row, p.stride_d - first_row);
104    }
105}