// llama.cpp/ggml/src/ggml-opencl/kernels/gemv_noshuffle_general_q8_0_f32.cl
#pragma OPENCL EXTENSION cl_khr_fp16 : enable      // half-precision scales (src0_d is global half*)
#pragma OPENCL EXTENSION cl_khr_subgroups : enable // sub_group_broadcast / get_sub_group_local_id

// On Qualcomm Adreno GPUs, pin the sub-group (wave) size this kernel is tuned
// for; the macro name indicates the qcom "half" setting corresponds to 64
// lanes.  On other devices REQD_SUBGROUP_SIZE_64 is simply never defined and
// the attribute is omitted (see the #ifdef ADRENO_GPU guard at the kernel).
#ifdef cl_qcom_reqd_sub_group_size
#pragma OPENCL EXTENSION cl_qcom_reqd_sub_group_size : enable
#define ADRENO_GPU 1
#define REQD_SUBGROUP_SIZE_64 __attribute__((qcom_reqd_sub_group_size("half")))
#endif

#define QK8_0 32       // number of int8 quants per Q8_0 block
#define N_SIMDGROUP 4  // waves (sub-groups) per workgroup; the LM reduction below assumes exactly 4

/*
 * dequantizeBlockAccum_ns_sgbroadcast_1(total_sums, bits8, scale, y)
 *
 * Accumulates the dot product of one Q8_0 block with 32 activation values
 * into `total_sums` (float).
 *
 *   bits8 : uint8 vector holding the block's 32 signed 8-bit weights,
 *           packed 4 per uint, little-endian (byte 0 in the low bits).
 *   scale : per-block dequantization scale (half).
 *   y     : float8 of activations held in the private scope of sub-group
 *           lanes 0..3 (8 values per lane, 32 total); every value is pulled
 *           in via sub_group_broadcast(y.sN, lane) so all fibers see the
 *           full set regardless of which lane staged it.
 *
 * Each byte is isolated with mask+shift, re-interpreted as signed via the
 * (char) cast, widened with convert_int, and accumulated as
 * elem * scale * shared_y.
 *
 * NOTE: the macro declares locals (shared_y, elem), so it must be expanded
 * at a point where declarations are valid, and at most once per scope.
 * Only block comments are legal inside the macro body: a // comment would
 * swallow the trailing line-continuation backslash.
 */
#define dequantizeBlockAccum_ns_sgbroadcast_1(total_sums, bits8, scale, y) \
    float shared_y; \
    char elem; \
    /* bits8.s0 / bits8.s1 : y values broadcast from sub-group lane 0 */ \
    shared_y = sub_group_broadcast(y.s0, 0); \
    elem = (char)(bits8.s0 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s1, 0); \
    elem = (char)((bits8.s0 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s2, 0); \
    elem = (char)((bits8.s0 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s3, 0); \
    elem = (char)((bits8.s0 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \
                                             \
    shared_y = sub_group_broadcast(y.s4, 0); \
    elem = (char)(bits8.s1 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s5, 0); \
    elem = (char)((bits8.s1 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s6, 0); \
    elem = (char)((bits8.s1 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s7, 0); \
    elem = (char)((bits8.s1 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \
    /* bits8.s2 / bits8.s3 : y values broadcast from sub-group lane 1 */ \
    shared_y = sub_group_broadcast(y.s0, 1); \
    elem = (char)(bits8.s2 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s1, 1); \
    elem = (char)((bits8.s2 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s2, 1); \
    elem = (char)((bits8.s2 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s3, 1); \
    elem = (char)((bits8.s2 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \
                                             \
    shared_y = sub_group_broadcast(y.s4, 1); \
    elem = (char)(bits8.s3 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s5, 1); \
    elem = (char)((bits8.s3 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s6, 1); \
    elem = (char)((bits8.s3 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s7, 1); \
    elem = (char)((bits8.s3 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \
    /* bits8.s4 / bits8.s5 : y values broadcast from sub-group lane 2 */ \
    shared_y = sub_group_broadcast(y.s0, 2); \
    elem = (char)(bits8.s4 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s1, 2); \
    elem = (char)((bits8.s4 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s2, 2); \
    elem = (char)((bits8.s4 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s3, 2); \
    elem = (char)((bits8.s4 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \
                                             \
    shared_y = sub_group_broadcast(y.s4, 2); \
    elem = (char)(bits8.s5 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s5, 2); \
    elem = (char)((bits8.s5 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s6, 2); \
    elem = (char)((bits8.s5 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s7, 2); \
    elem = (char)((bits8.s5 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \
    /* bits8.s6 / bits8.s7 : y values broadcast from sub-group lane 3 */ \
    shared_y = sub_group_broadcast(y.s0, 3); \
    elem = (char)(bits8.s6 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s1, 3); \
    elem = (char)((bits8.s6 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s2, 3); \
    elem = (char)((bits8.s6 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s3, 3); \
    elem = (char)((bits8.s6 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \
                                             \
    shared_y = sub_group_broadcast(y.s4, 3); \
    elem = (char)(bits8.s7 & 0x000000FF); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s5, 3); \
    elem = (char)((bits8.s7 & 0x0000FF00) >> 8); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s6, 3); \
    elem = (char)((bits8.s7 & 0x00FF0000) >> 16); \
    total_sums += convert_int(elem) * scale * shared_y; \
    shared_y = sub_group_broadcast(y.s7, 3); \
    elem = (char)((bits8.s7 & 0xFF000000) >> 24); \
    total_sums += convert_int(elem) * scale * shared_y; \

#ifdef ADRENO_GPU
REQD_SUBGROUP_SIZE_64
#endif
// GEMV for Q8_0 weights: dst[M] = src0[M x K] * src1[K].
// Each fiber owns one output row; the workgroup's N_SIMDGROUP waves split the
// K dimension block-wise and the partial sums are combined through local
// memory at the end.  SIMDGROUP_WIDTH is expected to be supplied at program
// build time (e.g. via -D) — it is not defined in this file.
__kernel void kernel_gemv_noshuffle(
        __read_only  image1d_buffer_t src0_q,  // quantized A
        global half  * src0_d,  // A scales
        __read_only  image1d_buffer_t src1,    // B
        ulong offset1,            // offset to B (0)
        global float * dst,     // C
        ulong offsetd,            // offset to C
        int ne00,               // K
        int ne01,               // M
        int ne02,               // 1
        int ne10,               // K
        int ne12,               // 1
        int ne0,                // M
        int ne1,                // N
        int r2,                 // 1
        int r3)
{
    uint   waveId = get_local_id(1);          // which of the N_SIMDGROUP waves
    uint   row    = get_global_id(0);         // output row handled by this fiber
    ushort lane   = get_sub_group_local_id(); // lane index inside the wave

    uint K = ne00;
    uint M = ne01;

    uint LINE_STRIDE_A  = M;       // uints between consecutive lines of a block
    uint BLOCK_STRIDE_A = 8 * M;   // uints per block column: QK8_0 / 4 = 8

    __private uint8  qWords;  // 32 packed q8 weights of the current block
    __private half   dScale;  // per-block scale of this row
    __private float8 yVals;   // 8 activations staged per lane (lanes 0..3)

    __private float acc = (float)(0.0f);

    // Walk K at block granularity; the 4 waves interleave, each taking every
    // 4th block.
    #pragma unroll 1 /* tell compiler not to unroll */
    for (uint k = waveId; k < (K / QK8_0); k += N_SIMDGROUP) {
        // each fiber loads the scale of its own row
        dScale = src0_d[row + k * LINE_STRIDE_A];
        // the first 4 fibers of the wave stage 8 B values each; the macro
        // below redistributes them with sub_group_broadcast
        if (lane < 4) {
            yVals.s0123 = read_imagef(src1, (lane * 2 + k * 8));
            yVals.s4567 = read_imagef(src1, (1 + lane * 2 + k * 8));
        }

        // fetch the block's 8 packed weight words, laid out row-interleaved
        qWords.s0 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 0)).x;
        qWords.s1 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 1)).x;
        qWords.s2 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 2)).x;
        qWords.s3 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 3)).x;
        qWords.s4 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 4)).x;
        qWords.s5 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 5)).x;
        qWords.s6 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 6)).x;
        qWords.s7 = read_imageui(src0_q, (row + k * BLOCK_STRIDE_A + LINE_STRIDE_A * 7)).x;

        // dequantize + multiply-accumulate the whole block into acc
        dequantizeBlockAccum_ns_sgbroadcast_1(acc, qWords, dScale, yVals);
    }

    // Cross-wave reduction through local memory; assumes exactly 4 waves.
    // Waves 1..3 park their partial sums, wave 0 folds them into its own.
    __local float reduceLM[SIMDGROUP_WIDTH * 3];
    if (waveId == 1) reduceLM[SIMDGROUP_WIDTH * 0 + lane] = acc;
    if (waveId == 2) reduceLM[SIMDGROUP_WIDTH * 1 + lane] = acc;
    if (waveId == 3) reduceLM[SIMDGROUP_WIDTH * 2 + lane] = acc;
    barrier(CLK_LOCAL_MEM_FENCE);
    if (waveId == 0) acc += reduceLM[SIMDGROUP_WIDTH * 0 + lane];
    if (waveId == 0) acc += reduceLM[SIMDGROUP_WIDTH * 1 + lane];
    if (waveId == 0) acc += reduceLM[SIMDGROUP_WIDTH * 2 + lane];

    // one output per fiber of wave 0
    if (waveId == 0) {
        dst = (global float*)((global char*)dst + offsetd);
        dst[row] = acc;
    }
}