1//
2// MIT license
3// Copyright (C) 2024 Intel Corporation
4// SPDX-License-Identifier: MIT
5//
6
7//
8// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
9// See https://llvm.org/LICENSE.txt for license information.
10// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
11//
12
#include "ggml-impl.h"
#include "common.hpp"
#include "dequantize.hpp"
#include "getrows.hpp"

#include <type_traits>
17
18
19template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
20static void k_get_rows(
21 const void * src0, const int32_t * src1, dst_t * dst,
22 int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
23 /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
24 /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
25 /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
26 size_t s10, size_t s11, size_t s12,
27 const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {
28
29 const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) +
30 item_ct1.get_local_id(2)) *
31 2;
32 const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
33 item_ct1.get_local_id(1);
34 const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
35 item_ct1.get_local_id(0)) /
36 ne12;
37 const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
38 item_ct1.get_local_id(0)) %
39 ne12;
40
41 if (i00 >= ne00) {
42 return;
43 }
44
45 const int i01 = src1[i10*s10 + i11*s11 + i12*s12];
46
47 dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
48 const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03;
49
50 const int ib = i00/qk; // block index
51 const int iqs = (i00%qk)/qr; // quant index
52 const int iybs = i00 - i00%qk; // dst block start index
53 const int y_offset = qr == 1 ? 1 : qk/2;
54
55 // dequantize
56 dfloat2 v;
57 dequantize_kernel(src0_row, ib, iqs, v);
58
59 dst_row[iybs + iqs + 0] = v.x();
60 dst_row[iybs + iqs + y_offset] = v.y();
61}
62
63template<typename src0_t, typename dst_t>
64static void k_get_rows_float(
65 const src0_t * src0, const int32_t * src1, dst_t * dst,
66 int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
67 /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
68 /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
69 /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
70 size_t s10, size_t s11, size_t s12,
71 const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {
72
73 const int i00 = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
74 item_ct1.get_local_id(2);
75 const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
76 item_ct1.get_local_id(1);
77 const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
78 item_ct1.get_local_id(0)) /
79 ne12;
80 const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
81 item_ct1.get_local_id(0)) %
82 ne12;
83
84 if (i00 >= ne00) {
85 return;
86 }
87
88 const int i01 = src1[i10*s10 + i11*s11 + i12*s12];
89
90 dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
91 const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03);
92
93 dst_row[i00] = src0_row[i00];
94}
95
96template <int qk, int qr, dequantize_kernel_t dq>
97static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
98 ggml_tensor *dst, const void *src0_dd,
99 const int32_t *src1_dd, float *dst_dd,
100 queue_ptr stream) {
101
102 GGML_TENSOR_BINARY_OP_LOCALS
103
104 const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
105 const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE);
106 const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);
107
108 // strides in elements
109 //const size_t s0 = nb0 / ggml_element_size(dst);
110 const size_t s1 = nb1 / ggml_element_size(dst);
111 const size_t s2 = nb2 / ggml_element_size(dst);
112 const size_t s3 = nb3 / ggml_element_size(dst);
113
114 const size_t s10 = nb10 / ggml_element_size(src1);
115 const size_t s11 = nb11 / ggml_element_size(src1);
116 const size_t s12 = nb12 / ggml_element_size(src1);
117 //const size_t s13 = nb13 / ggml_element_size(src1);
118
119 GGML_ASSERT(ne00 % 2 == 0);
120
121 stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
122 [=](sycl::nd_item<3> item_ct1) {
123 k_get_rows<qk, qr, dq>(
124 src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
125 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
126 });
127
128 GGML_UNUSED(dst);
129 GGML_UNUSED(ctx);
130}
131
132template <typename src0_t>
133static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
134 const ggml_tensor *src1, ggml_tensor *dst,
135 const src0_t *src0_dd, const int32_t *src1_dd,
136 float *dst_dd, queue_ptr stream) {
137
138 GGML_TENSOR_BINARY_OP_LOCALS
139
140 const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
141 const int block_num_x = (ne00 + SYCL_GET_ROWS_BLOCK_SIZE - 1) / SYCL_GET_ROWS_BLOCK_SIZE;
142 const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);
143
144 // strides in elements
145 //const size_t s0 = nb0 / ggml_element_size(dst);
146 const size_t s1 = nb1 / ggml_element_size(dst);
147 const size_t s2 = nb2 / ggml_element_size(dst);
148 const size_t s3 = nb3 / ggml_element_size(dst);
149
150 const size_t s10 = nb10 / ggml_element_size(src1);
151 const size_t s11 = nb11 / ggml_element_size(src1);
152 const size_t s12 = nb12 / ggml_element_size(src1);
153 //const size_t s13 = nb13 / ggml_element_size(src1);
154
155 {
156 dpct::has_capability_or_fail(stream->get_device(),
157 {sycl::aspect::fp16});
158
159 stream->parallel_for(
160 sycl::nd_range<3>(block_nums * block_dims, block_dims),
161 [=](sycl::nd_item<3> item_ct1) {
162 k_get_rows_float(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
163 s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
164 });
165 }
166
167 GGML_UNUSED(dst);
168 GGML_UNUSED(ctx);
169}
170
171void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
172 GGML_ASSERT(dst->src[1]->type == GGML_TYPE_I32);
173 GGML_ASSERT(dst->type == GGML_TYPE_F32);
174
175 GGML_ASSERT(dst->src[0]->nb[0] == ggml_type_size(dst->src[0]->type));
176 GGML_ASSERT(dst->src[1]->nb[0] == ggml_type_size(dst->src[1]->type));
177 GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type));
178
179 const int32_t * src1_i32 = (const int32_t *) dst->src[1]->data;
180 /* TODO: Refactor and remove duplicates */
181 switch (dst->src[0]->type) {
182 case GGML_TYPE_F16:
183 get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const sycl::half *)dst->src[0]->data,
184 src1_i32, (float *)dst->data, ctx.stream());
185 break;
186 case GGML_TYPE_F32:
187 get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
188 src1_i32, (float *)dst->data, ctx.stream());
189 break;
190 case GGML_TYPE_Q4_0:
191 get_rows_sycl<QK4_0, QR4_0, dequantize_q4_0>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
192 src1_i32, (float *)dst->data, ctx.stream());
193 break;
194 case GGML_TYPE_Q4_1:
195 get_rows_sycl<QK4_1, QR4_1, dequantize_q4_1>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
196 src1_i32, (float *)dst->data, ctx.stream());
197 break;
198 case GGML_TYPE_Q5_0:
199 get_rows_sycl<QK5_0, QR5_0, dequantize_q5_0>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
200 src1_i32, (float *)dst->data, ctx.stream());
201 break;
202 case GGML_TYPE_Q5_1:
203 get_rows_sycl<QK5_1, QR5_1, dequantize_q5_1>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
204 src1_i32, (float *)dst->data, ctx.stream());
205 break;
206 case GGML_TYPE_Q8_0:
207 get_rows_sycl<QK8_0, QR8_0, dequantize_q8_0>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
208 src1_i32, (float *)dst->data, ctx.stream());
209 break;
210 default:
211 // TODO: k-quants
212 GGML_LOG_ERROR("%s: unsupported type: %s\n", __func__, ggml_type_name(dst->src[0]->type));
213 GGML_ABORT("fatal error");
214 }
215}