1/*
2 * Copyright (c) 2023-2026 The ggml authors
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a copy
5 * of this software and associated documentation files (the "Software"), to
6 * deal in the Software without restriction, including without limitation the
7 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
8 * sell copies of the Software, and to permit persons to whom the Software is
9 * furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice shall be included in
12 * all copies or substantial portions of the Software.
13 *
14 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
17 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
20 * IN THE SOFTWARE.
21 */
22
23#ifndef CANN_ACL_TENSOR_H
24#define CANN_ACL_TENSOR_H
25
#include "common.h"

#include <aclnn/aclnn_base.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>
32
/**
 * @brief Maps a ggml_type to its corresponding aclDataType.
 *
 * @details This function takes a ggml_type as input and returns the corresponding
 * aclDataType. It supports mapping for various ggml_types. If the input type
 * does not match any of the predefined ggml_types, the function returns
 * ACL_DT_UNDEFINED.
 *
 * @param type The ggml_type to be mapped.
 * @return The corresponding aclDataType. If the input type is not recognized,
 *         ACL_DT_UNDEFINED is returned.
 */
aclDataType ggml_cann_type_mapping(ggml_type type);
46
47// Deleter for acl objects.
48template <typename T, aclError (*DestroyFunc)(const T *)> struct acl_deleter {
49 void operator()(T * ptr) const noexcept {
50 if (ptr) {
51 ACL_CHECK(DestroyFunc(ptr));
52 }
53 }
54};
55
// Owning smart-pointer aliases for ACL resources. Each one destroys its
// resource through the corresponding aclDestroy* call on scope exit (RAII).
using acl_tensor_ptr = std::unique_ptr<aclTensor, acl_deleter<aclTensor, aclDestroyTensor>>;
using acl_int_array_ptr = std::unique_ptr<aclIntArray, acl_deleter<aclIntArray, aclDestroyIntArray>>;
using acl_scalar_ptr = std::unique_ptr<aclScalar, acl_deleter<aclScalar, aclDestroyScalar>>;
using acl_tensor_list_ptr = std::unique_ptr<aclTensorList, acl_deleter<aclTensorList, aclDestroyTensorList>>;
60
/**
 * @brief Creates an ACL tensor from a ggml_tensor with optional shape.
 *
 * @details This function creates an ACL tensor based on the properties of the
 * provided ggml_tensor. It supports a custom shape by adjusting dimensions
 * and strides accordingly. If a custom shape is applied, additional
 * dimensions and strides are calculated based on the provided parameters.
 *
 * @param tensor Pointer to the ggml_tensor to be converted to an ACL tensor.
 * @param ne Pointer to an array containing dimensions. Defaults to nullptr
 * if no custom shape is applied.
 * @param nb Pointer to an array containing strides. Defaults to nullptr
 * if no custom shape is applied.
 * @param dims Number of dimensions in the tensor. Defaults to 0 if no custom
 * shape is applied.
 * @param format ACL tensor format. Defaults to ACL_FORMAT_ND.
 * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
 * @return Smart pointer (acl_tensor_ptr) owning the created ACL tensor.
 */
acl_tensor_ptr ggml_cann_create_tensor(const ggml_tensor * tensor,
                                       int64_t * ne = nullptr,
                                       size_t * nb = nullptr,
                                       int64_t dims = 0,
                                       aclFormat format = ACL_FORMAT_ND,
                                       size_t offset = 0);
86
87/**
88 * @brief Template for creating an ACL tensor from provided parameters. typename TYPE
89 * should be size_t or float.
90 *
91 * @details This function creates an ACL tensor using the provided data pointer,
92 * data type, dimensions, strides, format, offset, and additional parameters.
93 * It calculates necessary dimensions and strides based on the provided ne and nb
94 * arrays, adjusting them for the ACL tensor creation. The ACL storage length
95 * is also calculated based on the provided dimensions and strides.
96 *
97 * @param data_ptr Pointer to the data buffer for the ACL tensor.
98 * @param dtype ACL data type of the tensor.
99 * @param type_size Size of each element in the tensor data buffer.
100 * @param ne Pointer to an array containing tensor dimensions.
101 * @param nb Pointer to an array containing tensor strides.
102 * @param dims Number of dimensions of the tensor.
103 * @param format ACL tensor format. Defaults to ACL_FORMAT_ND.
104 * @param offset Offset in bytes for the ACL tensor data. Defaults to 0.
105 * @return Pointer to the created ACL tensor.
106 */
107template <typename TYPE>
108acl_tensor_ptr ggml_cann_create_tensor(void * data_ptr,
109 aclDataType dtype,
110 TYPE type_size,
111 int64_t * ne,
112 TYPE * nb,
113 int64_t dims,
114 aclFormat format = ACL_FORMAT_ND,
115 size_t offset = 0) {
116 int64_t tmp_ne[GGML_MAX_DIMS * 2];
117 int64_t tmp_stride[GGML_MAX_DIMS * 2];
118
119 memcpy(tmp_ne, ne, dims * sizeof(int64_t));
120 for (int i = 0; i < dims; i++) {
121 tmp_stride[i] = nb[i] / type_size;
122 }
123
124 int64_t acl_storage_len = 1;
125 for (int i = 0; i < dims; i++) {
126 acl_storage_len += (tmp_ne[i] - 1) * tmp_stride[i];
127 }
128
129 std::reverse(tmp_ne, tmp_ne + dims);
130 std::reverse(tmp_stride, tmp_stride + dims);
131
132 aclTensor * raw =
133 aclCreateTensor(tmp_ne, dims, dtype, tmp_stride, offset / type_size, format, &acl_storage_len, 1, data_ptr);
134
135 return acl_tensor_ptr(raw);
136}
137
/**
 * @brief Create an ACL int array resource wrapped in a smart pointer.
 *
 * This function constructs an aclIntArray from the provided int64_t values
 * and returns it as an acl_int_array_ptr (a std::unique_ptr with a custom
 * deleter). The returned pointer owns the ACL resource and will automatically
 * destroy it via aclDestroyIntArray().
 *
 * @param value Pointer to the int64_t elements to copy into the array.
 * @param size Number of elements in @p value.
 *
 * @return A smart pointer managing the created ACL int array.
 */
acl_int_array_ptr ggml_cann_create_int_array(const int64_t * value, uint64_t size);
152
/**
 * @brief Create an ACL scalar resource wrapped in a smart pointer.
 *
 * This function constructs an aclScalar from the raw value pointer and ACL
 * data type, then returns it as an acl_scalar_ptr (a std::unique_ptr with
 * a custom deleter). The returned pointer owns the ACL scalar and will
 * automatically destroy it via aclDestroyScalar().
 *
 * @param value Pointer to the raw scalar memory; must match @p dataType in
 *              size and layout.
 * @param dataType ACL data type of the scalar.
 *
 * @return A smart pointer managing the created ACL scalar.
 */
acl_scalar_ptr ggml_cann_create_scalar(void * value, aclDataType dataType);
167
168/**
169 * @brief Create an ACL tensor list from multiple tensor smart pointers.
170 *
171 * This function accepts a variadic list of acl_tensor_ptr (a unique_ptr with
172 * custom deleter) and produces an aclTensorList using aclCreateTensorList().
173 *
174 * The lifecycle management of the tensor objects changes as follows:
175 * - aclCreateTensorList() takes ownership of the tensors
176 * - Each input smart pointer releases ownership using release()
177 * - As a result, the tensors will NOT be destroyed by unique_ptr
178 * - Instead, they will be destroyed when aclDestroyTensorList() is called
179 *
180 * This ensures correct ownership transfer and prevents double-free situations.
181 *
182 * @param acl_tensor_ptr Variadic template parameter; each argument must be
183 * a unique_ptr-like type supporting get() and release().
184 *
185 * @param tensors Variadic list of acl_tensor_ptr objects. Ownership of
186 * each tensor is transferred away from these smart pointers.
187 *
188 * @return A smart pointer (acl_tensor_list_ptr) owning the created ACL tensor list.
189 *
190 * @note This implementation is C++11 compatible. The ownership-release process is
191 * executed using a pack expansion inside an initializer list.
192 */
193template <typename... acl_tensor_ptr> acl_tensor_list_ptr ggml_cann_create_tensor_list(acl_tensor_ptr &&... tensors) {
194 aclTensor * raw_tensors[] = { tensors.get()... };
195 aclTensorList * raw = aclCreateTensorList(raw_tensors, sizeof...(tensors));
196 // aclTensor will release by aclTensorList, so release ownership without
197 // destroying the tensor
198 int dummy[] = { (tensors.release(), 0)... };
199 GGML_UNUSED(dummy);
200 return acl_tensor_list_ptr(raw);
201}
202
/**
 * @brief Checks if tensors require broadcasting based on their shapes.
 *
 * @details This function determines if two ggml_tensors need to be broadcasted for
 * element-wise operations. Broadcasting is necessary if the shapes of the
 * tensors are not identical and no dimension in either tensor equals 1.
 *
 * @param t0 Pointer to the first ggml_tensor.
 * @param t1 Pointer to the second ggml_tensor.
 * @return True if broadcasting is needed, False otherwise.
 *
 * @remarks This function iterates over the dimensions of t0 and t1. It checks if each
 * dimension in t1 differs from t0's corresponding dimension and is not equal
 * to 1. If such a dimension is found, broadcasting is required to align t1
 * with t0 for element-wise operations.
 */
bool ggml_cann_need_bcast(const ggml_tensor * t0, const ggml_tensor * t1);
220
/**
 * @brief Computes broadcast shapes and strides for two ggml_tensors.
 *
 * @details This function calculates the broadcast shapes and strides for two ggml_tensors,
 * following the broadcasting rules similar to numpy. It adjusts dimensions and
 * strides to ensure compatibility for element-wise operations where one tensor
 * can be broadcasted to match the shape of another tensor.
 *
 * @param src0 Pointer to the first ggml_tensor.
 * @param src1 Pointer to the second ggml_tensor.
 * @param bcast_ne_src0 Output array to store broadcasted dimensions for src0.
 * @param bcast_ne_src1 Output array to store broadcasted dimensions for src1.
 * @param bcast_nb_src0 Output array to store broadcasted strides for src0.
 * @param bcast_nb_src1 Output array to store broadcasted strides for src1.
 * @return Number of dimensions in the broadcasted shape.
 *
 * @pre ggml_can_repeat(src1, src0) must return true, indicating src1 can be broadcasted
 * to match src0.
 *
 * @remarks This function iterates over the dimensions of src0 and src1, calculating the
 * necessary broadcast dimensions and strides. If a dimension requires broadcasting
 * (i.e., its size in src1 is smaller than in src0), an additional dimension is
 * added with size calculated to match src0's dimension. This adjustment ensures
 * that src1 can be element-wise broadcasted to src0's shape.
 *
 * How it works:
 * \code
 * if dim0 has padding.
 * a -> (2, 2) padding = 2
 *  a: [[1, 2, *, *]
 *      [2, 3, *, *]]
 *  nb = (8, 4, 2)
 *
 * if a should bcast with b -> (2, 4)
 * b' -> (2, 2, 2)
 *  b : [[1, 2, 3, 4, *, *]
 *       [5, 6, 7, 8, *, *]]
 *  nb = (12, 6, 1)
 *
 * after bcast:
 * a' -> (2, 1, 2)
 * a': [[[1, 2], *, *]
 *      [[2, 3], *, *]]
 *  nb = (8, 4, 2, 1)
 *
 * b' : [[[1, 2], [3, 4], *, *]
 *       [[5, 6], [7, 8], *, *]]
 *  nb = (12, 6, 2, 1)
 * \endcode
 *
 * dim1 in a is an inserted dim, so a stride must be added for dim1,
 * and all other strides move to the next position in order.
 */
int64_t ggml_cann_get_bcast_shape(const ggml_tensor * src0,
                                  const ggml_tensor * src1,
                                  int64_t * bcast_ne_src0,
                                  int64_t * bcast_ne_src1,
                                  size_t * bcast_nb_src0,
                                  size_t * bcast_nb_src1);
280
// Bcast macros to avoid duplicate code.
// BCAST_SHAPE(src0, src1) declares the bcast_<name>_ne / bcast_<name>_nb
// output arrays for both operands plus an int64_t `bcast_dims` holding the
// broadcast rank, all filled by ggml_cann_get_bcast_shape.
// BCAST_PARAM(tensor) expands to the matching (ne, nb, dims) argument triple.
#define BCAST_SHAPE(src0, src1)                                                                       \
    int64_t bcast_##src0##_ne[GGML_MAX_DIMS * 2];                                                     \
    int64_t bcast_##src1##_ne[GGML_MAX_DIMS * 2];                                                     \
    size_t bcast_##src0##_nb[GGML_MAX_DIMS * 2];                                                      \
    size_t bcast_##src1##_nb[GGML_MAX_DIMS * 2];                                                      \
    int64_t bcast_dims = ggml_cann_get_bcast_shape(src0, src1, bcast_##src0##_ne, bcast_##src1##_ne, \
                                                   bcast_##src0##_nb, bcast_##src1##_nb);

#define BCAST_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
291
/**
 * @brief Calculates broadcast shapes for matrix multiplication.
 *
 * @details This function computes the broadcast shapes required for matrix multiplication
 * based on the input, weight, and destination tensor shapes. It ensures that the
 * dimensions of weight tensors are expanded appropriately to satisfy matrix
 * multiplication broadcast rules.
 *
 * @param input_ne Array containing the dimensions of the input tensor.
 * @param weight_ne Array containing the dimensions of the weight tensor.
 * @param dst_ne Array containing the dimensions of the destination tensor.
 * @param input_nb Array containing the strides of the input tensor.
 * @param weight_nb Array containing the strides of the weight tensor.
 * @param dst_nb Array containing the strides of the destination tensor.
 * @param bcast_input_ne Output array for broadcasted input tensor dimensions.
 * @param bcast_weight_ne Output array for broadcasted weight tensor dimensions.
 * @param bcast_dst_ne Output array for broadcasted destination tensor dimensions.
 * @param bcast_input_nb Output array for broadcasted input tensor strides.
 * @param bcast_weight_nb Output array for broadcasted weight tensor strides.
 * @param bcast_dst_nb Output array for broadcasted destination tensor strides.
 * @return The number of dimensions in the broadcasted tensors.
 *
 * @remarks This function iterates over the tensor dimensions and calculates the broadcast
 * shapes needed for matrix multiplication. It ensures that dimensions where
 * the weight tensor requires expansion are appropriately handled to conform with
 * broadcasting rules.
 * @note Compared with ggml_cann_get_bcast_shape, mul_mat broadcasting adds the new
 * dimension before the broadcast dimension.
 * @sa ggml_cann_get_bcast_shape
 */
int64_t ggml_cann_get_mulmat_bcast_shape(const int64_t * input_ne,
                                         const int64_t * weight_ne,
                                         const int64_t * dst_ne,
                                         const size_t * input_nb,
                                         const size_t * weight_nb,
                                         const size_t * dst_nb,
                                         int64_t * bcast_input_ne,
                                         int64_t * bcast_weight_ne,
                                         int64_t * bcast_dst_ne,
                                         size_t * bcast_input_nb,
                                         size_t * bcast_weight_nb,
                                         size_t * bcast_dst_nb);
334
// Bcast macros to avoid duplicate code.
// BCAST_MUL_MAT_SHAPE(input, weight, dst) declares the bcast_<name>_ne /
// bcast_<name>_nb output arrays for all three tensors plus an int64_t
// `bcast_dims`, filled by ggml_cann_get_mulmat_bcast_shape.
// BCAST_MUL_MAT_PARAM(tensor) expands to the matching (ne, nb, dims) triple.
#define BCAST_MUL_MAT_SHAPE(input, weight, dst)                                                               \
    int64_t bcast_##input##_ne[GGML_MAX_DIMS * 2];                                                            \
    int64_t bcast_##weight##_ne[GGML_MAX_DIMS * 2];                                                           \
    int64_t bcast_##dst##_ne[GGML_MAX_DIMS * 2];                                                              \
    size_t bcast_##input##_nb[GGML_MAX_DIMS * 2];                                                             \
    size_t bcast_##weight##_nb[GGML_MAX_DIMS * 2];                                                            \
    size_t bcast_##dst##_nb[GGML_MAX_DIMS * 2];                                                               \
    int64_t bcast_dims = ggml_cann_get_mulmat_bcast_shape(                                                    \
        input->ne, weight->ne, dst->ne, input->nb, weight->nb, dst->nb, bcast_##input##_ne, bcast_##weight##_ne, \
        bcast_##dst##_ne, bcast_##input##_nb, bcast_##weight##_nb, bcast_##dst##_nb);

#define BCAST_MUL_MAT_PARAM(tensor) bcast_##tensor##_ne, bcast_##tensor##_nb, bcast_dims
348
349#endif // CANN_ACL_TENSOR_H