1//
 2// MIT license
 3// Copyright (C) 2024 Intel Corporation
 4// SPDX-License-Identifier: MIT
 5//
 6
 7//
 8// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 9// See https://llvm.org/LICENSE.txt for license information.
10// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
11//
12
13#ifndef GGML_SYCL_GEMM_HPP
14#define GGML_SYCL_GEMM_HPP
15
#include "ggml-sycl.h"

#include <algorithm>
#include <type_traits>
#include <unordered_map>
17
18#if GGML_SYCL_DNNL
19
20#include "dnnl.hpp"
21#include "dnnl_sycl.hpp"
22
23class DnnlGemmWrapper {
24public:
25    using dt = dnnl::memory::data_type;
26    using tag = dnnl::memory::format_tag;
27
28    template<typename T>
29    static constexpr dt to_dt() {
30        if constexpr (std::is_same_v<T, float>) return dt::f32;
31        else if constexpr (std::is_same_v<T, sycl::half>) return dt::f16;
32        else static_assert(0);
33    }
34
35    static void gemm(ggml_backend_sycl_context & ctx, int m, int n, int k,
36        const void * a, dt at, dnnl_dim_t stra0, dnnl_dim_t stra1, dnnl_dim_t stra2,
37        const void * b, dt bt, dnnl_dim_t strb0, dnnl_dim_t strb1, dnnl_dim_t strb2,
38        void * c, dt ct, const queue_ptr & q, dnnl_dim_t batches_a, dnnl_dim_t batches_b) {
39
40        auto stream = ctx.stream_dnnl(q);
41        auto eng = ctx.engine_dnnl(q);
42
43        dnnl::memory::dims a_dims = {batches_a, m, k };
44        dnnl::memory::dims a_strides = {stra2, stra1, stra0};
45        const auto a_in_md = dnnl::memory::desc(a_dims, at, a_strides);
46
47        dnnl::memory::dims b_dims = {batches_b, k, n };
48        dnnl::memory::dims b_strides = {strb2, strb0, strb1};
49        const auto b_in_md = dnnl::memory::desc(b_dims, bt, b_strides);
50
51        dnnl::memory::dims c_dims = { std::max(batches_a, batches_b), m, n};
52        dnnl::memory::dims c_strides = {m*n, 1,  m };
53        const auto c_md    = dnnl::memory::desc(c_dims, ct, c_strides);
54        dnnl::primitive_attr primitive_attr;
55        primitive_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);
56
57#ifdef GGML_SYCL_F16
58        primitive_attr.set_fpmath_mode(dnnl::fpmath_mode::f16);
59#endif
60
61        auto a_mem = dnnl::memory(a_in_md, eng, const_cast<void*>(a));
62        auto b_mem = dnnl::memory(b_in_md, eng, const_cast<void*>(b));
63        auto matmul_pd = dnnl::matmul::primitive_desc(eng, a_in_md, b_in_md, c_md, primitive_attr);
64        auto c_mem = dnnl::memory(matmul_pd.dst_desc(), eng, c);
65
66        auto scratchpad_md = matmul_pd.scratchpad_desc();
67        auto scratchpad_mem = ctx.get_scratchpad_mem(scratchpad_md, eng, q);
68
69        auto matmul_prim = dnnl::matmul(matmul_pd);
70
71        std::unordered_map<int, dnnl::memory> matmul_args;
72        matmul_args.insert({ DNNL_ARG_SRC, a_mem });
73        matmul_args.insert({ DNNL_ARG_WEIGHTS, b_mem });
74
75        matmul_args.insert({ DNNL_ARG_DST, c_mem });
76        matmul_args.insert({ DNNL_ARG_SCRATCHPAD, scratchpad_mem });
77
78        matmul_prim.execute(stream, matmul_args);
79    }
80
81    static void row_gemm(ggml_backend_sycl_context & ctx, int m, int n, int k,
82        const void * a, dt at, const void * b, dt bt, void * c, dt ct, const queue_ptr & q) {
83
84        gemm(ctx, m, n, k, a, at, 1, k, k * m, b, bt, 1, k, n * k, c, ct, q, 1, 1);
85    }
86};
87
88#endif
89
90#endif // GGML_SYCL_GEMM_HPP