author    Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
committer Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
commit    b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree      211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/ggml/src/ggml-sycl
Engage!
Diffstat (limited to 'llama.cpp/ggml/src/ggml-sycl')
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt | 155
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/add-id.cpp | 77
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/add-id.hpp | 8
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/backend.hpp | 45
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/binbcast.cpp | 345
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/binbcast.hpp | 39
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/common.cpp | 83
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/common.hpp | 663
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/concat.cpp | 202
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/concat.hpp | 20
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/conv.cpp | 101
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/conv.hpp | 20
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/convert.cpp | 676
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/convert.hpp | 34
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/count-equal.cpp | 79
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/count-equal.hpp | 9
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/cpy.cpp | 602
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/cpy.hpp | 223
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/dequantize.hpp | 841
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/dmmv.cpp | 1162
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/dmmv.hpp | 27
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp | 3002
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/element_wise.cpp | 1216
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/element_wise.hpp | 96
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/gemm.hpp | 90
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/getrows.cpp | 215
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/getrows.hpp | 20
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp | 5079
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/gla.cpp | 106
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/gla.hpp | 8
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/im2col.cpp | 136
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/im2col.hpp | 21
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/mmq.cpp | 3030
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/mmq.hpp | 33
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/mmvq.cpp | 1156
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/mmvq.hpp | 27
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/norm.cpp | 654
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/norm.hpp | 28
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/outprod.cpp | 47
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/outprod.hpp | 10
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/pad.cpp | 97
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/pad.hpp | 24
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.cpp | 100
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.hpp | 10
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/presets.hpp | 76
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/quantize.hpp | 133
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/quants.hpp | 110
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/repeat_back.cpp | 76
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/repeat_back.hpp | 8
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/roll.cpp | 122
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/roll.hpp | 20
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/rope.cpp | 477
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/rope.hpp | 20
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/set.cpp | 73
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/set.hpp | 5
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/set_rows.cpp | 234
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/set_rows.hpp | 8
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/softmax.cpp | 426
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/softmax.hpp | 24
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/ssm_conv.cpp | 127
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/ssm_conv.hpp | 5
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp | 15
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp | 26
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/tsembd.cpp | 73
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/tsembd.hpp | 20
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp | 1361
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/wkv.cpp | 293
-rw-r--r--  llama.cpp/ggml/src/ggml-sycl/wkv.hpp | 10
68 files changed, 24358 insertions, 0 deletions
diff --git a/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt b/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt
new file mode 100644
index 0000000..eefdd97
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/CMakeLists.txt
@@ -0,0 +1,155 @@
+message(STATUS "GGML_SYCL_TARGET=${GGML_SYCL_TARGET}")
+
+if (NOT GGML_SYCL_TARGET MATCHES "^(INTEL)$")
+ message(FATAL_ERROR "GGML_SYCL_TARGET: Invalid target, the supported options are [INTEL]")
+endif()
+
+check_cxx_compiler_flag("-fsycl" SUPPORTS_SYCL)
+
+if (DEFINED ENV{ONEAPI_ROOT})
+ message(STATUS "Using oneAPI Release SYCL compiler (icpx).")
+elseif(SUPPORTS_SYCL)
+ message(WARNING "Using open-source SYCL compiler (clang++). Didn't detect ENV {ONEAPI_ROOT}.
+ If you expected the oneAPI Release compiler, please install oneAPI & source it, like:
+ source /opt/intel/oneapi/setvars.sh")
+else()
+ message(FATAL_ERROR "C++ compiler lacks SYCL support.")
+endif()
+message(STATUS "SYCL found")
+#todo: AOT
+
+ggml_add_backend_library(ggml-sycl
+ ggml-sycl.cpp
+ ../../include/ggml-sycl.h
+ )
+
+file(GLOB GGML_HEADERS_SYCL "*.hpp")
+file(GLOB GGML_SOURCES_SYCL "*.cpp")
+target_sources(ggml-sycl PRIVATE ${GGML_HEADERS_SYCL} ${GGML_SOURCES_SYCL})
+
+if (WIN32)
+ # To generate a Visual Studio solution, using Intel C++ Compiler for ggml-sycl is mandatory
+ if( ${CMAKE_GENERATOR} MATCHES "Visual Studio" AND NOT (${CMAKE_GENERATOR_TOOLSET} MATCHES "Intel C"))
+ set_target_properties(ggml-sycl PROPERTIES VS_PLATFORM_TOOLSET "Intel C++ Compiler 2025")
+ set(CMAKE_CXX_COMPILER "icx")
+ set(CMAKE_CXX_COMPILER_ID "IntelLLVM")
+ endif()
+endif()
+
+macro(detect_and_find_package package_name)
+ set(test_source "
+ cmake_minimum_required(VERSION ${CMAKE_VERSION})
+ project(check_package LANGUAGES CXX)
+ find_package(${package_name} QUIET)
+ ")
+
+ set(test_dir "${CMAKE_CURRENT_BINARY_DIR}/check_package_${package_name}")
+ file(WRITE "${test_dir}/CMakeLists.txt" "${test_source}")
+
+ set(cmake_args "")
+ if(CMAKE_GENERATOR)
+ list(APPEND cmake_args "-G" "${CMAKE_GENERATOR}")
+ endif()
+ if(CMAKE_GENERATOR_PLATFORM)
+ list(APPEND cmake_args "-A" "${CMAKE_GENERATOR_PLATFORM}")
+ endif()
+ if(CMAKE_GENERATOR_TOOLSET)
+ list(APPEND cmake_args "-T" "${CMAKE_GENERATOR_TOOLSET}")
+ endif()
+ if(CMAKE_CXX_COMPILER)
+ list(APPEND cmake_args "-DCMAKE_CXX_COMPILER=${CMAKE_CXX_COMPILER}")
+ endif()
+
+ execute_process(
+ COMMAND ${CMAKE_COMMAND} ${cmake_args} .
+ WORKING_DIRECTORY "${test_dir}"
+ RESULT_VARIABLE result
+ OUTPUT_QUIET
+ ERROR_QUIET
+ )
+
+ if(result EQUAL 0)
+ find_package(${package_name} ${ARGN})
+ else()
+ message(WARNING "Detection of ${package_name} failed. The package might be broken or incompatible.")
+ set(${package_name}_FOUND FALSE)
+ endif()
+endmacro()
+
+detect_and_find_package(IntelSYCL)
+if (IntelSYCL_FOUND)
+ # Use oneAPI CMake when possible
+ target_link_libraries(ggml-sycl PRIVATE IntelSYCL::SYCL_CXX)
+else()
+ # Fallback to the simplest way of enabling SYCL when using intel/llvm nightly for instance
+ target_compile_options(ggml-sycl PRIVATE "-fsycl")
+ target_link_options(ggml-sycl PRIVATE "-fsycl")
+endif()
+
+target_compile_options(ggml-sycl PRIVATE "-Wno-narrowing")
+
+# Link against oneDNN
+set(GGML_SYCL_DNNL 0)
+if(GGML_SYCL_DNN)
+ find_package(DNNL)
+ if(DNNL_FOUND)
+ if (NOT DEFINED DNNL_GPU_VENDOR)
+ # default to intel target
+ set(DNNL_GPU_VENDOR "INTEL")
+ if(NOT "${GGML_SYCL_TARGET}" STREQUAL "INTEL")
+ message(WARNING "oneDNN builds bundled with oneapi release only support INTEL target")
+ endif()
+ endif()
+
+ # Verify oneDNN was compiled for the same target as llama
+ if("${GGML_SYCL_TARGET}" STREQUAL "${DNNL_GPU_VENDOR}")
+ target_link_libraries(ggml-sycl PRIVATE DNNL::dnnl)
+ set(GGML_SYCL_DNNL 1)
+ get_target_property(CONFIGS DNNL::dnnl IMPORTED_CONFIGURATIONS)
+ foreach(CONFIG ${CONFIGS})
+ get_target_property(DNNL_LIB DNNL::dnnl IMPORTED_LOCATION_${CONFIG})
+ message(STATUS "Found oneDNN: ${DNNL_LIB}")
+ endforeach()
+ else()
+ message(WARNING
+ "oneDNN must be compiled for the same target as llama.cpp.
+ llama.cpp: ${GGML_SYCL_TARGET}, oneDNN: ${DNNL_GPU_VENDOR}.
+ Disabling oneDNN support.")
+ endif()
+ else()
+ message(STATUS "oneDNN not found, disabling oneDNN support")
+ endif()
+else()
+ message(STATUS "oneDNN support disabled by the user")
+endif()
+target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_DNNL=${GGML_SYCL_DNNL})
+
+if (GGML_SYCL_F16)
+ add_compile_definitions(GGML_SYCL_F16)
+endif()
+
+if (GGML_SYCL_TARGET STREQUAL "INTEL")
+ add_compile_definitions(GGML_SYCL_WARP_SIZE=16)
+ target_link_options(ggml-sycl PRIVATE -Xs -ze-intel-greater-than-4GB-buffer-required)
+
+ # Link against Intel oneMKL
+ if (CMAKE_CXX_COMPILER_ID STREQUAL "Clang")
+ set(SYCL_COMPILER ON)
+ endif()
+ find_package(MKL REQUIRED)
+ target_link_libraries(ggml-sycl PRIVATE MKL::MKL_SYCL::BLAS)
+else()
+ # default for other target
+ message(FATAL_ERROR "GGML_SYCL_TARGET is not supported")
+ add_compile_definitions(GGML_SYCL_WARP_SIZE=32)
+endif()
+
+if (GGML_SYCL_GRAPH)
+ target_compile_definitions(ggml-sycl PRIVATE GGML_SYCL_GRAPH)
+endif()
+
+if (GGML_SYCL_DEVICE_ARCH)
+ target_compile_options(ggml-sycl PRIVATE -Xsycl-target-backend --offload-arch=${GGML_SYCL_DEVICE_ARCH})
+ target_link_options(ggml-sycl PRIVATE -Xsycl-target-backend --offload-arch=${GGML_SYCL_DEVICE_ARCH})
+endif()
+
diff --git a/llama.cpp/ggml/src/ggml-sycl/add-id.cpp b/llama.cpp/ggml/src/ggml-sycl/add-id.cpp
new file mode 100644
index 0000000..00c073c
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/add-id.cpp
@@ -0,0 +1,77 @@
+#include <sycl/sycl.hpp>
+#include "common.hpp"
+#include "add-id.hpp"
+
+static void add_id_kernel(
+ const float* src0,
+ const float* src1,
+ const int32_t* src2,
+ float* dst,
+ int64_t ne0,
+ int64_t ne1,
+ size_t nb01,
+ size_t nb02,
+ size_t nb11,
+ size_t nb21,
+ sycl::nd_item<3> item_ct1) {
+ const int64_t i1 = item_ct1.get_group(2);
+ const int64_t i2 = item_ct1.get_group(1);
+
+ const int i11 =
+ *(const int32_t*)((const char*)src2 + i1 * sizeof(int32_t) + i2 * nb21);
+
+ const size_t nb1 = ne0 * sizeof(float);
+ const size_t nb2 = ne1 * nb1;
+
+ float* dst_row = (float*)((char*)dst + i1 * nb1 + i2 * nb2);
+ const float* src0_row =
+ (const float*)((const char*)src0 + i1 * nb01 + i2 * nb02);
+ const float* src1_row = (const float*)((const char*)src1 + i11 * nb11);
+
+ for (int64_t i0 = item_ct1.get_local_id(2); i0 < ne0;
+ i0 += item_ct1.get_local_range(2)) {
+ dst_row[i0] = src0_row[i0] + src1_row[i0];
+ }
+}
+
+void ggml_sycl_add_id(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+ const ggml_tensor* src0 = dst->src[0];
+ const ggml_tensor* src1 = dst->src[1];
+ const ggml_tensor* src2 = dst->src[2];
+
+ GGML_TENSOR_TERNARY_OP_LOCALS
+
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(src2->type == GGML_TYPE_I32);
+
+ GGML_ASSERT(nb00 == sizeof(float));
+ GGML_ASSERT(nb10 == sizeof(float));
+ GGML_ASSERT(nb20 == sizeof(int32_t));
+
+ const float* src0_d = (const float*)src0->data;
+ const float* src1_d = (const float*)src1->data;
+ const int32_t* src2_d = (const int32_t*)src2->data;
+ float* dst_d = (float*)dst->data;
+
+ int threads = std::min((int)ne00, 768); // cols
+ ctx.stream()->parallel_for(
+ sycl::nd_range<3>(
+ sycl::range<3>(1, ne02, ne01) * sycl::range<3>(1, 1, threads),
+ sycl::range<3>(1, 1, threads)),
+ [=](sycl::nd_item<3> item_ct1) {
+ add_id_kernel(
+ src0_d,
+ src1_d,
+ src2_d,
+ dst_d,
+ ne0,
+ ne1,
+ nb01,
+ nb02,
+ nb11,
+ nb21,
+ item_ct1);
+ });
+}
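
Editor's note: a scalar reference makes the indexing above easier to check. The sketch below is a hypothetical CPU implementation of the same ADD_ID semantics, assuming fully contiguous f32 tensors (it is not part of the diff): each row (i1, i2) of src0 gets the row of src1 selected by the i32 index tensor src2.

#include <cstdint>

// Hypothetical reference: dst[i2][i1][:] = src0[i2][i1][:] + src1[src2[i2][i1]][:]
static void add_id_reference(const float * src0, const float * src1,
                             const int32_t * src2, float * dst,
                             int64_t ne0, int64_t ne1, int64_t ne2) {
    for (int64_t i2 = 0; i2 < ne2; ++i2) {
        for (int64_t i1 = 0; i1 < ne1; ++i1) {
            const int32_t i11 = src2[i2 * ne1 + i1]; // src1 row picked per (i1, i2)
            const float * s0 = src0 + (i2 * ne1 + i1) * ne0;
            const float * s1 = src1 + (int64_t) i11 * ne0;
            float * d = dst + (i2 * ne1 + i1) * ne0;
            for (int64_t i0 = 0; i0 < ne0; ++i0) {
                d[i0] = s0[i0] + s1[i0];
            }
        }
    }
}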
diff --git a/llama.cpp/ggml/src/ggml-sycl/add-id.hpp b/llama.cpp/ggml/src/ggml-sycl/add-id.hpp
new file mode 100644
index 0000000..e1b09ee
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/add-id.hpp
@@ -0,0 +1,8 @@
+#ifndef GGML_SYCL_ADD_ID_HPP
+#define GGML_SYCL_ADD_ID_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_add_id(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_ADD_ID_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/backend.hpp b/llama.cpp/ggml/src/ggml-sycl/backend.hpp
new file mode 100644
index 0000000..75657f3
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/backend.hpp
@@ -0,0 +1,45 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_BACKEND_HPP
+#define GGML_SYCL_BACKEND_HPP
+
+#include "binbcast.hpp"
+#include "common.hpp"
+#include "concat.hpp"
+#include "conv.hpp"
+#include "convert.hpp"
+#include "count-equal.hpp"
+#include "cpy.hpp"
+#include "dequantize.hpp"
+#include "dmmv.hpp"
+#include "element_wise.hpp"
+#include "gla.hpp"
+#include "im2col.hpp"
+#include "mmq.hpp"
+#include "mmvq.hpp"
+#include "norm.hpp"
+#include "outprod.hpp"
+#include "pad.hpp"
+#include "quantize.hpp"
+#include "quants.hpp"
+#include "roll.hpp"
+#include "rope.hpp"
+#include "set_rows.hpp"
+#include "ssm_conv.hpp"
+#include "softmax.hpp"
+#include "tsembd.hpp"
+#include "wkv.hpp"
+#include "pad_reflect_1d.hpp"
+
+
+#endif // GGML_SYCL_BACKEND_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp b/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp
new file mode 100644
index 0000000..0a3883a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/binbcast.cpp
@@ -0,0 +1,345 @@
+#include "binbcast.hpp"
+
+#include <cstddef>
+#include <cstdint>
+#include <sycl/sycl.hpp>
+
+#include "ggml.h"
+
+template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
+static void k_bin_bcast(const src0_t * src0, const src1_t * src1, dst_t * dst,
+ int ne0, int ne1, int ne2, int ne3,
+ int ne10, int ne11, int ne12, int ne13,
+ /*int s0, */ int s1, int s2, int s3,
+ /*int s00,*/ int s01, int s02, int s03,
+ /*int s10,*/ int s11, int s12, int s13,
+ const sycl::nd_item<3> &item_ct1) {
+ const int i0s = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
+ item_ct1.get_local_id(2);
+ const int i1 = (item_ct1.get_local_range(1) * item_ct1.get_group(1) +
+ item_ct1.get_local_id(1));
+ const int i2 = (item_ct1.get_local_range(0) * item_ct1.get_group(0) +
+ item_ct1.get_local_id(0)) /
+ ne3;
+ const int i3 = (item_ct1.get_local_range(0) * item_ct1.get_group(0) +
+ item_ct1.get_local_id(0)) %
+ ne3;
+
+ if (i0s >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
+ return;
+ }
+
+ const int i11 = i1 % ne11;
+ const int i12 = i2 % ne12;
+ const int i13 = i3 % ne13;
+
+ const size_t i_src0 = i3*s03 + i2*s02 + i1*s01;
+ const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
+ const size_t i_dst = i3*s3 + i2*s2 + i1*s1;
+
+ const src0_t * src0_row = src0 + i_src0;
+ const src1_t * src1_row = src1 + i_src1;
+ dst_t * dst_row = dst + i_dst;
+
+ for (int i0 = i0s; i0 < ne0;
+ i0 += item_ct1.get_local_range(2) * item_ct1.get_group_range(2)) {
+ const int i10 = i0 % ne10;
+ dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
+ }
+}
+
+template<float (*bin_op)(const float, const float), typename src0_t, typename src1_t, typename dst_t>
+static void k_bin_bcast_unravel(const src0_t * src0, const src1_t * src1, dst_t * dst,
+ int ne0, int ne1, int ne2, int ne3,
+ int ne10, int ne11, int ne12, int ne13,
+ /*int s0, */ int s1, int s2, int s3,
+ /*int s00,*/ int s01, int s02, int s03,
+ /*int s10,*/ int s11, int s12, int s13,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
+ item_ct1.get_local_id(2);
+
+ const int i3 = i/(ne2*ne1*ne0);
+ const int i2 = (i/(ne1*ne0)) % ne2;
+ const int i1 = (i/ne0) % ne1;
+ const int i0 = i % ne0;
+
+ if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
+ return;
+ }
+
+ const int i11 = i1 % ne11;
+ const int i12 = i2 % ne12;
+ const int i13 = i3 % ne13;
+
+ const size_t i_src0 = i3*s03 + i2*s02 + i1*s01;
+ const size_t i_src1 = i13*s13 + i12*s12 + i11*s11;
+ const size_t i_dst = i3*s3 + i2*s2 + i1*s1;
+
+ const src0_t * src0_row = src0 + i_src0;
+ const src1_t * src1_row = src1 + i_src1;
+ dst_t * dst_row = dst + i_dst;
+
+ const int i10 = i0 % ne10;
+ dst_row[i0] = (dst_t)bin_op(src0 ? (float)src0_row[i0] : 0.0f, (float)src1_row[i10]);
+}
+
+
+template<float (*bin_op)(const float, const float)>
+struct bin_bcast_sycl {
+ template <typename src0_t, typename src1_t, typename dst_t>
+ void operator()(const src0_t * src0_dd, const src1_t * src1_dd, dst_t * dst_dd, const int64_t ne00,
+ const int64_t ne01, const int64_t ne02, const int64_t ne03, const int64_t ne10, const int64_t ne11,
+ const int64_t ne12, const int64_t ne13, const int64_t ne0, const int64_t ne1, const int64_t ne2,
+ const int64_t ne3, const size_t nb00, const size_t nb01, const size_t nb02, const size_t nb03,
+ const size_t nb10, const size_t nb11, const size_t nb12, const size_t nb13, const size_t nb0,
+ const size_t nb1, const size_t nb2, const size_t nb3, const bool src0_is_contiguous,
+ const bool src1_is_contiguous, const bool dst_is_contiguous, queue_ptr stream) {
+ int nr0 = ne10 / ne0;
+ int nr1 = ne11/ne1;
+ int nr2 = ne12/ne2;
+ int nr3 = ne13/ne3;
+
+ int nr[4] = { nr0, nr1, nr2, nr3 };
+
+ // collapse dimensions until first broadcast dimension
+ int64_t cne[] = {ne0, ne1, ne2, ne3};
+ int64_t cne0[] = {ne00, ne01, ne02, ne03};
+ int64_t cne1[] = {ne10, ne11, ne12, ne13};
+ size_t cnb[] = {nb0, nb1, nb2, nb3};
+ size_t cnb0[] = {nb00, nb01, nb02, nb03};
+ size_t cnb1[] = {nb10, nb11, nb12, nb13};
+ auto collapse = [](int64_t cne[]) {
+ cne[0] *= cne[1];
+ cne[1] = cne[2];
+ cne[2] = cne[3];
+ cne[3] = 1;
+ };
+
+ auto collapse_nb = [](size_t cnb[], int64_t cne[]) {
+ cnb[1] *= cne[1];
+ cnb[2] *= cne[2];
+ cnb[3] *= cne[3];
+ };
+
+ if (src0_is_contiguous && src1_is_contiguous && dst_is_contiguous) {
+ for (int i = 0; i < 4; i++) {
+ if (nr[i] != 1) {
+ break;
+ }
+ if (i > 0) {
+ collapse_nb(cnb, cne);
+ collapse_nb(cnb0, cne0);
+ collapse_nb(cnb1, cne1);
+ collapse(cne);
+ collapse(cne0);
+ collapse(cne1);
+ }
+ }
+ }
+ {
+ int64_t ne0 = cne[0];
+ int64_t ne1 = cne[1];
+ int64_t ne2 = cne[2];
+ int64_t ne3 = cne[3];
+
+ int64_t ne10 = cne1[0];
+ int64_t ne11 = cne1[1];
+ int64_t ne12 = cne1[2];
+ int64_t ne13 = cne1[3];
+
+ size_t nb0 = cnb[0];
+ size_t nb1 = cnb[1];
+ size_t nb2 = cnb[2];
+ size_t nb3 = cnb[3];
+
+ size_t nb00 = cnb0[0];
+ size_t nb01 = cnb0[1];
+ size_t nb02 = cnb0[2];
+ size_t nb03 = cnb0[3];
+
+ size_t nb10 = cnb1[0];
+ size_t nb11 = cnb1[1];
+ size_t nb12 = cnb1[2];
+ size_t nb13 = cnb1[3];
+
+ size_t s0 = nb0 / sizeof(dst_t);
+ size_t s1 = nb1 / sizeof(dst_t);
+ size_t s2 = nb2 / sizeof(dst_t);
+ size_t s3 = nb3 / sizeof(dst_t);
+
+ size_t s10 = nb10 / sizeof(src1_t);
+ size_t s11 = nb11 / sizeof(src1_t);
+ size_t s12 = nb12 / sizeof(src1_t);
+ size_t s13 = nb13 / sizeof(src1_t);
+
+ size_t s00 = nb00 / sizeof(src0_t);
+ size_t s01 = nb01 / sizeof(src0_t);
+ size_t s02 = nb02 / sizeof(src0_t);
+ size_t s03 = nb03 / sizeof(src0_t);
+
+ GGML_UNUSED(s00);
+
+ GGML_ASSERT(nb0 % sizeof(dst_t) == 0);
+ GGML_ASSERT(nb1 % sizeof(dst_t) == 0);
+ GGML_ASSERT(nb2 % sizeof(dst_t) == 0);
+ GGML_ASSERT(nb3 % sizeof(dst_t) == 0);
+
+ GGML_ASSERT(nb00 % sizeof(src0_t) == 0);
+ GGML_ASSERT(nb01 % sizeof(src0_t) == 0);
+ GGML_ASSERT(nb02 % sizeof(src0_t) == 0);
+ GGML_ASSERT(nb03 % sizeof(src0_t) == 0);
+
+ GGML_ASSERT(nb10 % sizeof(src1_t) == 0);
+ GGML_ASSERT(nb11 % sizeof(src1_t) == 0);
+ GGML_ASSERT(nb12 % sizeof(src1_t) == 0);
+ GGML_ASSERT(nb13 % sizeof(src1_t) == 0);
+
+ GGML_ASSERT(s0 == 1);
+ GGML_ASSERT(s10 == 1);
+
+ const int block_size = 128;
+
+ int64_t hne0 = std::max(ne0/2LL, 1LL);
+
+ sycl::range<3> block_dims(1, 1, 1);
+ block_dims[2] = std::min<unsigned int>(hne0, block_size);
+ block_dims[1] = std::min<unsigned int>(
+ ne1, block_size / (unsigned int)block_dims[2]);
+ block_dims[0] = std::min(
+ std::min<unsigned int>(
+ ne2 * ne3, block_size / (unsigned int)block_dims[2] /
+ (unsigned int)block_dims[1]),
+ 64U);
+
+ sycl::range<3> block_nums(
+ (ne2 * ne3 + block_dims[0] - 1) / block_dims[0],
+ (ne1 + block_dims[1] - 1) / block_dims[1],
+ (hne0 + block_dims[2] - 1) / block_dims[2]);
+
+ if (block_nums[0] > 65535) {
+ // this is the maximum number of blocks in z direction, fallback to 1D grid kernel
+ int block_num = (ne0*ne1*ne2*ne3 + block_size - 1) / block_size;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, block_num) *
+ sycl::range<3>(1, 1, block_size),
+ sycl::range<3>(1, 1, block_size)),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_bin_bcast_unravel<bin_op>(
+ src0_dd, src1_dd, dst_dd, ne0, ne1, ne2, ne3,
+ ne10, ne11, ne12, ne13, s1, s2, s3, s01, s02,
+ s03, s11, s12, s13, item_ct1);
+ });
+ }
+ } else {
+ /*
+ DPCT1049:16: The work-group size passed to the SYCL kernel may
+ exceed the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if
+ needed.
+ */
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_bin_bcast<bin_op>(src0_dd, src1_dd, dst_dd, ne0, ne1,
+ ne2, ne3, ne10, ne11, ne12, ne13,
+ s1, s2, s3, s01, s02, s03, s11, s12, s13,
+ item_ct1);
+ });
+ }
+ }
+ }
+};
+
+template <class op>
+inline void ggml_sycl_op_bin_bcast(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1,
+ ggml_tensor * dst) {
+ dpct::queue_ptr main_stream = ctx.stream();
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32) {
+ op()((const float *) src0->data, (const float *) src1->data, (float *) dst->data, ne00, ne01, ne02, ne03, ne10,
+ ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2, nb3,
+ ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream);
+ } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16 && dst->type == GGML_TYPE_F16) {
+ op()((const sycl::half *) src0->data, (const sycl::half *) src1->data, (sycl::half *) dst->data, ne00, ne01,
+ ne02, ne03, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13,
+ nb0, nb1, nb2, nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst),
+ main_stream);
+ } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F16) {
+ op()((const sycl::half *) src0->data, (const float *) src1->data, (sycl::half *) dst->data, ne00, ne01, ne02,
+ ne03, ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1,
+ nb2, nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream);
+ } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32 && dst->type == GGML_TYPE_I32) {
+ op()((const int32_t *) src0->data, (const int32_t *) src1->data, (int32_t *) dst->data, ne00, ne01, ne02, ne03,
+ ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2,
+ nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream);
+ } else if (src0->type == GGML_TYPE_I16 && src1->type == GGML_TYPE_I16 && dst->type == GGML_TYPE_I16) {
+ op()((const int16_t *) src0->data, (const int16_t *) src1->data, (int16_t *) dst->data, ne00, ne01, ne02, ne03,
+ ne10, ne11, ne12, ne13, ne0, ne1, ne2, ne3, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb0, nb1, nb2,
+ nb3, ggml_is_contiguous(src0), ggml_is_contiguous(src1), ggml_is_contiguous(dst), main_stream);
+ } else {
+ fprintf(stderr, "%s: unsupported types: dst: %s, src0: %s, src1: %s\n", __func__, ggml_type_name(dst->type),
+ ggml_type_name(src0->type), ggml_type_name(src1->type));
+ GGML_ABORT("fatal error");
+ }
+}
+
+inline void ggml_sycl_op_add(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+
+ ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_add>>(ctx, dst->src[0], dst->src[1], dst);
+}
+
+inline void ggml_sycl_op_sub(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+
+ ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_sub>>(ctx, dst->src[0], dst->src[1], dst);
+}
+
+inline void ggml_sycl_op_mul(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+
+ ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_mul>>(ctx, dst->src[0], dst->src[1], dst);
+}
+
+inline void ggml_sycl_op_div(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+
+ ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_div>>(ctx, dst->src[0], dst->src[1], dst);
+}
+
+inline void ggml_sycl_op_repeat(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+ ggml_sycl_op_bin_bcast<bin_bcast_sycl<op_repeat>>(ctx, dst, dst->src[0], dst);
+}
+
+
+void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_add(ctx, dst);
+}
+
+void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_sub(ctx, dst);
+}
+
+void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_mul(ctx, dst);
+}
+
+void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_div(ctx, dst);
+}
+
+void ggml_sycl_repeat(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_repeat(ctx, dst);
+}
+
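Editor's note: once the stride bookkeeping and the dimension-collapsing step above are stripped away (the collapse only fuses leading non-broadcast dimensions so the kernel does less index arithmetic), the rule both kernels implement is plain modulo indexing: a src1 dimension of extent 1 repeats across the corresponding dst dimension. A hypothetical contiguous f32 sketch, not part of the diff:

#include <cstdint>

// ne = dst extents, ne1 = src1 extents (each ne1[k] is 1 or equals ne[k]);
// src0 has the same shape as dst.
static void bin_bcast_reference(const float * src0, const float * src1, float * dst,
                                const int64_t ne[4], const int64_t ne1[4],
                                float (*op)(float, float)) {
    for (int64_t i3 = 0; i3 < ne[3]; ++i3)
    for (int64_t i2 = 0; i2 < ne[2]; ++i2)
    for (int64_t i1 = 0; i1 < ne[1]; ++i1)
    for (int64_t i0 = 0; i0 < ne[0]; ++i0) {
        const int64_t j0 = i0 % ne1[0], j1 = i1 % ne1[1];
        const int64_t j2 = i2 % ne1[2], j3 = i3 % ne1[3];
        const int64_t di = ((i3 * ne[2] + i2) * ne[1] + i1) * ne[0] + i0;
        const int64_t si = ((j3 * ne1[2] + j2) * ne1[1] + j1) * ne1[0] + j0;
        dst[di] = op(src0[di], src1[si]);
    }
}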
diff --git a/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp b/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp
new file mode 100644
index 0000000..9cce0f0
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/binbcast.hpp
@@ -0,0 +1,39 @@
+#ifndef GGML_SYCL_BINBCAST_HPP
+#define GGML_SYCL_BINBCAST_HPP
+#include "common.hpp"
+
+
+static __dpct_inline__ float op_repeat(const float a, const float b) {
+ return b;
+ GGML_UNUSED(a);
+}
+
+static __dpct_inline__ float op_add(const float a, const float b) {
+ return a + b;
+}
+
+static __dpct_inline__ float op_sub(const float a, const float b) {
+ return a - b;
+}
+
+static __dpct_inline__ float op_mul(const float a, const float b) {
+ return a * b;
+}
+
+static __dpct_inline__ float op_div(const float a, const float b) {
+ return a / b;
+}
+
+void ggml_sycl_add(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_sub(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_mul(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_div(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_repeat(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+
+#endif //GGML_SYCL_BINBCAST_HPP
+
diff --git a/llama.cpp/ggml/src/ggml-sycl/common.cpp b/llama.cpp/ggml/src/ggml-sycl/common.cpp
new file mode 100644
index 0000000..05fd5ef
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/common.cpp
@@ -0,0 +1,83 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "common.hpp"
+
+#include "ggml-backend-impl.h"
+#include "ggml-impl.h"
+
+int get_current_device_id() {
+ return dpct::dev_mgr::instance().current_device_id();
+}
+
+void* ggml_sycl_host_malloc(size_t size) try {
+ if (getenv("GGML_SYCL_NO_PINNED") != nullptr) {
+ return nullptr;
+ }
+
+ void* ptr = nullptr;
+ // allow using dpct::get_in_order_queue() for host malloc
+ dpct::err0 err = CHECK_TRY_ERROR(
+ ptr = (void*)sycl::malloc_host(size, dpct::get_in_order_queue()));
+
+ if (err != 0) {
+ // clear the error
+ GGML_LOG_ERROR("WARNING: failed to allocate %.2f MB of pinned memory: %s\n", size / 1024.0 / 1024.0, "syclGetErrorString is not supported");
+ return nullptr;
+ }
+
+ return ptr;
+} catch (sycl::exception const& exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+void ggml_sycl_host_free(void* ptr) try {
+ // allow using dpct::get_in_order_queue() for host free
+ SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(ptr, dpct::get_in_order_queue())));
+} catch (sycl::exception const& exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+bool gpu_has_xmx(sycl::device &dev) {
+ return dev.has(sycl::aspect::ext_intel_matrix);
+}
+
+int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size) {
+ const int64_t max_range = std::numeric_limits<int>::max();
+ int64_t sycl_down_blk_size = block_size;
+ int64_t global_range = accumulate_block_num * sycl_down_blk_size;
+ while(global_range > max_range) {
+ sycl_down_blk_size /= 2;
+ global_range = accumulate_block_num * sycl_down_blk_size;
+ }
+ return sycl_down_blk_size;
+}
+
+void release_extra_gpu(ggml_tensor_extra_gpu * extra, std::vector<queue_ptr> streams) {
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ for (int64_t is = 0; is < GGML_SYCL_MAX_STREAMS; ++is) {
+ if (extra->events[i][is] != nullptr) {
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::destroy_event(extra->events[i][is])));
+ }
+ }
+ if (extra->data_device[i] != nullptr && streams.size()>0) {
+ ggml_sycl_set_device(i);
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(sycl::free(extra->data_device[i], *(streams[i]))));
+ }
+ }
+ delete extra;
+}
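
Editor's note: downsample_sycl_global_range exists because the total global range handed to a SYCL launch is kept within INT_MAX here; the helper repeatedly halves the block size until block_num * block_size fits. A worked example with assumed inputs (illustrative, not part of the diff):

#include <cassert>
// assumes common.hpp is included for the declaration
int main() {
    // 40,000,000 blocks * 256 threads = 1.024e10 > INT_MAX (~2.147e9),
    // so the block size halves: 128 (5.12e9), 64 (2.56e9), then 32 (1.28e9, fits)
    assert(downsample_sycl_global_range(40000000, 256) == 32);
    return 0;
}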
diff --git a/llama.cpp/ggml/src/ggml-sycl/common.hpp b/llama.cpp/ggml/src/ggml-sycl/common.hpp
new file mode 100644
index 0000000..519638f
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/common.hpp
@@ -0,0 +1,663 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_COMMON_HPP
+#define GGML_SYCL_COMMON_HPP
+
+#include <cstddef>
+#include <fstream>
+#include <iostream>
+#include <string>
+
+#include "dpct/helper.hpp"
+#include "ggml-sycl.h"
+#include "presets.hpp"
+#include "sycl_hw.hpp"
+
+
+#if GGML_SYCL_DNNL
+#include "dnnl.hpp"
+#include "dnnl_sycl.hpp"
+#endif
+
+#define GGML_COMMON_DECL_SYCL
+#define GGML_COMMON_IMPL_SYCL
+/* suppress warning spam */
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wnested-anon-types"
+#include "ggml-common.h"
+#pragma clang diagnostic pop
+#include "ggml-impl.h"
+
+void* ggml_sycl_host_malloc(size_t size);
+void ggml_sycl_host_free(void* ptr);
+
+
+extern int g_ggml_sycl_debug;
+extern int g_ggml_sycl_disable_optimize;
+extern int g_ggml_sycl_prioritize_dmmv;
+
+#if defined(__clang__) && __has_builtin(__builtin_expect)
+// Hint the optimizer to pipeline the more likely following instruction in branches
+# define LIKELY(expr) __builtin_expect(expr, true)
+# define UNLIKELY(expr) __builtin_expect(expr, false)
+#else
+# define LIKELY(expr) (expr)
+# define UNLIKELY(expr) (expr)
+#endif
+
+#define GGML_SYCL_DEBUG(...) \
+ do { \
+ if (UNLIKELY(g_ggml_sycl_debug)) \
+ fprintf(stderr, __VA_ARGS__); \
+ } while (0)
+
+#define CHECK_TRY_ERROR(expr) \
+ [&]() { \
+ try { \
+ expr; \
+ return dpct::success; \
+ } catch (std::exception const& e) { \
+ std::cerr << e.what() << "\nException caught at file:" << __FILE__ \
+ << ", line:" << __LINE__ << ", func:" << __func__ \
+ << std::endl; \
+ return dpct::default_error; \
+ } \
+ }()
+
+
+#define __SYCL_ARCH__ DPCT_COMPATIBILITY_TEMP
+#define VER_4VEC 610 // TODO: for hardware optimization.
+#define VER_GEN9 700 // TODO: for hardware optimization.
+#define VER_GEN12 1000000 // TODO: for hardware optimization.
+#define VER_GEN13 (VER_GEN12 + 1030) // TODO: for hardware optimization.
+
+#define GGML_SYCL_MAX_NODES 8192 // TODO: adapt to hardware
+
+// define for XMX in Intel GPU
+// TODO: currently, it's not used for XMX really.
+#if !defined(GGML_SYCL_FORCE_MMQ)
+ #define SYCL_USE_XMX
+#endif
+
+// max batch size to use MMQ kernels when tensor cores are available
+#define MMQ_MAX_BATCH_SIZE 32
+
+// dmmv = dequantize_mul_mat_vec
+#ifndef GGML_SYCL_DMMV_X
+#define GGML_SYCL_DMMV_X 32
+#endif
+#ifndef GGML_SYCL_MMV_Y
+#define GGML_SYCL_MMV_Y 1
+#endif
+
+typedef sycl::queue *queue_ptr;
+
+enum ggml_sycl_backend_gpu_mode {
+ SYCL_UNSET_GPU_MODE = -1,
+ SYCL_SINGLE_GPU_MODE = 0,
+ SYCL_MUL_GPU_MODE
+};
+
+static_assert(sizeof(sycl::half) == sizeof(ggml_fp16_t), "wrong fp16 size");
+
+static void crash() {
+ int* ptr = NULL;
+ *ptr = 0;
+}
+
+[[noreturn]] static void ggml_sycl_error(
+ const char* stmt,
+ const char* func,
+ const char* file,
+ const int line,
+ const char* msg) {
+ fprintf(stderr, "SYCL error: %s: %s\n", stmt, msg);
+ fprintf(stderr, " in function %s at %s:%d\n", func, file, line);
+ GGML_ABORT("SYCL error");
+}
+
+#define SYCL_CHECK(err) \
+ do { \
+ auto err_ = (err); \
+ if (err_ != 0) \
+ ggml_sycl_error(#err, __func__, __FILE__, __LINE__, "Exception caught in this line of code."); \
+ } while (0)
+
+#if DPCT_COMPAT_RT_VERSION >= 11100
+#define GGML_SYCL_ASSUME(x) __builtin_assume(x)
+#else
+#define GGML_SYCL_ASSUME(x)
+#endif // DPCT_COMPAT_RT_VERSION >= 11100
+
+#ifdef GGML_SYCL_F16
+typedef sycl::half dfloat; // dequantize float
+typedef sycl::half2 dfloat2;
+#else
+typedef float dfloat; // dequantize float
+typedef sycl::float2 dfloat2;
+#endif // GGML_SYCL_F16
+
+#define MMVQ_MAX_BATCH_SIZE 8
+
+static int g_all_sycl_device_count = -1;
+static bool g_ggml_backend_sycl_buffer_type_initialized = false;
+
+static ggml_sycl_backend_gpu_mode g_ggml_sycl_backend_gpu_mode =
+ SYCL_UNSET_GPU_MODE;
+
+static void* g_scratch_buffer = nullptr;
+static size_t g_scratch_size = 0; // disabled by default
+static size_t g_scratch_offset = 0;
+
+[[noreturn]] static inline void bad_arch(const sycl::stream& stream_ct1) {
+ stream_ct1 << "ERROR: ggml-sycl was compiled without support for the "
+ "current GPU architecture.\n";
+ // __trap();
+ std::exit(1);
+
+ (void)bad_arch; // suppress unused function warning
+}
+
+int get_current_device_id();
+
+inline dpct::err0 ggml_sycl_set_device(const int device) try {
+ int current_device_id;
+ SYCL_CHECK(CHECK_TRY_ERROR(current_device_id = get_current_device_id()));
+
+ // GGML_SYCL_DEBUG("ggml_sycl_set_device device_id=%d,
+ // current_device_id=%d\n", device, current_device);
+ if (device == current_device_id) {
+ return 0;
+ }
+
+ return CHECK_TRY_ERROR(dpct::select_device(device));
+} catch (sycl::exception const& exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ crash();
+ std::exit(1);
+}
+
+//////////////////////
+struct optimize_feature {
+ bool reorder=false;
+};
+
+struct sycl_device_info {
+ int cc; // compute capability
+ int nsm; // number of streaming multiprocessors (CUDA) maps to the maximum
+ // number of compute units on a SYCL device.
+ // size_t smpb; // max. shared memory per block
+ size_t smpbo; // max. shared memory per block (with opt-in)
+ bool vmm; // virtual memory support
+ size_t total_vram;
+ //sycl_hw_info hw_info; // device id and arch, currently not used
+ optimize_feature opt_feature;
+};
+
+
+struct ggml_sycl_device_info {
+ int device_count;
+
+ sycl_device_info devices[GGML_SYCL_MAX_DEVICES] = {};
+
+ std::array<float, GGML_SYCL_MAX_DEVICES> default_tensor_split = {};
+
+ int max_work_group_sizes[GGML_SYCL_MAX_DEVICES] = {0};
+};
+
+const ggml_sycl_device_info & ggml_sycl_info();
+
+struct ggml_sycl_pool {
+ virtual ~ggml_sycl_pool() = default;
+
+ virtual void * alloc(size_t size, size_t * actual_size) = 0;
+ virtual void free(void * ptr, size_t size) = 0;
+};
+
+template<typename T>
+struct ggml_sycl_pool_alloc {
+ ggml_sycl_pool * pool = nullptr;
+ T * ptr = nullptr;
+ size_t actual_size = 0;
+
+ explicit ggml_sycl_pool_alloc(ggml_sycl_pool & pool) : pool(&pool) {
+ }
+
+ ggml_sycl_pool_alloc(ggml_sycl_pool & pool, size_t size) : pool(&pool) {
+ alloc(size);
+ }
+
+ ~ggml_sycl_pool_alloc() {
+ if (ptr != nullptr) {
+ pool->free(ptr, actual_size);
+ }
+ }
+
+ T * realloc(size_t size) {
+ GGML_ASSERT(pool != nullptr);
+ if (ptr)
+ pool->free(ptr, actual_size);
+ ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size);
+ return ptr;
+ }
+
+ // size is in number of elements
+ T * alloc(size_t size) {
+ GGML_ASSERT(pool != nullptr);
+ GGML_ASSERT(ptr == nullptr);
+ ptr = (T *) pool->alloc(size * sizeof(T), &this->actual_size);
+ return ptr;
+ }
+
+ T * alloc(ggml_sycl_pool & pool, size_t size) {
+ this->pool = &pool;
+ return alloc(size);
+ }
+
+ T * get() {
+ return ptr;
+ }
+
+ ggml_sycl_pool_alloc() = default;
+ ggml_sycl_pool_alloc(const ggml_sycl_pool_alloc &) = delete;
+ ggml_sycl_pool_alloc(ggml_sycl_pool_alloc &&) = delete;
+ ggml_sycl_pool_alloc& operator=(const ggml_sycl_pool_alloc &) = delete;
+ ggml_sycl_pool_alloc& operator=(ggml_sycl_pool_alloc &&) = delete;
+};
+
+// backend interface
+
+struct ggml_tensor_extra_gpu {
+ void* data_device[GGML_SYCL_MAX_DEVICES]; // 1 pointer for each device for split
+ // tensors
+ dpct::event_ptr events[GGML_SYCL_MAX_DEVICES]
+ [GGML_SYCL_MAX_STREAMS]; // events for synchronizing multiple GPUs
+ optimize_feature optimized_feature;
+};
+
+void release_extra_gpu(ggml_tensor_extra_gpu * extra, std::vector<queue_ptr> streams={});
+
+namespace sycl_ex = sycl::ext::oneapi::experimental;
+struct ggml_backend_sycl_context {
+ int device;
+ std::string name;
+ optimize_feature opt_feature;
+
+ queue_ptr qptrs[GGML_SYCL_MAX_DEVICES][GGML_SYCL_MAX_STREAMS] = { { nullptr } };
+
+ explicit ggml_backend_sycl_context(int device) :
+ device(device),
+ name(GGML_SYCL_NAME + std::to_string(device)) {
+ opt_feature = ggml_sycl_info().devices[device].opt_feature;
+ }
+
+ queue_ptr stream(int device, int stream) {
+ if (qptrs[device][stream] == nullptr) {
+ qptrs[device][stream] = &(dpct::get_device(device).default_queue());
+ }
+ return qptrs[device][stream];
+ }
+
+ queue_ptr stream() {
+ return stream(device, 0);
+ }
+
+#if GGML_SYCL_DNNL
+ dnnl::engine make_engine(sycl::queue* q) {
+ // Get the device associated with the queue
+ sycl::device dev = q->get_device();
+ // Get the context associated with the queue
+ sycl::context ctx = q->get_context();
+ const dnnl::engine eng = dnnl::sycl_interop::make_engine(dev, ctx);
+ return eng;
+ }
+
+ std::unordered_map<sycl::queue*, dnnl::stream> stream_map;
+ std::unordered_map<sycl::queue*, dnnl::engine> engine_map;
+ dnnl::stream stream_dnnl(int device, int _stream) {
+ auto q = stream(device, _stream);
+ return stream_dnnl(q);
+ }
+ dnnl::engine engine_dnnl(sycl::queue* qptr) {
+ auto it = engine_map.find(qptr);
+ if (it == engine_map.end()) {
+ auto eng = make_engine(qptr);
+ engine_map[qptr] = eng;
+ return eng;
+ }
+ else
+ {
+ return it->second;
+ }
+ }
+ dnnl::stream stream_dnnl(sycl::queue* qptr) {
+ auto it = stream_map.find(qptr);
+ if (it == stream_map.end()) {
+ auto eng = engine_dnnl(qptr);
+ auto stream = dnnl::sycl_interop::make_stream(eng, *qptr);
+ stream_map[qptr] = stream;
+ return stream;
+ }
+ else
+ {
+ return it->second;
+ }
+ }
+ dnnl::stream stream_dnnl() {
+ return stream_dnnl(device, 0);
+ }
+ dnnl::memory get_scratchpad_mem(const dnnl::memory::desc & scratchpad_md,
+ const dnnl::engine & eng, const queue_ptr q) {
+ ggml_sycl_pool_alloc<uint8_t> * pool;
+ auto it = scratchpad_map.find(q);
+ if (it == scratchpad_map.end()) {
+ scratchpad_map[q] = std::make_unique<ggml_sycl_pool_alloc<uint8_t>>(this->pool());
+ pool = scratchpad_map[q].get();
+ } else {
+ pool = it->second.get();
+ }
+
+ size_t scratchpad_size = scratchpad_md.get_size();
+ if (scratchpad_size > pool->actual_size) {
+ pool->realloc(scratchpad_size);
+ }
+ void * mem_ptr = pool->get();
+ return dnnl::memory(scratchpad_md, eng, mem_ptr);
+ }
+#endif
+
+ // pool
+ std::unique_ptr<ggml_sycl_pool> pools[GGML_SYCL_MAX_DEVICES];
+ std::unordered_map<sycl::queue *, std::unique_ptr<ggml_sycl_pool_alloc<uint8_t>>> scratchpad_map;
+
+ std::unique_ptr<ggml_sycl_pool> host_pools[GGML_SYCL_MAX_DEVICES];
+
+ static std::unique_ptr<ggml_sycl_pool> new_pool_for_device(queue_ptr qptr, int device);
+
+ static std::unique_ptr<ggml_sycl_pool> new_pool_for_host(queue_ptr qptr, int device);
+
+ ggml_sycl_pool & pool(int device) {
+ if (pools[device] == nullptr) {
+ pools[device] = new_pool_for_device(stream(device,0), device);
+ }
+ return *pools[device];
+ }
+
+ ggml_sycl_pool & pool() {
+ return pool(device);
+ }
+
+#ifdef GGML_SYCL_GRAPH
+ std::unique_ptr<sycl_ex::command_graph<sycl_ex::graph_state::executable>> exec_graph = nullptr;
+#endif
+
+ ggml_sycl_pool & host_pool(int device) {
+ if (host_pools[device] == nullptr) {
+ host_pools[device] = new_pool_for_host(stream(device, 0), device);
+ }
+ return *host_pools[device];
+ }
+
+ ggml_sycl_pool & host_pool() { return host_pool(device); }
+};
+
+// common device functions
+
+static __dpct_inline__ float warp_reduce_sum(float x,
+ const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ x += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), x, mask);
+ }
+ return x;
+}
+
+static __dpct_inline__ sycl::float2
+warp_reduce_sum(sycl::float2 a, const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ a.x() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.x(),
+ mask);
+ a.y() += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), a.y(),
+ mask);
+ }
+ return a;
+}
+
+template <int width = WARP_SIZE>
+static __dpct_inline__ int warp_reduce_sum(int x) {
+ return sycl::reduce_over_group(
+ sycl::ext::oneapi::this_work_item::get_sub_group(), x, sycl::plus<>());
+}
+
+template <int width = WARP_SIZE>
+static __dpct_inline__ float warp_reduce_sum(float x) {
+#pragma unroll
+ for (int offset = width / 2; offset > 0; offset >>= 1) {
+ x += dpct::permute_sub_group_by_xor(
+ sycl::ext::oneapi::this_work_item::get_sub_group(), x, offset, width);
+ }
+ return x;
+}
+
+template <int width = WARP_SIZE>
+static __dpct_inline__ sycl::float2 warp_reduce_sum(sycl::float2 a) {
+#pragma unroll
+ for (int offset = width / 2; offset > 0; offset >>= 1) {
+ a.x() += dpct::permute_sub_group_by_xor(
+ sycl::ext::oneapi::this_work_item::get_sub_group(), a.x(), offset,
+ width);
+ a.y() += dpct::permute_sub_group_by_xor(
+ sycl::ext::oneapi::this_work_item::get_sub_group(), a.y(), offset,
+ width);
+ }
+ return a;
+}
+
+template <int width = WARP_SIZE>
+static __dpct_inline__ sycl::half2 warp_reduce_sum(sycl::half2 a) {
+#pragma unroll
+ for (int offset = width / 2; offset > 0; offset >>= 1) {
+ a = a + dpct::permute_sub_group_by_xor(
+ sycl::ext::oneapi::this_work_item::get_sub_group(), a, offset,
+ width);
+ }
+ return a;
+}
+
+static constexpr int ggml_sycl_get_physical_warp_size() {
+ // todo: for old iGPU + dGPU case, need to be changed.
+ return WARP_SIZE;
+}
+
+template <int width = WARP_SIZE>
+static __dpct_inline__ float warp_reduce_max(float x) {
+#pragma unroll
+ for (int offset = width / 2; offset > 0; offset >>= 1) {
+ x = sycl::fmax(x, dpct::permute_sub_group_by_xor(
+ sycl::ext::oneapi::this_work_item::get_sub_group(), x,
+ offset, width));
+ }
+ return x;
+}
+
+static __dpct_inline__ float warp_reduce_max(float x,
+ const sycl::nd_item<3>& item_ct1) {
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ x = sycl::fmax(x, dpct::permute_sub_group_by_xor(
+ item_ct1.get_sub_group(), x, mask));
+ }
+ return x;
+}
+
+/* Helper for Computing the linear offset of a ggml_tensor given
+per-dimension sizes, strides, and indices */
+template<int N>
+__dpct_inline__ size_t calculate_offset(const std::array<int, N> & strides, const std::array<int, N> & indices) {
+ size_t offset = 0;
+#pragma unroll
+ for (int i = 0; i < N; i++) {
+ auto index_i = indices[i];
+ offset += strides[i] * index_i;
+ }
+ return offset;
+}
+
+// Helper for vec loading aligned data
+template <typename Tp, int n>
+inline sycl::vec<Tp, n> vec_aligned_load(const Tp* aligned_ptr) {
+ return *reinterpret_cast<const sycl::vec<Tp, n>*>(aligned_ptr);
+}
+
+// Helper for accessing pointers with no warnings
+template <typename Tp, int dim>
+static __dpct_inline__ Tp* get_pointer(sycl::local_accessor<Tp, dim> acc) {
+ return acc.template get_multi_ptr<sycl::access::decorated::no>().get();
+}
+
+int64_t downsample_sycl_global_range(int64_t accumulate_block_num, int64_t block_size);
+
+constexpr size_t ceil_div(const size_t m, const size_t n) {
+ return (m + n - 1) / n;
+}
+
+bool gpu_has_xmx(sycl::device &dev);
+
+template <int N, class T> std::string debug_get_array_str(const std::string & prefix, const T array[N]) {
+ if (LIKELY(!g_ggml_sycl_debug)) {
+ return "";
+ }
+ std::stringstream ss;
+ ss << prefix << "=[";
+ for (std::size_t i = 0; i < N - 1; ++i) {
+ ss << array[i] << ", ";
+ }
+ if constexpr (N > 0) {
+ ss << array[N - 1];
+ }
+ ss << "]";
+ return ss.str();
+}
+
+inline std::string debug_get_tensor_str(const std::string &prefix,
+ const ggml_tensor *tensor, const std::string &suffix = "") {
+ std::stringstream ss;
+ if (LIKELY(!g_ggml_sycl_debug)) { return ss.str(); }
+ ss << prefix.c_str() << "=";
+ if (tensor) {
+ ss << "'" << tensor->name << "':type=" << ggml_type_name(tensor->type);
+ ss << debug_get_array_str<GGML_MAX_DIMS>(";ne", tensor->ne);
+ ss << debug_get_array_str<GGML_MAX_DIMS>(";nb", tensor->nb);
+
+ if (!ggml_is_contiguous(tensor)) { ss << ";strided"; }
+ if (ggml_is_permuted(tensor)) { ss << ";permuted"; }
+ } else {
+ ss << "nullptr";
+ }
+ ss << suffix;
+ return ss.str();
+}
+
+// Use scope_op_debug_print to log operations coming from running a model
+struct scope_op_debug_print {
+ // Use string_views to avoid the cost of creating a string and concatenating them
+ // string_views must be alive for as long as the object is alive
+ // scope_op_debug_print are used with string literals in practice which are stored in constant space so always accessible
+ scope_op_debug_print(const std::string_view & func, const std::string_view & func_suffix, const ggml_tensor * dst,
+ std::size_t num_src, const std::string_view & suffix = "") :
+ func(func),
+ func_suffix(func_suffix) {
+ if (LIKELY(!g_ggml_sycl_debug)) {
+ return;
+ }
+ GGML_SYCL_DEBUG("[SYCL][OP] call %s%s:", func.data(), func_suffix.data());
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" dst", dst).c_str());
+ if (dst) {
+ for (std::size_t i = 0; i < num_src; ++i) {
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str("\tsrc" + std::to_string(i), dst->src[i]).c_str());
+ }
+ }
+ GGML_SYCL_DEBUG("%s\n", suffix.data());
+ }
+
+ scope_op_debug_print(const std::string_view & func, const ggml_tensor * dst, std::size_t num_src,
+ const std::string_view & suffix = "") :
+ scope_op_debug_print(func, "", dst, num_src, suffix) {}
+
+ ~scope_op_debug_print() { GGML_SYCL_DEBUG("[SYCL][OP] call %s%s done\n", func.data(), func_suffix.data()); }
+
+ private:
+ std::string_view func;
+ std::string_view func_suffix;
+};
+
+static __dpct_inline__ float get_alibi_slope(const float max_bias,
+ const uint32_t h,
+ const uint32_t n_head_log2,
+ const float m0,
+ const float m1) {
+ if (max_bias <= 0.0f) {
+ return 1.0f;
+ }
+ const float base = h < n_head_log2 ? m0 : m1;
+ const int exph = h < n_head_log2 ? h + 1 : 2*(h - n_head_log2) + 1;
+
+ return dpct::pow(base, exph);
+}
+
+static const sycl::uint3 init_fastdiv_values(uint32_t d) {
+ GGML_ASSERT(d != 0);
+
+ uint32_t L = 0;
+ while (L < 32 && (uint32_t{ 1 } << L) < d) {
+ L++;
+ }
+
+ uint32_t mp = (uint32_t) ((uint64_t{ 1 } << 32) * ((uint64_t{ 1 } << L) - d) / d + 1);
+ return sycl::uint3(mp, L, d);
+}
+
+
+static __dpct_inline__ uint32_t fastdiv(uint32_t n, const sycl::uint3 fastdiv_values) {
+ const uint32_t hi = sycl::mul_hi<unsigned>(n, fastdiv_values.x());
+ return (hi + n) >> fastdiv_values.y();
+}
+
+
+static __dpct_inline__ sycl::uint2 fast_div_modulo(uint32_t n, const sycl::uint3 fastdiv_values) {
+ const uint32_t div_val = fastdiv(n, fastdiv_values);
+ const uint32_t mod_val = n - div_val * fastdiv_values.z();
+ return sycl::uint2(div_val, mod_val);
+}
+
+static __dpct_inline__ int ggml_sycl_dp4a(const int a, const int b, int c) {
+ return dpct::dp4a(a, b, c);
+}
+
+static __dpct_inline__ float ggml_sycl_e8m0_to_fp32(uint8_t x) {
+ uint32_t bits;
+ if (x == 0) {
+ bits = 0x00400000;
+ } else {
+ bits = (uint32_t) x << 23;
+ }
+
+ float result;
+ memcpy(&result, &bits, sizeof(float));
+ return result;
+}
+
+
+#endif // GGML_SYCL_COMMON_HPP
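
Editor's note: init_fastdiv_values/fastdiv above implement Granlund–Montgomery style division by an invariant integer: precompute L = ceil(log2(d)) and the magic multiplier mp = floor(2^32 * (2^L - d) / d) + 1; then n / d == (mul_hi(n, mp) + n) >> L. The standalone sketch below replicates that arithmetic and checks it against ordinary division (illustrative only, not part of the diff):

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t d = 96; // arbitrary non-power-of-two divisor
    // replicate init_fastdiv_values(d)
    uint32_t L = 0;
    while (L < 32 && (uint32_t{ 1 } << L) < d) {
        L++;
    }
    const uint32_t mp =
        (uint32_t) ((uint64_t{ 1 } << 32) * ((uint64_t{ 1 } << L) - d) / d + 1);
    // replicate fastdiv(n, {mp, L, d}) and compare with n / d
    for (uint32_t n = 0; n < 1000000; ++n) {
        const uint32_t hi = (uint32_t) (((uint64_t) n * mp) >> 32); // mul_hi
        assert(((hi + n) >> L) == n / d);
    }
    return 0;
}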
diff --git a/llama.cpp/ggml/src/ggml-sycl/concat.cpp b/llama.cpp/ggml/src/ggml-sycl/concat.cpp
new file mode 100644
index 0000000..d16215b
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/concat.cpp
@@ -0,0 +1,202 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "concat.hpp"
+
+static inline size_t elem_size(ggml_type t) {
+ return ggml_type_size(t) / ggml_blck_size(t);
+}
+
+template <typename T>
+static void concat_T_dim0(const T *x, const T *y, T *dst,
+ const int ne0, const int ne00,
+ const sycl::nd_item<3> &item_ct1) {
+ int nidx = item_ct1.get_local_id(2) +
+ item_ct1.get_group(2) * item_ct1.get_local_range(2);
+ if (nidx >= ne0) {
+ return;
+ }
+ // operation
+ int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
+ item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
+ if (nidx < ne00) { // src0
+ int offset_src = nidx + item_ct1.get_group(1) * ne00 +
+ item_ct1.get_group(0) * ne00 * item_ct1.get_group_range(1);
+ dst[offset_dst] = x[offset_src];
+ } else {
+ int offset_src =
+ nidx - ne00 + item_ct1.get_group(1) * (ne0 - ne00) +
+ item_ct1.get_group(0) * (ne0 - ne00) * item_ct1.get_group_range(1);
+ dst[offset_dst] = y[offset_src];
+ }
+}
+
+template <typename T>
+static void concat_T_dim1(const T *x, const T *y, T *dst,
+ const int ne0, const int ne01,
+ const sycl::nd_item<3> &item_ct1) {
+ int nidx = item_ct1.get_local_id(2) +
+ item_ct1.get_group(2) * item_ct1.get_local_range(2);
+ if (nidx >= ne0) {
+ return;
+ }
+ // operation
+ int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
+ item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
+ if (item_ct1.get_group(1) < (size_t) ne01) { // src0
+ int offset_src =
+ nidx + item_ct1.get_group(1) * ne0 + item_ct1.get_group(0) * ne0 * ne01;
+ dst[offset_dst] = x[offset_src];
+ } else {
+ int offset_src =
+ nidx + (item_ct1.get_group(1) - ne01) * ne0 +
+ item_ct1.get_group(0) * ne0 * (item_ct1.get_group_range(1) - ne01);
+ dst[offset_dst] = y[offset_src];
+ }
+}
+
+template <typename T>
+static void concat_T_dim2(const T *x, const T *y, T *dst,
+ const int ne0, const int ne02,
+ const sycl::nd_item<3> &item_ct1) {
+ int nidx = item_ct1.get_local_id(2) +
+ item_ct1.get_group(2) * item_ct1.get_local_range(2);
+ if (nidx >= ne0) {
+ return;
+ }
+ // operation
+ int offset_dst = nidx + item_ct1.get_group(1) * ne0 +
+ item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
+ if (item_ct1.get_group(0) < (size_t) ne02) { // src0
+ int offset_src = nidx + item_ct1.get_group(1) * ne0 +
+ item_ct1.get_group(0) * ne0 * item_ct1.get_group_range(1);
+ dst[offset_dst] = x[offset_src];
+ } else {
+ int offset_src =
+ nidx + item_ct1.get_group(1) * ne0 +
+ (item_ct1.get_group(0) - ne02) * ne0 * item_ct1.get_group_range(1);
+ dst[offset_dst] = y[offset_src];
+ }
+}
+
+template <typename T>
+static void concat_T_sycl(const T *x, const T *y, T *dst,
+ int ne00, int ne01, int ne02, int ne0, int ne1,
+ int ne2, int dim, queue_ptr stream) {
+ int num_blocks = (ne0 + SYCL_CONCAT_BLOCK_SIZE - 1) / SYCL_CONCAT_BLOCK_SIZE;
+ sycl::range<3> gridDim(ne2, ne1, num_blocks);
+ switch (dim) {
+ case 0:
+ stream->parallel_for(sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) { concat_T_dim0<T>(x, y, dst, ne0, ne00, item_ct1); });
+ break;
+ case 1:
+ stream->parallel_for(sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) { concat_T_dim1<T>(x, y, dst, ne0, ne01, item_ct1); });
+ break;
+ // dim >=2 will be dispatched to the default path
+ default:
+ stream->parallel_for(sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CONCAT_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) { concat_T_dim2<T>(x, y, dst, ne0, ne02, item_ct1); });
+ break;
+ }
+}
+
+// non-contiguous kernel (slow)
+template<typename T>
+static void concat_T_sycl_non_cont(
+ queue_ptr stream, const char *src0, const char *src1, char *dst,
+ int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03, uint64_t nb00,
+ uint64_t nb01, uint64_t nb02, uint64_t nb03, int64_t /*ne10*/,
+ int64_t /*ne11*/, int64_t /*ne12*/, int64_t /*ne13*/, uint64_t nb10,
+ uint64_t nb11, uint64_t nb12, uint64_t nb13, int64_t ne0, int64_t ne1,
+ int64_t ne2, int64_t ne3, uint64_t nb0, uint64_t nb1, uint64_t nb2,
+ uint64_t nb3, int32_t dim) {
+ sycl::range<3> gridDim(ne3, ne2, ne1);
+ stream->parallel_for(sycl::nd_range<3>(gridDim, sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) {
+ int64_t i3 = item_ct1.get_group(0);
+ int64_t i2 = item_ct1.get_group(1);
+ int64_t i1 = item_ct1.get_group(2);
+
+ int64_t o[4] = { 0, 0, 0, 0 };
+ o[dim] = dim == 0 ? ne00 : (dim == 1 ? ne01 : (dim == 2 ? ne02 : ne03));
+
+ const T * x;
+
+ for (int i0 = item_ct1.get_local_id(2); i0 < ne0; i0 += item_ct1.get_local_range(2)) {
+ if (i0 < ne00 && i1 < ne01 && i2 < ne02 && i3 < ne03) {
+ x = (const T *) (src0 + (i3) *nb03 + (i2) *nb02 + (i1) *nb01 + (i0) *nb00);
+ } else {
+ x = (const T *) (src1 + (i3 - o[3]) * nb13 + (i2 - o[2]) * nb12 + (i1 - o[1]) * nb11 +
+ (i0 - o[0]) * nb10);
+ }
+
+ T *y = (T *)(dst + i3 * nb3 + i2 * nb2 + i1 * nb1 + i0 * nb0);
+
+ *y = *x;
+ }
+ });
+}
+
+template <typename T>
+void concat_impl_sycl(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+ queue_ptr stream = ctx.stream();
+
+ const int32_t dim = ((int32_t *) dst->op_params)[0];
+
+ if (ggml_is_contiguous(src0) && ggml_is_contiguous(src1)) {
+ const T * src0_d = (const T *) src0->data;
+ const T * src1_d = (const T *) src1->data;
+ T * dst_d = (T *) dst->data;
+ size_t type_size = elem_size(dst->type);
+ if (dim != 3) {
+ for (int i3 = 0; i3 < dst->ne[3]; i3++) {
+ concat_T_sycl<T>(src0_d + i3 * (src0->nb[3] / type_size), src1_d + i3 * (src1->nb[3] / type_size),
+ dst_d + i3 * (dst->nb[3] / type_size), src0->ne[0], src0->ne[1], src0->ne[2], dst->ne[0],
+ dst->ne[1], dst->ne[2], dim, stream);
+ }
+ } else {
+ const size_t size0 = ggml_nbytes(src0);
+ const size_t size1 = ggml_nbytes(src1);
+
+ SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(dst_d, src0_d, size0).wait()));
+ SYCL_CHECK(CHECK_TRY_ERROR(stream->memcpy(dst_d + size0 / type_size, src1_d, size1).wait()));
+ }
+ } else {
+ concat_T_sycl_non_cont<T>(stream, (const char *) src0->data, (const char *) src1->data, (char *) dst->data,
+ src0->ne[0], src0->ne[1], src0->ne[2], src0->ne[3], src0->nb[0], src0->nb[1],
+ src0->nb[2], src0->nb[3], src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3],
+ src1->nb[0], src1->nb[1], src1->nb[2], src1->nb[3], dst->ne[0], dst->ne[1], dst->ne[2],
+ dst->ne[3], dst->nb[0], dst->nb[1], dst->nb[2], dst->nb[3], dim);
+ }
+}
+
+void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+
+ switch (dst->type) {
+ case GGML_TYPE_F32:
+ concat_impl_sycl<float>(ctx, dst);
+ break;
+ case GGML_TYPE_I32:
+ concat_impl_sycl<int32_t>(ctx, dst);
+ break;
+ default:
+ GGML_ASSERT(false && "ggml_sycl_op_concat: unsupported type");
+ break;
+ }
+}
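
Editor's note: the three dim-specific kernels share one idea: copy from src0 while the coordinate along the concat dimension is inside src0's extent, otherwise copy from src1 shifted back by that extent. The dim-0 case as a hypothetical contiguous CPU sketch (not part of the diff):

#include <cstdint>

// dst rows are [ x row (ne00 elems) | y row (ne0 - ne00 elems) ], nrows = ne1 * ne2
template <typename T>
static void concat_dim0_reference(const T * x, const T * y, T * dst,
                                  int64_t ne00, int64_t ne0, int64_t nrows) {
    for (int64_t r = 0; r < nrows; ++r) {
        for (int64_t i = 0; i < ne0; ++i) {
            dst[r * ne0 + i] = (i < ne00) ? x[r * ne00 + i]
                                          : y[r * (ne0 - ne00) + (i - ne00)];
        }
    }
}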
diff --git a/llama.cpp/ggml/src/ggml-sycl/concat.hpp b/llama.cpp/ggml/src/ggml-sycl/concat.hpp
new file mode 100644
index 0000000..e5cb731
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/concat.hpp
@@ -0,0 +1,20 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_CONCAT_HPP
+#define GGML_SYCL_CONCAT_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_concat(ggml_backend_sycl_context & ctx, ggml_tensor *dst);
+
+#endif // GGML_SYCL_CONCAT_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/conv.cpp b/llama.cpp/ggml/src/ggml-sycl/conv.cpp
new file mode 100644
index 0000000..475bd34
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/conv.cpp
@@ -0,0 +1,101 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "conv.hpp"
+
+static void conv_transpose_1d_kernel(
+ const int s0, const int output_size,
+ const int src0_ne0, const int src0_ne1, const int src0_ne2,
+ const int src1_ne0, const int dst_ne0,
+ const float * src0, const float * src1, float * dst,
+ const sycl::nd_item<3> &item_ct1) {
+ int global_index = item_ct1.get_local_id(2) +
+ item_ct1.get_group(2) * item_ct1.get_local_range(2);
+ if (global_index >= output_size) {
+ return;
+ }
+
+ int out_index = global_index / dst_ne0;
+
+ float accumulator = 0;
+
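+    // accumulate the contributions of every input channel; for each channel, only
+    // input positions whose s0-strided kernel window covers idx contribute a product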
+ for (int c = 0; c < src0_ne2; c++) {
+ int idx = global_index % dst_ne0;
+
+ int kernel_offset = (src0_ne0 * src0_ne1 * c) + (out_index * src0_ne0);
+ int input_offset = src1_ne0 * c;
+
+ for (int i = 0; i < src1_ne0; i++) {
+ if (!(idx >= i*s0 && idx < i*s0 + src0_ne0)) {
+ continue;
+ }
+ int weight_idx = idx - i*s0;
+
+ float kernel_weight = src0[kernel_offset + weight_idx];
+ float input_value = src1[input_offset+i];
+
+ accumulator += kernel_weight * input_value;
+ }
+ }
+ dst[global_index] = accumulator;
+}
+
+static void conv_transpose_1d_f32_f32_sycl(
+ const int s0, const int output_size,
+ const int src0_ne0, const int src0_ne1, const int src0_ne2,
+ const int src1_ne0, const int dst_ne0,
+ const float *src0, const float *src1, float *dst,
+ const queue_ptr& stream) {
+
+ const int num_blocks = (output_size + SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE - 1) / SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE;
+ const sycl::range<3> block_dims(1, 1, SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE);
+ const sycl::range<3> block_nums(1, 1, num_blocks);
+ stream->parallel_for(
+ sycl::nd_range<3>(
+ block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ conv_transpose_1d_kernel(
+ s0, output_size,
+ src0_ne0, src0_ne1, src0_ne2,
+ src1_ne0, dst_ne0,
+ src0, src1, dst, item_ct1);
+ });
+}
+
+void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ const ggml_tensor *src0 = dst->src[0];
+ const ggml_tensor *src1 = dst->src[1];
+ const float * src0_d = (const float *)src0->data;
+ const float * src1_d = (const float *)src1->data;
+
+ float * dst_d = (float *)dst->data;
+ dpct::queue_ptr stream = ctx.stream();
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+ GGML_ASSERT(ggml_is_contiguous(src0));
+ GGML_ASSERT(ggml_is_contiguous(src1));
+
+ const int32_t * opts = (const int32_t *)dst->op_params;
+
+ const int s0 = opts[0];
+
+ const int64_t output_size = ggml_nelements(dst);
+
+ conv_transpose_1d_f32_f32_sycl(s0, output_size,
+ src0->ne[0], src0->ne[1], src0->ne[2],
+ src1->ne[0], dst->ne[0],
+ src0_d, src1_d, dst_d, stream);
+}
+
diff --git a/llama.cpp/ggml/src/ggml-sycl/conv.hpp b/llama.cpp/ggml/src/ggml-sycl/conv.hpp
new file mode 100644
index 0000000..f9e60dc
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/conv.hpp
@@ -0,0 +1,20 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_CONV_HPP
+#define GGML_SYCL_CONV_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_conv_transpose_1d(ggml_backend_sycl_context & ctx, ggml_tensor *dst);
+
+#endif // GGML_SYCL_CONV_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/convert.cpp b/llama.cpp/ggml/src/ggml-sycl/convert.cpp
new file mode 100644
index 0000000..8bdae36
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/convert.cpp
@@ -0,0 +1,676 @@
+#include "convert.hpp"
+#include "dequantize.hpp"
+#include "presets.hpp"
+
+#if defined(__INTEL_LLVM_COMPILER)
+ #if __has_include(<sycl/ext/oneapi/bfloat16.hpp>)
+ #include <sycl/ext/oneapi/bfloat16.hpp>
+ #define GGML_SYCL_HAS_BF16
+ #endif
+#endif
+
+template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
+static void dequantize_block(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t k,
+ const sycl::nd_item<3> &item_ct1) {
+ const int64_t i = 2 * (item_ct1.get_local_range(2) * item_ct1.get_group(2) +
+ item_ct1.get_local_id(2));
+
+ if (i >= k) {
+ return;
+ }
+
+ const int64_t ib = i/qk; // block index
+ const int64_t iqs = (i%qk)/qr; // quant index
+ const int64_t iybs = i - i%qk; // y block start index
+ const int64_t y_offset = qr == 1 ? 1 : qk/2;
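+    // qr == 2 types store each pair of dequantized values qk/2 apart within a
+    // block; qr == 1 types store them adjacently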
+
+ // dequantize
+ dfloat2 v;
+ dequantize_kernel(vx, ib, iqs, v);
+
+ y[iybs + iqs + 0] = v.x();
+ y[iybs + iqs + y_offset] = v.y();
+}
+
+template <int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
+static void dequantize_block_sycl(const void *__restrict__ vx,
+ dst_t *__restrict__ y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t num_blocks = (k + 2*SYCL_DEQUANTIZE_BLOCK_SIZE - 1) / (2*SYCL_DEQUANTIZE_BLOCK_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+ stream->parallel_for(
+ sycl::nd_range<3>(
+ sycl::range<3>(1, 1, num_blocks) *
+ sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_DEQUANTIZE_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block<qk, qr, dequantize_kernel>(vx, y, k, item_ct1);
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_q2_K_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+#if QK_K == 256
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 64),
+ sycl::range<3>(1, 1, 64)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q2_K(vx, y, item_ct1);
+ });
+ }
+#else
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q2_K(vx, y, item_ct1);
+ });
+ }
+
+#endif
+}
+
+template <typename dst_t>
+static void dequantize_row_q3_K_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+#if QK_K == 256
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 64),
+ sycl::range<3>(1, 1, 64)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q3_K(vx, y, item_ct1);
+ });
+ }
+#else
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q3_K(vx, y, item_ct1);
+ });
+ }
+#endif
+}
+
+template <typename dst_t>
+static void dequantize_row_q4_0_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb32 = k / 32;
+ const int64_t nb = (k + 255) / 256;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q4_0(vx, y, nb32, item_ct1);
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_q4_0_sycl_reorder(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ int constexpr WARP_K = WARP_SIZE * QK4_0;
+ const int n_warp = (k + WARP_K - 1) / WARP_K;
+ GGML_ASSERT(k % 2 == 0);
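+    // one sub-group (warp) covers WARP_K = WARP_SIZE * QK4_0 dequantized values of
+    // the reordered layout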
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, n_warp) *
+ sycl::range<3>(1, 1, WARP_SIZE),
+ sycl::range<3>(1, 1, WARP_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]]{
+ dequantize_block_q4_0_reorder(vx, y, k, item_ct1);
+ });
+
+}
+
+template <typename dst_t>
+static void dequantize_row_q4_1_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb32 = k / 32;
+ const int64_t nb = (k + 255) / 256;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q4_1(vx, y, nb32, item_ct1);
+ });
+ }
+}
+
+
+template <typename dst_t>
+static void dequantize_row_q4_K_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
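+            // stage the 12 packed q4_K scale/min bytes in local memory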
+ sycl::local_accessor<uint8_t, 1> scale_local_acc(sycl::range<1>(12), cgh);
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q4_K(vx, y, get_pointer(scale_local_acc), item_ct1);
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_q4_K_sycl_reorder(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ const size_t local_size = 32;
+ const size_t global_size = nb * local_size;
+
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ stream->submit([&](sycl::handler & cgh) {
+ sycl::local_accessor<uint8_t, 1> scale_local_acc(sycl::range<1>(12), cgh);
+
+ cgh.parallel_for(sycl::nd_range<1>(sycl::range<1>(global_size), sycl::range<1>(local_size)),
+ [=](sycl::nd_item<1> item_ct1) {
+ dequantize_block_q4_K_reorder(vx, y, get_pointer(scale_local_acc), item_ct1, nb);
+ });
+ });
+}
+
+template <typename dst_t>
+static void dequantize_row_q5_K_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+#if QK_K == 256
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 64),
+ sycl::range<3>(1, 1, 64)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q5_K(vx, y, item_ct1);
+ });
+ }
+#else
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q5_K(vx, y, item_ct1);
+ });
+ }
+
+#endif
+}
+
+template <typename dst_t>
+static void dequantize_row_q6_K_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+#if QK_K == 256
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 64),
+ sycl::range<3>(1, 1, 64)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q6_K(vx, y, item_ct1);
+ });
+ }
+#else
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_q6_K(vx, y, item_ct1);
+ });
+ }
+
+#endif
+}
+
+template <typename dst_t>
+static void dequantize_row_q6_K_sycl_reorder(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 64), sycl::range<3>(1, 1, 64)),
+ [=](sycl::nd_item<3> item_ct1) { dequantize_block_q6_K_reorder(vx, y, item_ct1, nb); });
+}
+
+template <typename dst_t>
+static void dequantize_row_iq1_s_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq1_s(
+ vx, y, item_ct1, iq1s_grid_gpu
+ );
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_iq1_m_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq1_m(
+ vx, y, item_ct1, iq1s_grid_gpu
+ );
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_iq2_xxs_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq2_xxs(
+ vx, y, item_ct1, iq2xxs_grid,
+ ksigns_iq2xs, kmask_iq2xs);
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_iq2_xs_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq2_xs(
+ vx, y, item_ct1, iq2xs_grid,
+ ksigns_iq2xs, kmask_iq2xs);
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_iq2_s_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq2_s(vx, y, item_ct1);
+ });
+ });
+ }
+}
+
+
+template <typename dst_t>
+static void dequantize_row_iq3_xxs_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq3_xxs(
+ vx, y, item_ct1, iq3xxs_grid,
+ ksigns_iq2xs, kmask_iq2xs);
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_iq3_s_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = k / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq3_s(
+ vx, y, item_ct1, kmask_iq2xs, iq3s_grid);
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_iq4_xs_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = (k + QK_K - 1) / QK_K;
+#if QK_K == 64
+ dequantize_row_iq4_nl_sycl(vx, y, k, stream);
+#else
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq4_xs(vx, y, item_ct1);
+ });
+ });
+ }
+#endif
+}
+
+template <typename dst_t>
+static void dequantize_row_iq4_nl_sycl(const void *vx, dst_t *y, const int64_t k,
+ dpct::queue_ptr stream) {
+ const int64_t nb = (k + QK_K - 1) / QK_K;
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nb) *
+ sycl::range<3>(1, 1, 32),
+ sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_iq4_nl(vx, y, item_ct1);
+ });
+ });
+ }
+}
+
+template <typename dst_t>
+static void dequantize_row_mxfp4_sycl(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr stream) {
+ const int nb = (k + QK_K - 1) / QK_K;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nb) * sycl::range<3>(1, 1, 32), sycl::range<3>(1, 1, 32)),
+ [=](sycl::nd_item<3> item_ct1) {
+ dequantize_block_mxfp4(vx, y, item_ct1);
+ });
+}
+
+template <typename src_t, typename dst_t>
+static void convert_unary_nc(const void * __restrict__ vx, dst_t * __restrict__ y, const int64_t ne00, const int64_t ne01,
+ const int64_t ne02, const int64_t s01, const int64_t s02, const int64_t s03,
+ const sycl::nd_item<3> & item_ct1) {
+
+ const int64_t work_group_size = item_ct1.get_local_range(2);
+ const int64_t global_id = item_ct1.get_local_id(2) + work_group_size * item_ct1.get_group(2);
+
+ const int64_t i01 = item_ct1.get_group(1);
+ const int64_t i02 = item_ct1.get_group(0) % ne02;
+ const int64_t i03 = item_ct1.get_group(0) / ne02;
+
+    // have each work-item handle multiple elements, since the SYCL global range cannot exceed INT_MAX
+ const src_t * x = static_cast<const src_t *>(vx);
+ const int64_t ix = i03 * s03 + i02 * s02 + i01 * s01;
+ const int64_t iy = ((i03 * ne02 + i02) * ne01 + i01) * ne00;
+
+#pragma unroll
+ for (int64_t i00 = global_id; i00 < ne00; i00 += work_group_size * item_ct1.get_group_range(2)) {
+ y[iy + i00] = static_cast<dst_t>(x[ix + i00]);
+ }
+}
+
+template <typename src_t, typename dst_t>
+static void convert_unary_nc_sycl(const void * __restrict__ vx, dst_t * __restrict__ y,
+ const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
+ const int64_t s01, const int64_t s02, const int64_t s03, dpct::queue_ptr queue) {
+ dpct::has_capability_or_fail(queue->get_device(), { sycl::aspect::fp16 });
+
+ sycl::range<3> global_size(ne02 * ne03, ne01, ceil_div(ne00, SYCL_DEQUANTIZE_BLOCK_SIZE));
+
+    // reduce the global range when it would exceed INT_MAX
+    // TODO: the downsampling logic is separate from the kernel; a rewrite is desirable
+ int64_t downsized_workgroup = downsample_sycl_global_range(global_size[0], SYCL_DEQUANTIZE_BLOCK_SIZE);
+ sycl::range<3> workgroup_size(1, 1, downsized_workgroup);
+
+ queue->parallel_for(sycl::nd_range<3>(global_size * workgroup_size, workgroup_size), [=](sycl::nd_item<3> item_ct1) {
+ convert_unary_nc<src_t>(vx, y, ne00, ne01, ne02, s01, s02, s03, item_ct1);
+ });
+}
+
+template <typename src_t, typename dst_t>
+static void convert_unary_sycl(const void * vx, dst_t * y, const int64_t k, dpct::queue_ptr queue) {
+ convert_unary_nc_sycl<src_t>(vx, y, k, 1, 1, 1, k, k, k, queue);
+}
+
+
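+// Select a dequantize/convert-to-fp16 kernel for the given type. Q4_0, Q4_K and
+// Q6_K have reorder-optimized variants, chosen when the source tensor's extra
+// flags mark its data as reordered.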
+to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type, ggml_tensor * dst) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ if (dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) {
+ return dequantize_row_q4_0_sycl_reorder;
+ } else {
+ return dequantize_block_sycl<QK4_0, QR4_0, dequantize_q4_0>;
+ }
+ case GGML_TYPE_Q4_1:
+ return dequantize_block_sycl<QK4_1, QR4_1, dequantize_q4_1>;
+ case GGML_TYPE_Q5_0:
+ return dequantize_block_sycl<QK5_0, QR5_0, dequantize_q5_0>;
+ case GGML_TYPE_Q5_1:
+ return dequantize_block_sycl<QK5_1, QR5_1, dequantize_q5_1>;
+ case GGML_TYPE_Q8_0:
+ return dequantize_block_sycl<QK8_0, QR8_0, dequantize_q8_0>;
+ case GGML_TYPE_Q2_K:
+ return dequantize_row_q2_K_sycl;
+ case GGML_TYPE_Q3_K:
+ return dequantize_row_q3_K_sycl;
+ case GGML_TYPE_Q4_K:
+ if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) {
+ return dequantize_row_q4_K_sycl_reorder;
+ } else {
+ return dequantize_row_q4_K_sycl;
+ }
+ case GGML_TYPE_Q5_K:
+ return dequantize_row_q5_K_sycl;
+ case GGML_TYPE_Q6_K:
+ if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) {
+ return dequantize_row_q6_K_sycl_reorder;
+ } else {
+ return dequantize_row_q6_K_sycl;
+ }
+ case GGML_TYPE_IQ1_S:
+ return dequantize_row_iq1_s_sycl;
+ case GGML_TYPE_IQ1_M:
+ return dequantize_row_iq1_m_sycl;
+ case GGML_TYPE_IQ2_XXS:
+ return dequantize_row_iq2_xxs_sycl;
+ case GGML_TYPE_IQ2_XS:
+ return dequantize_row_iq2_xs_sycl;
+ case GGML_TYPE_IQ2_S:
+ return dequantize_row_iq2_s_sycl;
+ case GGML_TYPE_IQ3_XXS:
+ return dequantize_row_iq3_xxs_sycl;
+ case GGML_TYPE_IQ3_S:
+ return dequantize_row_iq3_s_sycl;
+ case GGML_TYPE_IQ4_XS:
+ return dequantize_row_iq4_xs_sycl;
+ case GGML_TYPE_IQ4_NL:
+ return dequantize_row_iq4_nl_sycl;
+ case GGML_TYPE_MXFP4:
+ return dequantize_row_mxfp4_sycl;
+ case GGML_TYPE_F32:
+ return convert_unary_sycl<float>;
+#ifdef GGML_SYCL_HAS_BF16
+ case GGML_TYPE_BF16:
+ return convert_unary_sycl<sycl::ext::oneapi::bfloat16>;
+#endif
+ default:
+ return nullptr;
+ }
+}
+
+to_fp32_sycl_t ggml_get_to_fp32_sycl(ggml_type type, ggml_tensor *dst) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ if (dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) {
+ return dequantize_row_q4_0_sycl_reorder;
+ } else {
+ return dequantize_row_q4_0_sycl;
+ }
+ case GGML_TYPE_Q4_1:
+ return dequantize_row_q4_1_sycl;
+ case GGML_TYPE_Q5_0:
+ return dequantize_block_sycl<QK5_0, QR5_0, dequantize_q5_0>;
+ case GGML_TYPE_Q5_1:
+ return dequantize_block_sycl<QK5_1, QR5_1, dequantize_q5_1>;
+ case GGML_TYPE_Q8_0:
+ return dequantize_block_sycl<QK8_0, QR8_0, dequantize_q8_0>;
+ case GGML_TYPE_Q2_K:
+ return dequantize_row_q2_K_sycl;
+ case GGML_TYPE_Q3_K:
+ return dequantize_row_q3_K_sycl;
+ case GGML_TYPE_Q4_K:
+ if (dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) {
+ return dequantize_row_q4_K_sycl_reorder;
+ } else {
+ return dequantize_row_q4_K_sycl;
+ }
+ case GGML_TYPE_Q5_K:
+ return dequantize_row_q5_K_sycl;
+ case GGML_TYPE_Q6_K:
+ if (dst->src[0]->extra && ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) {
+ return dequantize_row_q6_K_sycl_reorder;
+ } else {
+ return dequantize_row_q6_K_sycl;
+ }
+ case GGML_TYPE_IQ1_S:
+ return dequantize_row_iq1_s_sycl;
+ case GGML_TYPE_IQ1_M:
+ return dequantize_row_iq1_m_sycl;
+ case GGML_TYPE_IQ2_XXS:
+ return dequantize_row_iq2_xxs_sycl;
+ case GGML_TYPE_IQ2_XS:
+ return dequantize_row_iq2_xs_sycl;
+ case GGML_TYPE_IQ2_S:
+ return dequantize_row_iq2_s_sycl;
+ case GGML_TYPE_IQ3_XXS:
+ return dequantize_row_iq3_xxs_sycl;
+ case GGML_TYPE_IQ3_S:
+ return dequantize_row_iq3_s_sycl;
+ case GGML_TYPE_IQ4_XS:
+ return dequantize_row_iq4_xs_sycl;
+ case GGML_TYPE_IQ4_NL:
+ return dequantize_row_iq4_nl_sycl;
+ case GGML_TYPE_MXFP4:
+ return dequantize_row_mxfp4_sycl;
+ case GGML_TYPE_F16:
+ return convert_unary_sycl<sycl::half>;
+#ifdef GGML_SYCL_HAS_BF16
+ case GGML_TYPE_BF16:
+ return convert_unary_sycl<sycl::ext::oneapi::bfloat16>;
+#endif
+ default:
+ return nullptr;
+ }
+}
+
+to_fp16_nc_sycl_t get_to_fp16_nc_sycl(ggml_type type) {
+ switch (type) {
+ case GGML_TYPE_F32:
+ return convert_unary_nc_sycl<float>;
+#ifdef GGML_SYCL_HAS_BF16
+ case GGML_TYPE_BF16:
+ return convert_unary_nc_sycl<sycl::ext::oneapi::bfloat16>;
+#endif
+ default:
+ return nullptr;
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/convert.hpp b/llama.cpp/ggml/src/ggml-sycl/convert.hpp
new file mode 100644
index 0000000..f8cb573
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/convert.hpp
@@ -0,0 +1,34 @@
+//
+// MIT license
+// Copyright (C) 2025 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_CONVERT_HPP
+#define GGML_SYCL_CONVERT_HPP
+
+#include "common.hpp"
+
+template <typename T>
+using to_t_sycl_t = void (*)(const void * __restrict__ x, T * __restrict__ y, int64_t k, dpct::queue_ptr stream);
+typedef to_t_sycl_t<float> to_fp32_sycl_t;
+typedef to_t_sycl_t<sycl::half> to_fp16_sycl_t;
+
+to_fp16_sycl_t ggml_get_to_fp16_sycl(ggml_type type, ggml_tensor * dst);
+to_fp32_sycl_t ggml_get_to_fp32_sycl(ggml_type type, ggml_tensor * dst);
+
+// Nc = Non-contiguous
+template <typename T>
+using to_t_nc_sycl_t = void (*)(const void * x, T * y, int64_t ne00, int64_t ne01, int64_t ne02, int64_t ne03,
+ int64_t s01, int64_t s02, int64_t s03, dpct::queue_ptr queue);
+
+typedef to_t_nc_sycl_t<sycl::half> to_fp16_nc_sycl_t;
+to_fp16_nc_sycl_t get_to_fp16_nc_sycl(ggml_type type);
+
+#endif // GGML_SYCL_CONVERT_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/count-equal.cpp b/llama.cpp/ggml/src/ggml-sycl/count-equal.cpp
new file mode 100644
index 0000000..b0a8b48
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/count-equal.cpp
@@ -0,0 +1,79 @@
+#include "count-equal.hpp"
+
+#include <cstdint>
+
+template <typename T>
+static void count_equal(const T *__restrict__ x, const T *__restrict__ y,
+ int64_t *__restrict__ dst, const int64_t dk,
+ const int64_t k) {
+ auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>();
+ const int64_t i0 = (int64_t)item_ct1.get_group(2) * dk;
+ const int64_t i1 = sycl::min(i0 + dk, k);
+
+ int nequal = 0;
+
+ for (int64_t i = i0 + item_ct1.get_local_id(2); i < i1; i += WARP_SIZE) {
+ const T xi = x[i];
+ const T yi = y[i];
+ nequal += xi == yi;
+ }
+
+ nequal = warp_reduce_sum(nequal);
+
+ if (item_ct1.get_local_id(2) != 0) {
+ return;
+ }
+
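+    // lane 0 publishes the work-group's partial count with a 32-bit atomic add;
+    // the ne < 2^30 assert in the caller keeps this safe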
+ dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
+ (int *)dst, nequal);
+}
+
+void ggml_sycl_count_equal(ggml_backend_sycl_context &ctx, ggml_tensor *dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(src0->type == src1->type);
+ GGML_ASSERT( dst->type == GGML_TYPE_I64);
+
+ GGML_ASSERT(ggml_are_same_shape(src0, src1));
+ GGML_ASSERT(ggml_is_contiguous(src0));
+ GGML_ASSERT(ggml_is_contiguous(src1));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+
+ int64_t * dst_d = (int64_t *) dst->data;
+
+ dpct::queue_ptr stream = ctx.stream();
+ const int id = get_current_device_id();
+ const int nsm = ggml_sycl_info().devices[id].nsm;
+
+ const int64_t ne = ggml_nelements(src0);
+ GGML_ASSERT(ne < (1 << 30) && "atomicAdd implementation only supports int");
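+    // split the elements into contiguous chunks of dne, padded to a multiple of the
+    // chunk size, so at most 4 * nsm work-groups are launched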
+ const int64_t dne =
+ GGML_PAD((ne + 4 * nsm - 1) / (4 * nsm), SYCL_COUNT_EQUAL_CHUNK_SIZE);
+
+ SYCL_CHECK(CHECK_TRY_ERROR(stream->memset(dst_d, 0, ggml_nbytes(dst))));
+
+ const dpct::dim3 block_dims(WARP_SIZE, 1, 1);
+ const dpct::dim3 block_nums(
+ std::min((int64_t)4 * nsm, (ne + SYCL_COUNT_EQUAL_CHUNK_SIZE - 1) /
+ SYCL_COUNT_EQUAL_CHUNK_SIZE),
+ 1, 1);
+
+ switch (src0->type) {
+ case GGML_TYPE_I32: {
+ const int *src0_d = (const int *)src0->data;
+ const int *src1_d = (const int *)src1->data;
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ count_equal(src0_d, src1_d, dst_d, dne, ne);
+ GGML_UNUSED(item_ct1);
+ });
+
+ } break;
+ default:
+ GGML_ASSERT(false);
+ break;
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/count-equal.hpp b/llama.cpp/ggml/src/ggml-sycl/count-equal.hpp
new file mode 100644
index 0000000..f7f4fcb
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/count-equal.hpp
@@ -0,0 +1,9 @@
+#ifndef GGML_SYCL_COUNT_EQUAL_HPP
+#define GGML_SYCL_COUNT_EQUAL_HPP
+#include "common.hpp"
+
+#define SYCL_COUNT_EQUAL_CHUNK_SIZE 128
+
+void ggml_sycl_count_equal(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif //GGML_SYCL_COUNT_EQUAL_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/cpy.cpp b/llama.cpp/ggml/src/ggml-sycl/cpy.cpp
new file mode 100644
index 0000000..9670955
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/cpy.cpp
@@ -0,0 +1,602 @@
+#include "cpy.hpp"
+
+#include <float.h>
+
+#include "dequantize.hpp"
+#include "ggml-sycl/common.hpp"
+#include "ggml-sycl/presets.hpp"
+#include "ggml.h"
+
+
+static void cpy_1_f32_f32(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ float * dsti = (float *) cdsti;
+
+ *dsti = *xi;
+}
+
+static void cpy_1_f32_f16(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ sycl::half * dsti = (sycl::half *) cdsti;
+
+ *dsti = sycl::vec<float, 1>(*xi).convert<sycl::half, sycl::rounding_mode::automatic>()[0];
+}
+
+static void cpy_1_f16_f16(const char * cxi, char * cdsti) {
+ const sycl::half * xi = (const sycl::half *) cxi;
+ sycl::half * dsti = (sycl::half *) cdsti;
+
+ *dsti = *xi;
+}
+
+static void cpy_1_f16_f32(const char * cxi, char * cdsti) {
+ const sycl::half * xi = (const sycl::half *) cxi;
+ float * dsti = (float *) cdsti;
+
+ *dsti = *xi;
+}
+
+static void cpy_1_i16_i16(const char * cxi, char * cdsti) {
+ const int16_t * xi = (const int16_t *) cxi;
+ int16_t * dsti = (int16_t *) cdsti;
+
+ *dsti = *xi;
+}
+
+static void cpy_1_i32_i32(const char * cxi, char * cdsti) {
+ const int32_t * xi = (const int32_t *) cxi;
+ int32_t * dsti = (int32_t *) cdsti;
+
+ *dsti = *xi;
+}
+
+template <cpy_kernel_t cpy_1>
+static void cpy_f32_f16(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02,
+ const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11,
+ const int ne12, const int nb10, const int nb11, const int nb12, const int nb13,
+ const sycl::nd_item<3> & item_ct1) {
+ const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2);
+
+ if (i >= ne) {
+ return;
+ }
+
+ // determine indices i02/i12, i01/i11, i00/i10 as a function of index i of flattened tensor
+ // then combine those indices with the corresponding byte offsets to get the total offsets
+ const int i03 = i / (ne00 * ne01 * ne02);
+ const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+ const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00;
+ const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00;
+ const int x_offset = i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03;
+
+ const int i13 = i / (ne10 * ne11 * ne12);
+ const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11);
+ const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10;
+ const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10;
+ const int dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13;
+
+ cpy_1(cx + x_offset, cdst + dst_offset);
+}
+
+
+/* same-type copy of one quantized block */
+template<typename T>
+static void cpy_blck_q_q(const char * cxi, char * cdsti) {
+ const T * xi = (const T *) cxi;
+ T * dsti = (T *) cdsti;
+ *dsti = *xi;
+}
+
+
+static void cpy_blck_q8_0_f32(const char * cxi, char * cdsti) {
+ float * cdstf = (float *) (cdsti);
+
+ for (int j = 0; j < QK8_0; j += 2) {
+ dfloat2 dq;
+ dequantize_q8_0(cxi, 0, j, dq);
+ *(cdstf + j) = dq.x();
+ *(cdstf + j + 1) = dq.y();
+ }
+}
+
+
+
+template <dequantize_kernel_t dequant, int qk> static void cpy_blck_q_f32(const char * cxi, char * cdsti) {
+ float * cdstf = (float *) (cdsti);
+
+ for (int j = 0; j < qk / 2; j++) {
+ dfloat2 dq;
+ dequant(cxi, 0, j, dq);
+ *(cdstf + j) = dq.x();
+ *(cdstf + j + qk / 2) = dq.y();
+ }
+}
+
+
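+// same-type quantized copy kernel: one work-item per block, with the flat element
+// index advancing in steps of qk and mapped to per-block byte offsets on both sides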
+template <typename T, int qk>
+static void cpy_q_q(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02,
+ const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11,
+ const int ne12, const int nb10, const int nb11, const int nb12, const int nb13,
+ const sycl::nd_item<3> & item_ct1) {
+ const int i = (item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2)) * qk;
+
+ if (i >= ne) {
+ return;
+ }
+
+ const int i03 = i / (ne00 * ne01 * ne02);
+ const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+ const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00;
+ const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00;
+ const int x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03;
+
+
+ const int i13 = i / (ne10 * ne11 * ne12);
+ const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11);
+ const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10;
+ const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10;
+ const int dst_offset = (i10 / qk) * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13;
+
+ cpy_blck_q_q<T>(cx + x_offset, cdst + dst_offset);
+}
+
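+// f32 -> quantized: one work-item quantizes a run of qk source floats into one destination block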
+template <cpy_kernel_t cpy_blck, int qk>
+static void cpy_f32_q(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02,
+ const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11,
+ const int ne12, const int nb10, const int nb11, const int nb12, const int nb13,
+ const sycl::nd_item<3> & item_ct1) {
+ const int i = (item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2)) * qk;
+
+ if (i >= ne) {
+ return;
+ }
+
+
+ const int i03 = i / (ne00 * ne01 * ne02);
+ const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+ const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00;
+ const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00;
+ const int x_offset = i00 * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03;
+
+ const int i13 = i / (ne10 * ne11 * ne12);
+ const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11);
+ const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10;
+ const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10;
+ const int dst_offset = (i10 / qk) * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13;
+
+ cpy_blck(cx + x_offset, cdst + dst_offset);
+}
+
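+// quantized -> f32: one work-item dequantizes one source block into qk destination floats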
+template <cpy_kernel_t cpy_blck, int qk>
+static void cpy_q_f32(const char * cx, char * cdst, const int ne, const int ne00, const int ne01, const int ne02,
+ const int nb00, const int nb01, const int nb02, const int nb03, const int ne10, const int ne11,
+ const int ne12, const int nb10, const int nb11, const int nb12, const int nb13,
+ const sycl::nd_item<3> & item_ct1) {
+ const int i = (item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2)) * qk;
+
+ if (i >= ne) {
+ return;
+ }
+
+ const int i03 = i / (ne00 * ne01 * ne02);
+ const int i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+ const int i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00) / ne00;
+ const int i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne01 * ne00 - i01 * ne00;
+ const int x_offset = (i00 / qk) * nb00 + i01 * nb01 + i02 * nb02 + i03 * nb03;
+
+ const int i13 = i / (ne10 * ne11 * ne12);
+ const int i12 = (i - i13 * ne10 * ne11 * ne12) / (ne10 * ne11);
+ const int i11 = (i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11) / ne10;
+ const int i10 = i - i13 * ne10 * ne11 * ne12 - i12 * ne10 * ne11 - i11 * ne10;
+ const int dst_offset = i10 * nb10 + i11 * nb11 + i12 * nb12 + i13 * nb13;
+
+ cpy_blck(cx + x_offset, cdst + dst_offset);
+}
+
+static void ggml_cpy_f16_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
+ {
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_f16<cpy_1_f16_f32>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12,
+ nb10, nb11, nb12, nb13, item_ct1);
+ });
+ }
+}
+
+static void ggml_cpy_f32_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
+ {
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_f16<cpy_1_f32_f32>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12,
+ nb10, nb11, nb12, nb13, item_ct1);
+ });
+ }
+}
+
+static void ggml_cpy_f32_f16_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
+ {
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_f16<cpy_1_f32_f16>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12,
+ nb10, nb11, nb12, nb13, item_ct1);
+ });
+ }
+}
+
+static void ggml_cpy_f32_q8_0_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ GGML_ASSERT(ne % QK8_0 == 0);
+ const int num_blocks = ne / QK8_0;
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_q<cpy_blck_f32_q8_0, QK8_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03,
+ ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+static void ggml_cpy_q8_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ne;
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_f32<cpy_blck_q8_0_f32, QK8_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03,
+ ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+static void ggml_cpy_f32_q4_0_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ GGML_ASSERT(ne % QK4_0 == 0);
+ const int num_blocks = ne / QK4_0;
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_q<cpy_blck_f32_q4_0, QK4_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03,
+ ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+static void ggml_cpy_q4_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ne;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_f32<cpy_blck_q_f32<dequantize_q4_0, QK4_0>, QK4_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
+ nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
+ item_ct1);
+ });
+}
+
+static void ggml_cpy_f32_q4_1_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ GGML_ASSERT(ne % QK4_1 == 0);
+ const int num_blocks = ne / QK4_1;
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_q<cpy_blck_f32_q4_1, QK4_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03,
+ ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+static void ggml_cpy_q4_1_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ne;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_f32<cpy_blck_q_f32<dequantize_q4_1, QK4_1>, QK4_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
+ nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
+ item_ct1);
+ });
+}
+
+static void ggml_cpy_f32_q5_0_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ GGML_ASSERT(ne % QK5_0 == 0);
+ const int num_blocks = ne / QK5_0;
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_q<cpy_blck_f32_q5_0, QK5_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03,
+ ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+static void ggml_cpy_q5_0_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ne;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_f32<cpy_blck_q_f32<dequantize_q5_0, QK5_0>, QK5_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
+ nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
+ item_ct1);
+ });
+}
+
+static void ggml_cpy_f32_q5_1_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ GGML_ASSERT(ne % QK5_1 == 0);
+ const int num_blocks = ne / QK5_1;
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_q<cpy_blck_f32_q5_1, QK5_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03,
+ ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+static void ggml_cpy_q5_1_f32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ne;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_f32<cpy_blck_q_f32<dequantize_q5_1, QK5_1>, QK5_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02,
+ nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13,
+ item_ct1);
+ });
+}
+
+static void ggml_cpy_f32_iq4_nl_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ GGML_ASSERT(ne % QK4_NL == 0);
+ const int num_blocks = ne / QK4_NL;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks), sycl::range<3>(1, 1, 1)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_q<cpy_blck_f32_iq4_nl, QK4_NL>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11,
+ ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+static void ggml_cpy_f16_f16_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
+ {
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_f16<cpy_1_f16_f16>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12,
+ nb10, nb11, nb12, nb13, item_ct1);
+ });
+ }
+}
+
+static void ggml_cpy_i16_i16_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
+ {
+ // dpct::has_capability_or_fail(stream->get_device(),
+ // {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_f16<cpy_1_i16_i16>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12,
+ nb10, nb11, nb12, nb13, item_ct1);
+ });
+ }
+}
+
+static void ggml_cpy_i32_i32_sycl(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = (ne + SYCL_CPY_BLOCK_SIZE - 1) / SYCL_CPY_BLOCK_SIZE;
+ {
+ // dpct::has_capability_or_fail(stream->get_device(),
+ // {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ cpy_f32_f16<cpy_1_i32_i32>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12,
+ nb10, nb11, nb12, nb13, item_ct1);
+ });
+ }
+}
+
+static void ggml_cpy_q8_0_q8_0(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_q<block_q8_0, QK8_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+
+static void ggml_cpy_q5_0_q5_0(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_q<block_q5_0, QK5_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+
+static void ggml_cpy_q5_1_q5_1(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE);
+
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_q<block_q5_1, QK5_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+
+static void ggml_cpy_q4_0_q4_0(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+ const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_q<block_q4_0, QK4_0>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+
+static void ggml_cpy_q4_1_q4_1(const char * cx, char * cdst, const int ne, const int ne00, const int ne01,
+ const int ne02, const int nb00, const int nb01, const int nb02, const int nb03,
+ const int ne10, const int ne11, const int ne12, const int nb10, const int nb11,
+ const int nb12, const int nb13, queue_ptr stream) {
+
+ const int num_blocks = ceil_div(ne, SYCL_CPY_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE), sycl::range<3>(1, 1, SYCL_CPY_BLOCK_SIZE)), [=](sycl::nd_item<3> item_ct1) {
+ cpy_q_q<block_q4_1, QK4_1>(cx, cdst, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10, nb11, nb12, nb13, item_ct1);
+ });
+}
+
+void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1) try {
+    // Unlike other operators, ggml_sycl_cpy takes two distinct tensors rather than a single dst ggml_tensor whose src field supplies the source
+ scope_op_debug_print scope_dbg_print(__func__, src1, /*num_src=*/0, debug_get_tensor_str("\tsrc0", src0));
+ const int64_t ne = ggml_nelements(src0);
+ GGML_ASSERT(ne == ggml_nelements(src1));
+
+ GGML_TENSOR_BINARY_OP_LOCALS01;
+
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ queue_ptr main_stream = ctx.stream();
+
+ char * src0_ddc = (char *) src0->data;
+ char * src1_ddc = (char *) src1->data;
+ if ((src0->type == src1->type) && (ggml_is_contiguous(src0) && ggml_is_contiguous(src1))) {
+ GGML_SYCL_DEBUG("%s: memcpy path\n", __func__);
+ main_stream->memcpy(src1_ddc, src0_ddc, ggml_nbytes(src0));
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F32) {
+ ggml_cpy_f32_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_F16) {
+ ggml_cpy_f32_f16_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q8_0) {
+ ggml_cpy_f32_q8_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_0) {
+ ggml_cpy_f32_q4_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q4_1) {
+ ggml_cpy_f32_q4_1_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F32) {
+ ggml_cpy_f16_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F16 && src1->type == GGML_TYPE_F16) {
+ ggml_cpy_f16_f16_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_I16 && src1->type == GGML_TYPE_I16) {
+ ggml_cpy_i16_i16_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_I32 && src1->type == GGML_TYPE_I32) {
+ ggml_cpy_i32_i32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q4_0 && src1->type == GGML_TYPE_F32) {
+ ggml_cpy_q4_0_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q4_1 && src1->type == GGML_TYPE_F32) {
+ ggml_cpy_q4_1_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q8_0 && src1->type == GGML_TYPE_F32) {
+ ggml_cpy_q8_0_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_0) {
+ ggml_cpy_f32_q5_0_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q5_0 && src1->type == GGML_TYPE_F32) {
+ ggml_cpy_q5_0_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_Q5_1) {
+ ggml_cpy_f32_q5_1_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q5_1 && src1->type == GGML_TYPE_F32) {
+ ggml_cpy_q5_1_f32_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_F32 && src1->type == GGML_TYPE_IQ4_NL) {
+ ggml_cpy_f32_iq4_nl_sycl(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12,
+ nb10, nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q8_0 && src1->type == GGML_TYPE_Q8_0) {
+ ggml_cpy_q8_0_q8_0(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q5_0 && src1->type == GGML_TYPE_Q5_0) {
+ ggml_cpy_q5_0_q5_0(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q5_1 && src1->type == GGML_TYPE_Q5_1) {
+ ggml_cpy_q5_1_q5_1(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q4_0 && src1->type == GGML_TYPE_Q4_0) {
+ ggml_cpy_q4_0_q4_0(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else if (src0->type == GGML_TYPE_Q4_1 && src1->type == GGML_TYPE_Q4_1) {
+ ggml_cpy_q4_1_q4_1(src0_ddc, src1_ddc, ne, ne00, ne01, ne02, nb00, nb01, nb02, nb03, ne10, ne11, ne12, nb10,
+ nb11, nb12, nb13, main_stream);
+ } else {
+ GGML_LOG_ERROR("%s: unsupported type combination (%s to %s)\n", __func__, ggml_type_name(src0->type),
+ ggml_type_name(src1->type));
+ GGML_ABORT("fatal error");
+ }
+} catch (const sycl::exception & exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+void ggml_sycl_dup(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_cpy(ctx, dst->src[0], dst);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/cpy.hpp b/llama.cpp/ggml/src/ggml-sycl/cpy.hpp
new file mode 100644
index 0000000..3c331f1
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/cpy.hpp
@@ -0,0 +1,223 @@
+#ifndef GGML_SYCL_CPY_HPP
+#define GGML_SYCL_CPY_HPP
+
+#include "common.hpp"
+#include <float.h>
+
+typedef void (*cpy_kernel_t)(const char * cx, char * cdst);
+
+__dpct_inline__ int best_index_int8(int n, const int8_t * val, float x) {
+ if (x <= val[0]) {
+ return 0;
+ }
+ if (x >= val[n - 1]) {
+ return n - 1;
+ }
+ int ml = 0, mu = n - 1;
+ while (mu - ml > 1) {
+ int mav = (ml + mu) / 2;
+ if (x < val[mav]) {
+ mu = mav;
+ } else {
+ ml = mav;
+ }
+ }
+ return x - val[mu - 1] < val[mu] - x ? mu - 1 : mu;
+}
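+
+// Illustrative walk-through of the bisection above, assuming the usual
+// iq4_nl table kvalues_iq4nl = { -127, -104, -83, -65, -49, -35, -22, -10,
+// 1, 13, 25, 38, 53, 69, 89, 113 }: best_index_int8(16, kvalues_iq4nl, 30.0f)
+// narrows (ml, mu) from (0, 15) to (7, 15), (7, 11), (9, 11), (10, 11),
+// and the final tie-break returns index 10 since 30 - 25 < 38 - 30.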
+
+inline void cpy_blck_f32_q8_0(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ block_q8_0 * dsti = (block_q8_0 *) cdsti;
+
+ float amax = 0.0f; // absolute max
+
+ for (int j = 0; j < QK8_0; j++) {
+ const float v = xi[j];
+ amax = sycl::fmax(amax, sycl::fabs((float) v));
+ }
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f / d : 0.0f;
+
+ dsti->d = d;
+
+ for (int j = 0; j < QK8_0; ++j) {
+ const float x0 = xi[j] * id;
+
+ dsti->qs[j] = sycl::round((float) x0);
+ }
+}
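+
+// Worked example of the scheme above (illustrative numbers): if the block's
+// maximum magnitude is amax = 2.54f, then d = 2.54f / 127 = 0.02f and
+// id = 50.0f; an input of 1.27f stores round(1.27f * 50.0f) = 64, and
+// dequantizing later yields 64 * 0.02f = 1.28f, an error of at most d/2.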
+
+inline void cpy_blck_f32_q4_0(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ block_q4_0 * dsti = (block_q4_0 *) cdsti;
+
+ float amax = 0.0f;
+ float vmax = 0.0f;
+
+ for (int j = 0; j < QK4_0; ++j) {
+ const float v = xi[j];
+ if (amax < sycl::fabs((float) v)) {
+ amax = sycl::fabs((float) v);
+ vmax = v;
+ }
+ }
+
+ const float d = vmax / -8;
+ const float id = d ? 1.0f / d : 0.0f;
+
+ dsti->d = d;
+
+ for (int j = 0; j < QK4_0 / 2; ++j) {
+ const float x0 = xi[0 + j] * id;
+ const float x1 = xi[QK4_0 / 2 + j] * id;
+
+ const uint8_t xi0 = dpct::min(15, (int8_t) (x0 + 8.5f));
+ const uint8_t xi1 = dpct::min(15, (int8_t) (x1 + 8.5f));
+
+ dsti->qs[j] = xi0;
+ dsti->qs[j] |= xi1 << 4;
+ }
+}
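+
+// Worked example (illustrative): q4_0 stores 16 levels for -8..7 around a
+// scale chosen so the signed extreme maps to level -8. With vmax = -4.0f,
+// d = -4.0f / -8 = 0.5f and id = 2.0f, so x = -4.0f quantizes to
+// min(15, (int8_t)(-8.0f + 8.5f)) = 0; dequantizing gives (0 - 8) * 0.5f
+// = -4.0f exactly.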
+
+inline void cpy_blck_f32_q4_1(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ block_q4_1 * dsti = (block_q4_1 *) cdsti;
+
+ float vmin = FLT_MAX;
+ float vmax = -FLT_MAX;
+
+ for (int j = 0; j < QK4_1; ++j) {
+ const float v = xi[j];
+
+ vmin = sycl::min(v, vmin);
+ vmax = sycl::max(v, vmax);
+ }
+
+ const float d = (vmax - vmin) / ((1 << 4) - 1);
+ const float id = d ? 1.0f / d : 0.0f;
+
+ dsti->dm.x() = d;
+ dsti->dm.y() = vmin;
+
+ for (int j = 0; j < QK4_1 / 2; ++j) {
+ const float x0 = (xi[0 + j] - vmin) * id;
+ const float x1 = (xi[QK4_1 / 2 + j] - vmin) * id;
+
+ const uint8_t xi0 = dpct::min(15, (int8_t) (x0 + 0.5f));
+ const uint8_t xi1 = dpct::min(15, (int8_t) (x1 + 0.5f));
+
+ dsti->qs[j] = xi0;
+ dsti->qs[j] |= xi1 << 4;
+ }
+}
+
+inline void cpy_blck_f32_q5_0(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ block_q5_0 * dsti = (block_q5_0 *) cdsti;
+
+ float amax = 0.0f;
+ float vmax = 0.0f;
+
+ for (int j = 0; j < QK5_0; ++j) {
+ const float v = xi[j];
+ if (amax < sycl::fabs((float) v)) {
+ amax = sycl::fabs((float) v);
+ vmax = v;
+ }
+ }
+
+ const float d = vmax / -16;
+ const float id = d ? 1.0f / d : 0.0f;
+
+ dsti->d = d;
+
+ uint32_t qh = 0;
+ for (int j = 0; j < QK5_0 / 2; ++j) {
+ const float x0 = xi[0 + j] * id;
+ const float x1 = xi[QK5_0 / 2 + j] * id;
+
+ const uint8_t xi0 = dpct::min(31, (int8_t) (x0 + 16.5f));
+ const uint8_t xi1 = dpct::min(31, (int8_t) (x1 + 16.5f));
+
+ dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0 / 2);
+ }
+ memcpy(dsti->qh, &qh, sizeof(qh));
+}
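+
+// The qh word above collects the 5th bit of every level: bit j for the
+// first half of the block and bit j + QK5_0/2 for the second. The inverse,
+// as used by dequantize_q5_0 later in this commit, recovers them with
+//   xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;  // 5th bit of the low nibble
+//   xh_1 = ((qh >> (iqs + 12)))      & 0x10;  // 5th bit of the high nibble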
+
+inline void cpy_blck_f32_q5_1(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ block_q5_1 * dsti = (block_q5_1 *) cdsti;
+
+ float min = xi[0];
+ float max = xi[0];
+
+ for (int j = 1; j < QK5_1; ++j) {
+ const float v = xi[j];
+ min = v < min ? v : min;
+ max = v > max ? v : max;
+ }
+
+ const float d = (max - min) / 31;
+ const float id = d ? 1.0f / d : 0.0f;
+
+ dsti->dm.x() = d;
+ dsti->dm.y() = min;
+
+ uint32_t qh = 0;
+ for (int j = 0; j < QK5_1 / 2; ++j) {
+ const float x0 = (xi[0 + j] - min) * id;
+ const float x1 = (xi[QK5_1 / 2 + j] - min) * id;
+
+ const uint8_t xi0 = (uint8_t) (x0 + 0.5f);
+ const uint8_t xi1 = (uint8_t) (x1 + 0.5f);
+
+ dsti->qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1 / 2);
+ }
+ memcpy(dsti->qh, &qh, sizeof(qh));
+}
+
+inline void cpy_blck_f32_iq4_nl(const char * cxi, char * cdsti) {
+ const float * xi = (const float *) cxi;
+ block_iq4_nl * dsti = (block_iq4_nl *) cdsti;
+
+ float amax = 0.0f;
+ float vmax = 0.0f;
+
+ for (int j = 0; j < QK4_NL; ++j) {
+ const float v = xi[j];
+ if (amax < sycl::fabs((float) v)) {
+ amax = sycl::fabs((float) v);
+ vmax = v;
+ }
+ }
+
+ float d = vmax / kvalues_iq4nl[0];
+ const float id = d ? 1.0f / d : 0.0f;
+
+ float sumqx = 0, sumq2 = 0;
+ for (int j = 0; j < QK4_NL / 2; ++j) {
+ const float x0 = xi[0 + j] * id;
+ const float x1 = xi[QK4_NL / 2 + j] * id;
+ const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl, x0);
+ const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl, x1);
+ dsti->qs[j] = xi0 | (xi1 << 4);
+ const float v0 = kvalues_iq4nl[xi0];
+ const float v1 = kvalues_iq4nl[xi1];
+ const float w0 = xi[0 + j] * xi[0 + j];
+ const float w1 = xi[QK4_NL / 2 + j] * xi[QK4_NL / 2 + j];
+ sumqx += w0 * v0 * xi[j] + w1 * v1 * xi[QK4_NL / 2 + j];
+ sumq2 += w0 * v0 * v0 + w1 * v1 * v1;
+ }
+
+ dsti->d = sumq2 > 0 ? sumqx / sumq2 : d;
+}
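+
+// The final scale above is a closed-form weighted least-squares refit:
+// with weights w_i = x_i^2 and chosen lattice values q_i, the d minimizing
+// sum_i w_i * (x_i - d * q_i)^2 is sum(w*q*x) / sum(w*q*q), i.e.
+// sumqx / sumq2; the initial guess vmax / kvalues_iq4nl[0] is kept only
+// when the denominator is not positive.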
+
+void ggml_sycl_cpy(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1);
+void ggml_sycl_dup(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_CPY_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp b/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp
new file mode 100644
index 0000000..da2a605
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/dequantize.hpp
@@ -0,0 +1,841 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_DEQUANTIZE_HPP
+#define GGML_SYCL_DEQUANTIZE_HPP
+
+#include "common.hpp"
+
+typedef void (*dequantize_kernel_t)(const void * vx, const int64_t ib, const int iqs, dfloat2 & v);
+typedef void (*dequantize_kernel_t_reorder)(const void *d, const int64_t ib, const void *qs,
+ const int iqs, dfloat2 &v);
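+
+// Contract sketch for the two typedefs above: a dequantize_kernel_t reads
+// the two quantized weights of block ib at intra-block index iqs from vx
+// and stores their dequantized values in v; the _reorder variant receives
+// the scale region (d) and the quant region (qs) as separate pointers,
+// matching the split block layouts used by the *_reorder kernels below.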
+
+static __dpct_inline__ void dequantize_q4_0(const void *vx, const int64_t ib,
+ const int iqs, dfloat2 &v) {
+ const block_q4_0 * x = (const block_q4_0 *) vx;
+
+ const dfloat d = x[ib].d;
+
+ const int vui = x[ib].qs[iqs];
+
+ v.x() = vui & 0xF;
+ v.y() = vui >> 4;
+
+#ifdef GGML_SYCL_F16
+ // v = v - {8.0f, 8.0f};
+ // v = v * {d, d};
+ v.s0() = (v.s0() - 8.0f) * d;
+ v.s1() = (v.s1() - 8.0f) * d;
+
+#else
+ v.x() = (v.x() - 8.0f) * d;
+ v.y() = (v.y() - 8.0f) * d;
+#endif // GGML_SYCL_F16
+}
+
+static __dpct_inline__ void dequantize_q4_0_reorder(const void *d_ptr, const int64_t ib, const void *qs,
+ const int iqs, dfloat2 &v) {
+ // const block_q4_0 * x = (const block_q4_0 *) vx;
+
+ const dfloat d = (const dfloat)*((const sycl::half*)d_ptr+ib);
+
+ const int vui = *((const uint8_t *)qs+iqs);
+
+ v.x() = vui & 0xF;
+ v.y() = vui >> 4;
+
+#ifdef GGML_SYCL_F16
+ // v = v - {8.0f, 8.0f};
+ // v = v * {d, d};
+ v.s0() = (v.s0() - 8.0f) * d;
+ v.s1() = (v.s1() - 8.0f) * d;
+
+#else
+ v.x() = (v.x() - 8.0f) * d;
+ v.y() = (v.y() - 8.0f) * d;
+#endif // GGML_SYCL_F16
+}
+
+static __dpct_inline__ void dequantize_q4_1(const void *vx, const int64_t ib,
+ const int iqs, dfloat2 &v) {
+ const block_q4_1 * x = (const block_q4_1 *) vx;
+
+ const dfloat d = x[ib].dm[0];
+ const dfloat m = x[ib].dm[1];
+
+ const int vui = x[ib].qs[iqs];
+
+ v.x() = vui & 0xF;
+ v.y() = vui >> 4;
+
+#ifdef GGML_SYCL_F16
+ // v = v * {d, d};
+ // v = v + {m, m};
+ v.s0() = sycl::fma(v.s0(), d, m);
+ v.s1() = sycl::fma(v.s1(), d, m);
+
+#else
+ v.x() = sycl::fma(v.x(), d, m);
+ v.y() = sycl::fma(v.y(), d, m);
+#endif // GGML_SYCL_F16
+}
+
+static __dpct_inline__ void dequantize_q5_0(const void *vx, const int64_t ib,
+ const int iqs, dfloat2 &v) {
+ const block_q5_0 * x = (const block_q5_0 *) vx;
+
+ const dfloat d = x[ib].d;
+
+ uint32_t qh;
+ memcpy(&qh, x[ib].qh, sizeof(qh));
+
+ const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
+ const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
+
+ v.x() = ((x[ib].qs[iqs] & 0xf) | xh_0);
+ v.y() = ((x[ib].qs[iqs] >> 4) | xh_1);
+
+#ifdef GGML_SYCL_F16
+ // v = v - {16.0f, 16.0f};
+ // v = v * {d, d};
+ v.s0() = (v.s0() - 16.0f) * d;
+ v.s1() = (v.s1() - 16.0f) * d;
+
+#else
+ v.x() = (v.x() - 16.0f) * d;
+ v.y() = (v.y() - 16.0f) * d;
+#endif // GGML_SYCL_F16
+}
+
+static __dpct_inline__ void dequantize_q5_1(const void *vx, const int64_t ib,
+ const int iqs, dfloat2 &v) {
+ const block_q5_1 * x = (const block_q5_1 *) vx;
+
+ const dfloat d = x[ib].dm[0];
+ const dfloat m = x[ib].dm[1];
+
+ uint32_t qh;
+ memcpy(&qh, x[ib].qh, sizeof(qh));
+
+ const int xh_0 = ((qh >> (iqs + 0)) << 4) & 0x10;
+ const int xh_1 = ((qh >> (iqs + 12)) ) & 0x10;
+
+ v.x() = ((x[ib].qs[iqs] & 0xf) | xh_0);
+ v.y() = ((x[ib].qs[iqs] >> 4) | xh_1);
+
+#ifdef GGML_SYCL_F16
+ // v = v * {d, d};
+ // v = v + {m, m};
+ v.s0() = sycl::fma(v.s0(), d, m);
+ v.s1() = sycl::fma(v.s1(), d, m);
+#else
+ v.x() = sycl::fma(v.x(), d, m);
+ v.y() = sycl::fma(v.y(), d, m);
+#endif // GGML_SYCL_F16
+}
+
+static __dpct_inline__ void dequantize_q8_0(const void *vx, const int64_t ib,
+ const int iqs, dfloat2 &v) {
+ const block_q8_0 * x = (const block_q8_0 *) vx;
+
+ const dfloat d = x[ib].d;
+
+ v.x() = x[ib].qs[iqs + 0];
+ v.y() = x[ib].qs[iqs + 1];
+
+#ifdef GGML_SYCL_F16
+ // v = v * {d, d};
+ v.s0() *= d;
+ v.s1() *= d;
+#else
+ v.x() *= d;
+ v.y() *= d;
+#endif // GGML_SYCL_F16
+}
+
+template<typename dst_t>
+static void dequantize_block_q4_0(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int64_t i = item_ct1.get_group(2);
+
+ // assume 32 threads
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t il = tid/8;
+ const int64_t ir = tid%8;
+ const int64_t ib = 8*i + ir;
+ if (ib >= nb32) {
+ return;
+ }
+
+ dst_t * y = yy + 256*i + 32*ir + 4*il;
+
+ const block_q4_0 * x = (const block_q4_0 *)vx + ib;
+ const float d = sycl::vec<sycl::half, 1>(x->d)
+ .convert<float, sycl::rounding_mode::automatic>()[0];
+ const float dm = -8*d;
+
+ const uint8_t * q = x->qs + 4*il;
+
+ for (int l = 0; l < 4; ++l) {
+ y[l+ 0] = d * (q[l] & 0xF) + dm;
+ y[l+16] = d * (q[l] >> 4) + dm;
+ }
+}
+
+template<typename dst_t>
+static void dequantize_block_q4_0_reorder(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int64_t i = item_ct1.get_group(2);
+ auto k = nb32; // despite its name, nb32 carries the total element count k for the reorder layout
+ // assume 32 threads
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int lane_ib = i * WARP_SIZE + tid;
+
+ if (lane_ib >= k / QK4_0) {
+ return;
+ }
+
+ dst_t * y_ptr = yy + lane_ib * QK4_0;
+
+ auto qs = (const uint8_t*)vx + lane_ib * QK4_0 / 2;
+ auto s_ptr = (const sycl::half*)((const uint8_t*)vx + k / 2) + lane_ib;
+
+ const float d = float(*s_ptr);
+
+#pragma unroll
+ for (int l = 0; l < QK4_0 / 2; ++l) {
+ int vq = qs[l];
+ y_ptr[l + 0] = d * ((vq & 0xF) - 8);
+ y_ptr[l + 16] = d * ((vq >> 4) - 8);
+ }
+
+}
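+
+// Layout sketch for the reordered q4_0 buffer consumed above, assuming
+// nblocks = k / QK4_0: instead of an interleaved array of block_q4_0
+// structs, all quantized nibbles come first and all fp16 scales follow,
+//
+//   [ qs: nblocks * QK4_0/2 bytes ][ d: nblocks * sizeof(sycl::half) ]
+//
+// so the qs region is k/2 bytes long, which is where the scale pointer
+// (vx + k/2) + lane_ib above comes from.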
+
+template<typename dst_t>
+static void dequantize_block_q4_1(const void * __restrict__ vx, dst_t * __restrict__ yy, int64_t nb32,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int64_t i = item_ct1.get_group(2);
+
+ // assume 32 threads
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t il = tid/8;
+ const int64_t ir = tid%8;
+ const int64_t ib = 8*i + ir;
+ if (ib >= nb32) {
+ return;
+ }
+
+ dst_t * y = yy + 256*i + 32*ir + 4*il;
+
+ const block_q4_1 * x = (const block_q4_1 *)vx + ib;
+ const sycl::float2 d =
+ x->dm.convert<float, sycl::rounding_mode::automatic>();
+
+ const uint8_t * q = x->qs + 4*il;
+
+ for (int l = 0; l < 4; ++l) {
+ y[l + 0] = d.x() * (q[l] & 0xF) + d.y();
+ y[l + 16] = d.x() * (q[l] >> 4) + d.y();
+ }
+}
+
+
+//================================== k-quants
+
+template<typename dst_t>
+static void dequantize_block_q2_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_q2_K * x = (const block_q2_K *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t n = tid/32;
+ const int64_t l = tid - 32*n;
+ const int64_t is = 8*n + l/16;
+
+ const uint8_t q = x[i].qs[32*n + l];
+ dst_t * y = yy + i*QK_K + 128*n;
+
+ float dall = x[i].dm[0];
+ float dmin = x[i].dm[1];
+ y[l+ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
+ y[l+32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 2) & 3) - dmin * (x[i].scales[is+2] >> 4);
+ y[l+64] = dall * (x[i].scales[is+4] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+4] >> 4);
+ y[l+96] = dall * (x[i].scales[is+6] & 0xF) * ((q >> 6) & 3) - dmin * (x[i].scales[is+6] >> 4);
+#else
+ const int64_t is = tid/16; // 0 or 1
+ const int64_t il = tid%16; // 0...15
+ const uint8_t q = x[i].qs[il] >> (2*is);
+ dst_t * y = yy + i*QK_K + 16*is + il;
+
+ float dall = x[i].dm[0];
+ float dmin = x[i].dm[1];
+ y[ 0] = dall * (x[i].scales[is+0] & 0xF) * ((q >> 0) & 3) - dmin * (x[i].scales[is+0] >> 4);
+ y[32] = dall * (x[i].scales[is+2] & 0xF) * ((q >> 4) & 3) - dmin * (x[i].scales[is+2] >> 4);
+#endif
+
+}
+
+template<typename dst_t>
+static void dequantize_block_q3_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_q3_K * x = (const block_q3_K *) vx;
+
+#if QK_K == 256
+ const int64_t r = item_ct1.get_local_id(2) / 4;
+ const int64_t tid = r/2;
+ const int64_t is0 = r%2;
+ const int64_t l0 = 16 * is0 + 4 * (item_ct1.get_local_id(2) % 4);
+ const int64_t n = tid / 4;
+ const int64_t j = tid - 4*n;
+
+ uint8_t m = 1 << (4*n + j);
+ int64_t is = 8*n + 2*j + is0;
+ int shift = 2*j;
+
+ int8_t us = is < 4 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+8] >> 0) & 3) << 4) :
+ is < 8 ? (x[i].scales[is-0] & 0xF) | (((x[i].scales[is+4] >> 2) & 3) << 4) :
+ is < 12 ? (x[i].scales[is-8] >> 4) | (((x[i].scales[is+0] >> 4) & 3) << 4) :
+ (x[i].scales[is-8] >> 4) | (((x[i].scales[is-4] >> 6) & 3) << 4);
+ float d_all = x[i].d;
+ float dl = d_all * (us - 32);
+
+ dst_t * y = yy + i*QK_K + 128*n + 32*j;
+ const uint8_t * q = x[i].qs + 32*n;
+ const uint8_t * hm = x[i].hmask;
+
+ for (int l = l0; l < l0+4; ++l) y[l] = dl * ((int8_t)((q[l] >> shift) & 3) - ((hm[l] & m) ? 0 : 4));
+#else
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t is = tid/16; // 0 or 1
+ const int64_t il = tid%16; // 0...15
+ const int64_t im = il/8; // 0...1
+ const int64_t in = il%8; // 0...7
+
+ dst_t * y = yy + i*QK_K + 16*is + il;
+
+ const uint8_t q = x[i].qs[il] >> (2*is);
+ const uint8_t h = x[i].hmask[in] >> (2*is + im);
+ const float d = (float)x[i].d;
+
+ if (is == 0) {
+ y[ 0] = d * ((x[i].scales[0] & 0xF) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
+ y[32] = d * ((x[i].scales[1] & 0xF) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
+ } else {
+ y[ 0] = d * ((x[i].scales[0] >> 4) - 8) * ((int8_t)((q >> 0) & 3) - ((h >> 0) & 1 ? 0 : 4));
+ y[32] = d * ((x[i].scales[1] >> 4) - 8) * ((int8_t)((q >> 4) & 3) - ((h >> 4) & 1 ? 0 : 4));
+ }
+#endif
+
+}
+
+#if QK_K == 256
+static inline void get_scale_min_k4(int j, const uint8_t * q, uint8_t & d, uint8_t & m) {
+ if (j < 4) {
+ d = q[j] & 63;
+ m = q[j + 4] & 63;
+ } else {
+ d = (q[j+4] & 0xF) | ((q[j-4] >> 6) << 4);
+ m = (q[j+4] >> 4) | ((q[j-0] >> 6) << 4);
+ }
+}
+#endif
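+
+// The 12 scale bytes decoded above pack eight 6-bit (scale, min) pairs:
+// pairs 0..3 occupy the low 6 bits of bytes 0..3 (scales) and 4..7 (mins);
+// pairs 4..7 take their low 4 bits from the nibbles of bytes 8..11 and
+// their top 2 bits from the high 2 bits of bytes 0..7, which is what the
+// j >= 4 branch reassembles.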
+
+template <typename dst_t>
+inline void dequantize_q4_K_common(dst_t * __restrict__ y, const uint8_t * __restrict__ qs_ptr, const float dall,
+ const float dmin, uint8_t * __restrict__ scales_local, int il, int ir) {
+ const int is = 2 * il;
+ constexpr int n = 4;
+
+ uint8_t sc, m;
+ get_scale_min_k4(is + 0, scales_local, sc, m);
+ const float d1 = dall * sc;
+ const float m1 = dmin * m;
+
+ get_scale_min_k4(is + 1, scales_local, sc, m);
+ const float d2 = dall * sc;
+ const float m2 = dmin * m;
+
+ sycl::vec<uint8_t, n> q_vec = vec_aligned_load<uint8_t, n>(qs_ptr + 32 * il + n * ir);
+ for (int l = 0; l < n; ++l) {
+ y[l + 0] = d1 * (q_vec[l] & 0xF) - m1;
+ y[l + 32] = d2 * (q_vec[l] >> 4) - m2;
+ }
+}
+
+template<typename dst_t>
+static void dequantize_block_q4_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ uint8_t* scales_local, const sycl::nd_item<3> &item_ct1) {
+ const block_q4_K * x = (const block_q4_K *) vx;
+
+ const int64_t i = item_ct1.get_group(2);
+
+#if QK_K == 256
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t il = tid / 8;
+ const int64_t ir = tid % 8;
+
+ dst_t * y = yy + i * QK_K + 64 * il + 4 * ir;
+
+ const sycl::half2 dm = x[i].dm;
+ const float dall = dm[0];
+ const float dmin = dm[1];
+
+ if (tid < 12) {
+ scales_local[tid] = x[i].scales[tid];
+ }
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+ dequantize_q4_K_common(y, x[i].qs, dall, dmin, scales_local, il, ir);
+#else
+ const int64_t tid = item_ct1.get_local_id(2);
+ const uint8_t * q = x[i].qs;
+ dst_t * y = yy + i*QK_K;
+ const float d = (float)x[i].dm[0];
+ const float m = (float)x[i].dm[1];
+ y[tid+ 0] = d * (x[i].scales[0] & 0xF) * (q[tid] & 0xF) - m * (x[i].scales[0] >> 4);
+ y[tid+32] = d * (x[i].scales[1] & 0xF) * (q[tid] >> 4) - m * (x[i].scales[1] >> 4);
+#endif
+}
+
+template <typename dst_t>
+static void dequantize_block_q4_K_reorder(const void * __restrict__ vx, dst_t * __restrict__ yy, uint8_t * scales_local,
+ const sycl::nd_item<1> & item_ct1, int64_t nb) {
+ const int64_t i = item_ct1.get_group(0); // block index
+ const int64_t tid = item_ct1.get_local_id(0); // thread index within block
+ const int64_t il = tid / 8;
+ const int64_t ir = tid % 8;
+
+ dst_t * y = yy + i * QK_K + 64 * il + 4 * ir;
+
+ const uint8_t * base = static_cast<const uint8_t *>(vx);
+ const size_t qs_offset = i * (QK_K / 2);
+ const size_t scales_offset = nb * (QK_K / 2) + i * K_SCALE_SIZE;
+ const size_t dm_offset = nb * (QK_K / 2) + nb * K_SCALE_SIZE + i * sizeof(ggml_half2);
+
+ const uint8_t * qs_ptr = base + qs_offset;
+ const uint8_t * scales_ptr = base + scales_offset;
+ ggml_half2 dm_values = *reinterpret_cast<const ggml_half2 *>(base + dm_offset);
+
+ const float dall = dm_values.x();
+ const float dmin = dm_values.y();
+
+ if (tid < 12) {
+ scales_local[tid] = scales_ptr[tid];
+ }
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+ dequantize_q4_K_common(y, qs_ptr, dall, dmin, scales_local, il, ir);
+}
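+
+// Reordered q4_K layout assumed by the offsets above: for nb blocks the
+// buffer holds three contiguous regions instead of an array of structs,
+//
+//   [ qs: nb * QK_K/2 ][ scales: nb * K_SCALE_SIZE ][ dm: nb * sizeof(ggml_half2) ]
+//
+// so each field of block i sits at its region base plus an i-scaled offset.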
+
+template<typename dst_t>
+static void dequantize_block_q5_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+ const block_q5_K * x = (const block_q5_K *) vx;
+
+ const int64_t i = item_ct1.get_group(2);
+
+#if QK_K == 256
+ // assume 64 threads - this is very slightly better than the one below
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t il = tid/16; // il is in 0...3
+ const int64_t ir = tid%16; // ir is in 0...15
+ const int64_t is = 2*il; // is is in 0...6
+
+ dst_t * y = yy + i*QK_K + 64*il + 2*ir;
+
+ const float dall = x[i].dm[0];
+ const float dmin = x[i].dm[1];
+
+ const uint8_t * ql = x[i].qs + 32*il + 2*ir;
+ const uint8_t * qh = x[i].qh + 2*ir;
+
+ uint8_t sc, m;
+ get_scale_min_k4(is + 0, x[i].scales, sc, m);
+ const float d1 = dall * sc; const float m1 = dmin * m;
+ get_scale_min_k4(is + 1, x[i].scales, sc, m);
+ const float d2 = dall * sc; const float m2 = dmin * m;
+
+ uint8_t hm = 1 << (2*il);
+ y[ 0] = d1 * ((ql[ 0] & 0xF) + (qh[ 0] & hm ? 16 : 0)) - m1;
+ y[ 1] = d1 * ((ql[ 1] & 0xF) + (qh[ 1] & hm ? 16 : 0)) - m1;
+ hm <<= 1;
+ y[32] = d2 * ((ql[ 0] >> 4) + (qh[ 0] & hm ? 16 : 0)) - m2;
+ y[33] = d2 * ((ql[ 1] >> 4) + (qh[ 1] & hm ? 16 : 0)) - m2;
+#else
+ const int64_t tid = item_ct1.get_local_id(2);
+ const uint8_t q = x[i].qs[tid];
+ const int64_t im = tid/8; // 0...3
+ const int64_t in = tid%8; // 0...7
+ const int64_t is = tid/16; // 0 or 1
+ const uint8_t h = x[i].qh[in] >> im;
+ const float d = x[i].d;
+ dst_t * y = yy + i*QK_K + tid;
+ y[ 0] = d * x[i].scales[is+0] * ((q & 0xF) - ((h >> 0) & 1 ? 0 : 16));
+ y[32] = d * x[i].scales[is+2] * ((q >> 4) - ((h >> 4) & 1 ? 0 : 16));
+#endif
+}
+
+template<typename dst_t>
+static void dequantize_block_q6_K(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+ const block_q6_K * x = (const block_q6_K *) vx;
+
+ const int64_t i = item_ct1.get_group(2);
+#if QK_K == 256
+
+ // assume 64 threads - this is very slightly better than the one below
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t ip = tid/32; // ip is 0 or 1
+ const int64_t il = tid - 32*ip; // 0...31
+ const int64_t is = 8*ip + il/16;
+
+ dst_t * y = yy + i*QK_K + 128*ip + il;
+
+ const float d = x[i].d;
+
+ const uint8_t * ql = x[i].ql + 64*ip + il;
+ const uint8_t qh = x[i].qh[32*ip + il];
+ const int8_t * sc = x[i].scales + is;
+
+ y[ 0] = d * sc[0] * ((int8_t)((ql[ 0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
+ y[32] = d * sc[2] * ((int8_t)((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
+ y[64] = d * sc[4] * ((int8_t)((ql[ 0] >> 4) | (((qh >> 4) & 3) << 4)) - 32);
+ y[96] = d * sc[6] * ((int8_t)((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32);
+#else
+
+ // assume 32 threads
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t ip = tid/16; // 0 or 1
+ const int64_t il = tid - 16*ip; // 0...15
+
+ dst_t * y = yy + i*QK_K + 16*ip + il;
+
+ const float d = x[i].d;
+
+ const uint8_t ql = x[i].ql[16*ip + il];
+ const uint8_t qh = x[i].qh[il] >> (2*ip);
+ const int8_t * sc = x[i].scales;
+
+ y[ 0] = d * sc[ip+0] * ((int8_t)((ql & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
+ y[32] = d * sc[ip+2] * ((int8_t)((ql >> 4) | (((qh >> 4) & 3) << 4)) - 32);
+#endif
+}
+
+template <typename dst_t>
+static void dequantize_block_q6_K_reorder(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> & item_ct1, int64_t n_blocks) {
+ const int64_t ib = item_ct1.get_group(2);
+
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t ip = tid / 32; // ip is 0 or 1
+ const int64_t il = tid - 32 * ip; // 0...31
+ const int64_t is = 8 * ip + il / 16;
+
+ const uint8_t * base_ptr = static_cast<const uint8_t *>(vx);
+ const auto ql_offset = ib * (QK_K / 2);
+ const auto qh_offset = (QK_K / 2) * n_blocks + (QK_K / 4) * ib;
+ const auto base_scales_offset = (QK_K / 2) * n_blocks + (QK_K / 4) * n_blocks + (QK_K / 16) * ib;
+ const auto base_d_offset = ((QK_K / 2) + (QK_K / 4) + (QK_K / 16)) * n_blocks;
+ const uint8_t * ql_ptr = base_ptr + ql_offset;
+ const uint8_t * qh_ptr = base_ptr + qh_offset;
+ const uint8_t * scales_ptr = base_ptr + base_scales_offset;
+ const ggml_half * d = (const ggml_half *) (base_ptr + base_d_offset) + ib;
+
+ dst_t * y = yy + ib * QK_K + 128 * ip + il;
+
+ const uint8_t * ql = ql_ptr + 64 * ip + il;
+ const uint8_t qh = *(qh_ptr + 32 * ip + il);
+ const int8_t * sc = reinterpret_cast<const int8_t *>(scales_ptr + is);
+
+ y[0] = *d * sc[0] * ((int8_t) ((ql[0] & 0xF) | (((qh >> 0) & 3) << 4)) - 32);
+ y[32] = *d * sc[2] * ((int8_t) ((ql[32] & 0xF) | (((qh >> 2) & 3) << 4)) - 32);
+ y[64] = *d * sc[4] * ((int8_t) ((ql[0] >> 4) | (((qh >> 4) & 3) << 4)) - 32);
+ y[96] = *d * sc[6] * ((int8_t) ((ql[32] >> 4) | (((qh >> 6) & 3) << 4)) - 32);
+}
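+
+// The reordered q6_K buffer follows the same region-per-field scheme: ql,
+// qh, scales and d are stored as four contiguous arrays with per-block
+// sizes QK_K/2, QK_K/4, QK_K/16 and sizeof(ggml_half), matching the
+// offsets computed above.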
+
+template<typename dst_t>
+static void dequantize_block_iq2_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1,
+ const uint64_t *iq2xxs_grid_ptr,
+ const uint8_t *ksigns_iq2xs_ptr,
+ const uint8_t *kmask_iq2xs_ptr) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq2_xxs * x = (const block_iq2_xxs *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 8*il;
+ const uint16_t * q2 = x[i].qs + 4*ib;
+ const uint8_t * aux8 = (const uint8_t *)q2;
+ const uint8_t * grid = (const uint8_t *)(iq2xxs_grid_ptr + aux8[il]);
+ const uint32_t aux32 = q2[2] | (q2[3] << 16);
+ const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.25f;
+ const uint8_t signs = ksigns_iq2xs_ptr[(aux32 >> 7*il) & 127];
+ for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs_ptr[j] ? -1.f : 1.f);
+#else
+ assert(false);
+#endif
+
+}
+
+template<typename dst_t>
+static void dequantize_block_iq2_xs(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1,
+ const uint64_t *iq2xs_grid,
+ const uint8_t *ksigns_iq2xs,
+ const uint8_t *kmask_iq2xs) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq2_xs * x = (const block_iq2_xs *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 8*il;
+ const uint16_t * q2 = x[i].qs + 4*ib;
+ const uint8_t * grid = (const uint8_t *)(iq2xs_grid + (q2[il] & 511));
+ const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
+ const uint8_t signs = ksigns_iq2xs[q2[il] >> 9];
+ for (int j = 0; j < 8; ++j) y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
+#else
+ assert(false);
+#endif
+
+}
+
+template <typename dst_t>
+__dpct_inline__ static void
+dequantize_block_iq2_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq2_s * x = (const block_iq2_s *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 8*il;
+ const uint8_t * grid = (const uint8_t *)(iq2s_grid + (x[i].qs[4*ib+il] | ((x[i].qh[ib] << (8-2*il)) & 0x300)));
+ const float d = (float)x[i].d * (0.5f + ((x[i].scales[ib] >> 4*(il/2)) & 0xf)) * 0.25f;
+ const uint8_t signs = x[i].qs[QK_K/8+4*ib+il];
+#pragma unroll
+ for (int j = 0; j < 8; ++j)
+ y[j] = d * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
+#else
+ assert(false);
+
+#endif
+
+}
+
+template<typename dst_t>
+static void dequantize_block_iq3_xxs(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1,
+ const uint32_t *iq3xxs_grid,
+ const uint8_t *ksigns_iq2xs,
+ const uint8_t *kmask_iq2xs) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq3_xxs * x = (const block_iq3_xxs *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 8*il;
+ const uint8_t * q3 = x[i].qs + 8*ib;
+ const uint16_t * gas = (const uint16_t *)(x[i].qs + QK_K/4) + 2*ib;
+ const uint8_t * grid1 = (const uint8_t *)(iq3xxs_grid + q3[2*il+0]);
+ const uint8_t * grid2 = (const uint8_t *)(iq3xxs_grid + q3[2*il+1]);
+ const uint32_t aux32 = gas[0] | (gas[1] << 16);
+ const float d = (float)x[i].d * (0.5f + (aux32 >> 28)) * 0.5f;
+ const uint8_t signs = ksigns_iq2xs[(aux32 >> 7*il) & 127];
+ for (int j = 0; j < 4; ++j) {
+ y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
+ y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
+ }
+#else
+ assert(false);
+#endif
+
+}
+
+template <typename dst_t>
+__dpct_inline__ static void
+dequantize_block_iq3_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
+ const sycl::nd_item<3> &item_ct1,
+ const uint8_t *kmask_iq2xs, const uint32_t *iq3s_grid) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq3_s * x = (const block_iq3_s *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 8*il;
+ const uint8_t * qs = x[i].qs + 8*ib;
+ const uint8_t * grid1 = (const uint8_t *)(iq3s_grid + (qs[2*il+0] | ((x[i].qh[ib] << (8-2*il)) & 256)));
+ const uint8_t * grid2 = (const uint8_t *)(iq3s_grid + (qs[2*il+1] | ((x[i].qh[ib] << (7-2*il)) & 256)));
+ const float d = (float)x[i].d * (1 + 2*((x[i].scales[ib/2] >> 4*(ib%2)) & 0xf));
+ const uint8_t signs = x[i].signs[4*ib + il];
+#pragma unroll
+ for (int j = 0; j < 4; ++j) {
+ y[j+0] = d * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
+ y[j+4] = d * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
+ }
+#else
+ assert(false);
+#endif
+
+}
+
+template <typename dst_t>
+__dpct_inline__ static void
+dequantize_block_iq1_s(const void *__restrict__ vx, dst_t *__restrict__ yy,
+ const sycl::nd_item<3> &item_ct1,
+ const uint32_t *iq1s_grid_gpu) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq1_s * x = (const block_iq1_s *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 8*il;
+ const float delta = x[i].qh[ib] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA;
+ const float d = (float)x[i].d * (2*((x[i].qh[ib] >> 12) & 7) + 1);
+ uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32;
+ grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[ib] >> 3*il) & 7) << 8)];
+ grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
+ grid32[0] &= 0x0f0f0f0f;
+#pragma unroll
+ for (int j = 0; j < 8; ++j) {
+ y[j] = d * (q[j] + delta);
+ }
+#else
+ assert(false);
+#endif
+
+}
+
+template <typename dst_t>
+__dpct_inline__ static void
+dequantize_block_iq1_m(const void *__restrict__ vx, dst_t *__restrict__ yy,
+ const sycl::nd_item<3> &item_ct1,
+ const uint32_t *iq1s_grid_gpu) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq1_m * x = (const block_iq1_m *) vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+#if QK_K == 256
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 8*il;
+ const uint16_t * sc = (const uint16_t *)x[i].scales;
+ iq1m_scale_t scale;
+ scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
+ const int ib16 = 2*ib + il/2; // sc[ib16/4] >> 3*(ib16%4) -> sc[ib/2] >> 3*((2*ib+il/2)%4);
+ const float d = (float)scale.f16 * (2*((sc[ib16/4] >> 3*(ib16%4)) & 0x7) + 1);
+ const float delta = x[i].qh[2*ib+il/2] & (0x08 << 4*(il%2)) ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA;
+ uint32_t grid32[2]; const int8_t * q = (const int8_t *)grid32;
+ grid32[0] = iq1s_grid_gpu[x[i].qs[4*ib+il] | (((x[i].qh[2*ib+il/2] >> 4*(il%2)) & 7) << 8)];
+ grid32[1] = (grid32[0] >> 4) & 0x0f0f0f0f;
+ grid32[0] &= 0x0f0f0f0f;
+#pragma unroll
+ for (int j = 0; j < 8; ++j) {
+ y[j] = d * (q[j] + delta);
+ }
+#else
+ assert(false);
+#endif
+
+}
+
+template <typename dst_t>
+__dpct_inline__ static void
+dequantize_block_iq4_nl(const void *__restrict__ vx, dst_t *__restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq4_nl * x = (const block_iq4_nl *) vx + i*(QK_K/QK4_NL);
+
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 4*il;
+ const uint8_t * q4 = x[ib].qs + 4*il;
+ const float d = (float)x[ib].d;
+#pragma unroll
+ for (int j = 0; j < 4; ++j) {
+ y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf];
+ y[j+16] = d * kvalues_iq4nl[q4[j] >> 4];
+ }
+
+}
+
+
+template <typename dst_t>
+__dpct_inline__ static void
+dequantize_block_iq4_xs(const void *__restrict__ vx, dst_t *__restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+ const int64_t i = item_ct1.get_group(2);
+ const block_iq4_xs * x = (const block_iq4_xs *)vx;
+
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 4*il;
+ const uint8_t * q4 = x[i].qs + 16*ib + 4*il;
+ const float d = (float)x[i].d * ((((x[i].scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((x[i].scales_h >> 2*ib) & 3) << 4)) - 32);
+#pragma unroll
+ for (int j = 0; j < 4; ++j) {
+ y[j+ 0] = d * kvalues_iq4nl[q4[j] & 0xf];
+ y[j+16] = d * kvalues_iq4nl[q4[j] >> 4];
+ }
+}
+
+template<typename dst_t>
+static void dequantize_block_mxfp4(const void * __restrict__ vx, dst_t * __restrict__ yy,
+ const sycl::nd_item<3> &item_ct1) {
+ // auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>();
+ const int64_t i = item_ct1.get_group(2);
+ const block_mxfp4 * x = (const block_mxfp4 *) vx + i*(QK_K/QK_MXFP4);
+
+ const int64_t tid = item_ct1.get_local_id(2);
+ const int64_t il = tid/8; // 0...3
+ const int64_t ib = tid%8; // 0...7
+ dst_t * y = yy + i*QK_K + 32*ib + 4*il;
+ const uint8_t * q4 = x[ib].qs + 4*il;
+ const float d = ggml_sycl_e8m0_to_fp32(x[ib].e);
+ for (int j = 0; j < 4; ++j) {
+ y[j+ 0] = d * kvalues_mxfp4[q4[j] & 0xf]*0.5f;
+ y[j+16] = d * kvalues_mxfp4[q4[j] >> 4]*0.5f;
+ }
+}
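+
+// Note on the 0.5f factor above: the kvalues_mxfp4 table stores the fp4
+// (e2m1) lattice values doubled so they fit exactly in int8, so the product
+// with the e8m0 block scale d is halved to recover the nominal value.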
+
+#endif // GGML_SYCL_DEQUANTIZE_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp b/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp
new file mode 100644
index 0000000..4f27601
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/dmmv.cpp
@@ -0,0 +1,1162 @@
+#include "convert.hpp"
+#include "dmmv.hpp"
+#include "dequantize.hpp"
+#include "presets.hpp"
+
+static void convert_f16(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
+ const sycl::half *x = (const sycl::half *)vx;
+
+ // automatic half -> float type cast if dfloat == float
+ v.x() = x[ib + iqs + 0];
+ v.y() = x[ib + iqs + 1];
+}
+
+static void convert_f32(const void * vx, const int64_t ib, const int iqs, dfloat2 & v){
+ const float * x = (const float *) vx;
+
+ // automatic float -> half type cast if dfloat == sycl::half
+ v.x() = x[ib + iqs + 0];
+ v.y() = x[ib + iqs + 1];
+}
+
+template <int qk, int qr, dequantize_kernel_t dequantize_kernel>
+static void dequantize_mul_mat_vec(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ // qk = quantized weights per x block
+ // qr = number of quantized weights per data value in x block
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int tid = item_ct1.get_local_id(2);
+
+ const int iter_stride = 2*GGML_SYCL_DMMV_X;
+ const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread and i iter
+ const int y_offset = qr == 1 ? 1 : qk/2;
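+
+ // Concretely (an illustrative reading, assuming the default
+ // GGML_SYCL_DMMV_X = 32 and WARP_SIZE = 32): each i iteration spans
+ // iter_stride = 64 columns, each lane dequantizes vals_per_iter = 2 of
+ // them, and for qr == 2 the paired y operand lives qk/2 elements further
+ // into the same y block (y_offset).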
+
+// partial sum for each thread
+#ifdef GGML_SYCL_F16
+ sycl::half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics
+#else
+ float tmp = 0.0f;
+#endif // GGML_SYCL_F16
+
+ for (int i = 0; i < ncols; i += iter_stride) {
+ const int col = i + vals_per_iter*tid;
+ const int ib = (row*ncols + col)/qk; // x block index
+ const int iqs = (col%qk)/qr; // x quant index
+ const int iybs = col - col%qk; // y block start index
+
+// processing >2 values per i iter is faster for fast GPUs
+#pragma unroll
+ for (int j = 0; j < vals_per_iter; j += 2) {
+ // process 2 vals per j iter
+
+ // dequantize
+ // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
+ dfloat2 v;
+ dequantize_kernel(vx, ib, iqs + j/qr, v);
+
+ // matrix multiplication
+ // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
+#ifdef GGML_SYCL_F16
+ dfloat2 t1{y[iybs + iqs + j / qr + 0],
+ y[iybs + iqs + j / qr + y_offset]};
+
+ tmp += v * t1;
+#else
+ tmp += v.x() * y[iybs + iqs + j / qr + 0];
+ tmp += v.y() * y[iybs + iqs + j / qr + y_offset];
+#endif // GGML_SYCL_F16
+ }
+ }
+
+ // sum up partial sums and write back result
+ const int mask_start = ncols > GGML_SYCL_DMMV_X ? WARP_SIZE >> 1 : WARP_SIZE >> 2;
+ for (int mask = mask_start; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
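+
+ // The XOR shuffle above is a butterfly reduction across the sub-group:
+ // at each step lane i adds lane (i ^ mask)'s partial sum, so after the
+ // halving masks are exhausted every participating lane (lane 0 in
+ // particular, which writes the result) holds the full dot product.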
+
+ if (tid == 0) {
+#ifdef GGML_SYCL_F16
+ dst[row] = tmp.x() + tmp.y();
+#else
+ dst[row] = tmp;
+#endif // GGML_SYCL_F16
+ }
+}
+
+template <int qk, int qr, dequantize_kernel_t_reorder dequantize_kernel_reorder>
+static void dequantize_mul_mat_vec_reorder(const void * __restrict__ vx, const dfloat * __restrict__ y, float * __restrict__ dst, const int ncols, const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ // qk = quantized weights per x block
+ // qr = number of quantized weights per data value in x block
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int tid = item_ct1.get_local_id(2);
+
+
+ const int ncols_left = ncols % (QK4_0*WARP_SIZE);
+ const int ncols_align = ncols - ncols_left;
+ const int iter_stride = 8*2*GGML_SYCL_DMMV_X;
+ const int vals_per_iter = iter_stride / WARP_SIZE; // num quantized vals per thread per i iter, e.g. 512/32 = 16
+ const int y_offset = qr == 1 ? 1 : qk/2;
+
+// partial sum for each thread
+#ifdef GGML_SYCL_F16
+ sycl::half2 tmp = {0.0f, 0.0f}; // two sums for f16 to take advantage of half2 intrinsics
+#else
+ float tmp = 0.0f;
+#endif // GGML_SYCL_F16
+ const char *d_ptr = (const char*)vx+ncols*nrows/2;
+ int i=0;
+ for (i = 0; i < ncols_align; i += iter_stride) {
+ const int col = i + vals_per_iter*tid;
+ const int ib = (row*ncols + col)/qk; // x block index
+ const int iqs = (col%qk)/qr; // x quant index
+ const int iybs = col - col%qk; // y block start index
+
+// processing >2 values per i iter is faster for fast GPUs
+#pragma unroll
+ for (int j = 0; j < vals_per_iter; j += 2) {
+ // process 2 vals per j iter
+
+ // dequantize
+ // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
+ dfloat2 v;
+ dequantize_kernel_reorder((const void *)d_ptr, ib, (const void *)vx, ib * QK4_0 / 2 +iqs+j/qr, v);
+
+ // matrix multiplication
+ // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
+#ifdef GGML_SYCL_F16
+ dfloat2 t1{y[iybs + iqs + j / qr + 0],
+ y[iybs + iqs + j / qr + y_offset]};
+
+ tmp += v * t1;
+#else
+ tmp += v.x() * y[iybs + iqs + j / qr + 0];
+ tmp += v.y() * y[iybs + iqs + j / qr + y_offset];
+#endif // GGML_SYCL_F16
+ }
+ }
+
+ for (; i < ncols; i += iter_stride) {
+ if (tid>=ncols_left/QK4_0) continue;
+ const int col = i + vals_per_iter*tid;
+ const int ib = (row*ncols + col)/qk; // x block index
+ const int iqs = (col%qk)/qr; // x quant index
+ const int iybs = col - col%qk; // y block start index
+
+// processing >2 values per i iter is faster for fast GPUs
+#pragma unroll
+ for (int j = 0; j < vals_per_iter; j += 2) {
+ // process 2 vals per j iter
+
+ // dequantize
+ // for qr = 2 the iqs needs to increase by 1 per j iter because 2 weights per data val
+ dfloat2 v;
+ dequantize_kernel_reorder((const void *)d_ptr, ib, (const void *)vx, ib * QK4_0 / 2 +iqs+j/qr, v);
+
+ // matrix multiplication
+ // for qr = 2 the y index needs to increase by 1 per j iter because of y_offset = qk/2
+#ifdef GGML_SYCL_F16
+ dfloat2 t1{y[iybs + iqs + j / qr + 0],
+ y[iybs + iqs + j / qr + y_offset]};
+
+ tmp += v * t1;
+#else
+ tmp += v.x() * y[iybs + iqs + j / qr + 0];
+ tmp += v.y() * y[iybs + iqs + j / qr + y_offset];
+#endif // GGML_SYCL_F16
+ }
+ }
+
+ // sum up partial sums and write back result
+ const int mask_start = ncols > GGML_SYCL_DMMV_X ? WARP_SIZE >> 1 : WARP_SIZE >> 2;
+ for (int mask = mask_start; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (tid == 0) {
+#ifdef GGML_SYCL_F16
+ dst[row] = tmp.x() + tmp.y();
+#else
+ dst[row] = tmp;
+#endif // GGML_SYCL_F16
+ }
+}
+
+static void convert_mul_mat_vec_f16_sycl(const void *vx, const dfloat *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ dequantize_mul_mat_vec<1, 1, convert_f16>(vx, y, dst, ncols,
+ nrows, item_ct1);
+ });
+ }
+}
+
+/*
+DPCT1110:4: The total declared local variable size in device function
+dequantize_mul_mat_vec_q2_k exceeds 128 bytes and may cause high register
+pressure. Consult with your hardware vendor to find the total register size
+available and adjust the code, or use smaller sub-group size to avoid high
+register pressure.
+*/
+static void dequantize_mul_mat_vec_q2_k(const void *__restrict__ vx,
+ const float *__restrict__ yy,
+ float *__restrict__ dst,
+ const int ncols, int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+
+ static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");
+
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+ if (row >= nrows) return; // row == nrows would read past the last row
+
+ const int num_blocks_per_row = ncols / QK_K;
+ const int ib0 = row*num_blocks_per_row;
+
+ const block_q2_K * x = (const block_q2_K *)vx + ib0;
+
+ float tmp = 0; // partial sum for thread in warp
+
+#if QK_K == 256
+ const int tid =
+ item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...15
+ const int ix =
+ item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1
+
+ const int step = 16/K_QUANTS_PER_ITERATION;
+
+ const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
+ const int in = tid - step*im; // 0...15 or 0...7
+
+ const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15 or 0...14 in steps of 2
+ const int q_offset = 32*im + l0;
+ const int s_offset = 8*im;
+ const int y_offset = 128*im + l0;
+
+ uint32_t aux[4];
+ const uint8_t * d = (const uint8_t *)aux;
+ const uint8_t * m = (const uint8_t *)(aux + 2);
+
+ for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
+
+ const float * y = yy + i * QK_K + y_offset;
+ const uint8_t * q = x[i].qs + q_offset;
+
+ const float dall = x[i].dm[0];
+ const float dmin = x[i].dm[1];
+
+ const uint32_t * a = (const uint32_t *)(x[i].scales + s_offset);
+ aux[0] = a[0] & 0x0f0f0f0f;
+ aux[1] = a[1] & 0x0f0f0f0f;
+ aux[2] = (a[0] >> 4) & 0x0f0f0f0f;
+ aux[3] = (a[1] >> 4) & 0x0f0f0f0f;
+
+ float sum1 = 0, sum2 = 0;
+ for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
+ sum1 += y[l+ 0] * d[0] * ((q[l+ 0] >> 0) & 3)
+ + y[l+32] * d[2] * ((q[l+ 0] >> 2) & 3)
+ + y[l+64] * d[4] * ((q[l+ 0] >> 4) & 3)
+ + y[l+96] * d[6] * ((q[l+ 0] >> 6) & 3)
+ + y[l+16] * d[1] * ((q[l+16] >> 0) & 3)
+ + y[l+48] * d[3] * ((q[l+16] >> 2) & 3)
+ + y[l+80] * d[5] * ((q[l+16] >> 4) & 3)
+ +y[l+112] * d[7] * ((q[l+16] >> 6) & 3);
+ sum2 += y[l+ 0] * m[0] + y[l+32] * m[2] + y[l+64] * m[4] + y[ l+96] * m[6]
+ + y[l+16] * m[1] + y[l+48] * m[3] + y[l+80] * m[5] + y[l+112] * m[7];
+
+ }
+ tmp += dall * sum1 - dmin * sum2;
+
+ }
+#else
+ const int tid = item_ct1.get_local_id(2) /
+ (2 * K_QUANTS_PER_ITERATION); // 0...15 or 0...7
+ const int ix = item_ct1.get_local_id(2) %
+ (2 * K_QUANTS_PER_ITERATION); // 0....1 or 0...3
+ const int offset = tid * K_QUANTS_PER_ITERATION;
+
+ uint32_t uaux[2];
+ const uint8_t * d = (const uint8_t *)uaux;
+
+
+ for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
+
+ const float * y = yy + i * QK_K + offset;
+ const uint8_t * q = x[i].qs + offset;
+ const uint32_t * s = (const uint32_t *)x[i].scales;
+
+ uaux[0] = s[0] & 0x0f0f0f0f;
+ uaux[1] = (s[0] >> 4) & 0x0f0f0f0f;
+
+ const sycl::float2 dall =
+ x[i].dm.convert<float, sycl::rounding_mode::automatic>();
+
+ float sum1 = 0, sum2 = 0;
+ for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
+ const uint8_t ql = q[l];
+ sum1 += y[l+ 0] * d[0] * ((ql >> 0) & 3)
+ + y[l+16] * d[1] * ((ql >> 2) & 3)
+ + y[l+32] * d[2] * ((ql >> 4) & 3)
+ + y[l+48] * d[3] * ((ql >> 6) & 3);
+ sum2 += y[l+0] * d[4] + y[l+16] * d[5] + y[l+32] * d[6] + y[l+48] * d[7];
+ }
+ tmp += dall.x() * sum1 - dall.y() * sum2;
+ }
+
+#endif
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+/*
+DPCT1110:5: The total declared local variable size in device function
+dequantize_mul_mat_vec_q3_k exceeds 128 bytes and may cause high register
+pressure. Consult with your hardware vendor to find the total register size
+available and adjust the code, or use smaller sub-group size to avoid high
+register pressure.
+*/
+static void dequantize_mul_mat_vec_q3_k(const void *__restrict__ vx,
+ const float *__restrict__ yy,
+ float *__restrict__ dst,
+ const int ncols, int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+ if (row >= nrows) return;
+
+ const int num_blocks_per_row = ncols / QK_K;
+ const int ib0 = row*num_blocks_per_row;
+
+ const block_q3_K * x = (const block_q3_K *)vx + ib0;
+
+ float tmp = 0; // partial sum for thread in warp
+
+#if QK_K == 256
+
+ const uint16_t kmask1 = 0x0303;
+ const uint16_t kmask2 = 0x0f0f;
+
+ const int tid =
+ item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...15
+ const int ix =
+ item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1
+
+ const int n = K_QUANTS_PER_ITERATION; // iterations in the inner loop
+ const int step = 16/K_QUANTS_PER_ITERATION;
+ const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
+ const int in = tid - step*im; // 0....15 or 0...7
+
+ const uint8_t m = 1 << (4*im);
+
+ const int l0 = n*in; // 0...15 or 0...14 in steps of 2
+ const int q_offset = 32*im + l0;
+ const int y_offset = 128*im + l0;
+
+ uint16_t utmp[4];
+ const int8_t * s = (const int8_t *)utmp;
+
+ const uint16_t s_shift = 4*im;
+
+ for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
+
+ const float * y = yy + i * QK_K + y_offset;
+ const uint8_t * q = x[i].qs + q_offset;
+ const uint8_t * h = x[i].hmask + l0;
+
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ utmp[0] = ((a[0] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 0)) & kmask1) << 4);
+ utmp[1] = ((a[1] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 0)) & kmask1) << 4);
+ utmp[2] = ((a[2] >> s_shift) & kmask2) | (((a[4] >> (s_shift + 2)) & kmask1) << 4);
+ utmp[3] = ((a[3] >> s_shift) & kmask2) | (((a[5] >> (s_shift + 2)) & kmask1) << 4);
+
+ const float d = x[i].d;
+
+ float sum = 0;
+ for (int l = 0; l < n; ++l) {
+ sum += y[l+ 0] * (s[0] - 32) * (((q[l] >> 0) & 3) - (h[l] & (m << 0) ? 0 : 4))
+ + y[l+32] * (s[2] - 32) * (((q[l] >> 2) & 3) - (h[l] & (m << 1) ? 0 : 4))
+ + y[l+64] * (s[4] - 32) * (((q[l] >> 4) & 3) - (h[l] & (m << 2) ? 0 : 4))
+ + y[l+96] * (s[6] - 32) * (((q[l] >> 6) & 3) - (h[l] & (m << 3) ? 0 : 4));
+ sum += y[l+16] * (s[1] - 32) * (((q[l+16] >> 0) & 3) - (h[l+16] & (m << 0) ? 0 : 4))
+ + y[l+48] * (s[3] - 32) * (((q[l+16] >> 2) & 3) - (h[l+16] & (m << 1) ? 0 : 4))
+ + y[l+80] * (s[5] - 32) * (((q[l+16] >> 4) & 3) - (h[l+16] & (m << 2) ? 0 : 4))
+ + y[l+112] * (s[7] - 32) * (((q[l+16] >> 6) & 3) - (h[l+16] & (m << 3) ? 0 : 4));
+ }
+ tmp += d * sum;
+
+ }
+#else
+
+ const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7
+ const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION); // 0....1 or 0...3
+ const int offset = tid * K_QUANTS_PER_ITERATION; // 0...15 or 0...14
+ const int in = offset/8; // 0 or 1
+ const int im = offset%8; // 0...7
+
+ for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
+
+ const float * y = yy + i * QK_K + offset;
+ const uint8_t * q = x[i].qs + offset;
+ const uint8_t * s = x[i].scales;
+
+ const float dall = (float)x[i].d;
+
+ float sum = 0;
+ for (int l = 0; l < K_QUANTS_PER_ITERATION; ++l) {
+ const uint8_t hl = x[i].hmask[im+l] >> in;
+ const uint8_t ql = q[l];
+ sum += y[l+ 0] * dall * ((s[0] & 0xF) - 8) * ((int8_t)((ql >> 0) & 3) - ((hl >> 0) & 1 ? 0 : 4))
+ + y[l+16] * dall * ((s[0] >> 4) - 8) * ((int8_t)((ql >> 2) & 3) - ((hl >> 2) & 1 ? 0 : 4))
+ + y[l+32] * dall * ((s[1] & 0xF) - 8) * ((int8_t)((ql >> 4) & 3) - ((hl >> 4) & 1 ? 0 : 4))
+ + y[l+48] * dall * ((s[1] >> 4) - 8) * ((int8_t)((ql >> 6) & 3) - ((hl >> 6) & 1 ? 0 : 4));
+ }
+ tmp += sum;
+ }
+#endif
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+/*
+DPCT1110:6: The total declared local variable size in device function
+dequantize_mul_mat_vec_q4_k exceeds 128 bytes and may cause high register
+pressure. Consult with your hardware vendor to find the total register size
+available and adjust the code, or use smaller sub-group size to avoid high
+register pressure.
+*/
+static void dequantize_mul_mat_vec_q4_k(const void *__restrict__ vx,
+ const float *__restrict__ yy,
+ float *__restrict__ dst,
+ const int ncols, int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+ if (row >= nrows) return;
+ const int num_blocks_per_row = ncols / QK_K;
+ const int ib0 = row*num_blocks_per_row;
+
+ const block_q4_K * x = (const block_q4_K *)vx + ib0;
+
+#if QK_K == 256
+ const uint16_t kmask1 = 0x3f3f;
+ const uint16_t kmask2 = 0x0f0f;
+ const uint16_t kmask3 = 0xc0c0;
+
+ const int tid =
+ item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION; // 0...31 or 0...15
+ const int ix =
+ item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION; // 0 or 0,1
+
+ const int step = 8/K_QUANTS_PER_ITERATION; // 8 or 4
+
+ const int il = tid/step; // 0...3
+ const int ir = tid - step*il; // 0...7 or 0...3
+ const int n = 2 * K_QUANTS_PER_ITERATION; // 2 or 4
+
+ const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
+ const int in = il%2;
+
+ const int l0 = n*(2*ir + in);
+ const int q_offset = 32*im + l0;
+ const int y_offset = 64*im + l0;
+
+ uint16_t aux[4];
+ const uint8_t * sc = (const uint8_t *)aux;
+
+#if K_QUANTS_PER_ITERATION == 2
+ uint32_t q32[4];
+ const uint8_t * q4 = (const uint8_t *)q32;
+#else
+ uint16_t q16[4];
+ const uint8_t * q4 = (const uint8_t *)q16;
+#endif
+
+ float tmp = 0; // partial sum for thread in warp
+
+ for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
+
+ const float * y1 = yy + i*QK_K + y_offset;
+ const float * y2 = y1 + 128;
+
+ const float dall = x[i].dm[0];
+ const float dmin = x[i].dm[1];
+
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ aux[0] = a[im+0] & kmask1;
+ aux[1] = a[im+2] & kmask1;
+ aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
+ aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
+
+#if K_QUANTS_PER_ITERATION == 2
+ const uint32_t * q1 = (const uint32_t *)(x[i].qs + q_offset);
+ const uint32_t * q2 = q1 + 16;
+
+ q32[0] = q1[0] & 0x0f0f0f0f;
+ q32[1] = q1[0] & 0xf0f0f0f0;
+ q32[2] = q2[0] & 0x0f0f0f0f;
+ q32[3] = q2[0] & 0xf0f0f0f0;
+
+ sycl::float4 s = {0.f, 0.f, 0.f, 0.f};
+ float smin = 0;
+ for (int l = 0; l < 4; ++l) {
+ s.x() += y1[l] * q4[l + 0]; s.y() += y1[l + 32] * q4[l + 4];
+ s.z() += y2[l] * q4[l + 8]; s.w() += y2[l + 32] * q4[l + 12];
+ smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
+ }
+ tmp += dall * (s.x() * sc[0] + s.y() * sc[1] * 1.f / 16.f +
+ s.z() * sc[4] + s.w() * sc[5] * 1.f / 16.f) -
+ dmin * smin;
+#else
+ const uint16_t * q1 = (const uint16_t *)(x[i].qs + q_offset);
+ const uint16_t * q2 = q1 + 32;
+
+ q16[0] = q1[0] & 0x0f0f;
+ q16[1] = q1[0] & 0xf0f0;
+ q16[2] = q2[0] & 0x0f0f;
+ q16[3] = q2[0] & 0xf0f0;
+
+ sycl::float4 s = {0.f, 0.f, 0.f, 0.f};
+ float smin = 0;
+ for (int l = 0; l < 2; ++l) {
+ s.x() += y1[l] * q4[l+0]; s.y() += y1[l+32] * q4[l+2];
+ s.z() += y2[l] * q4[l+4]; s.w() += y2[l+32] * q4[l+6];
+ smin += y1[l] * sc[2] + y1[l+32] * sc[3] + y2[l] * sc[6] + y2[l+32] * sc[7];
+ }
+ tmp += dall * (s.x() * sc[0] + s.y() * sc[1] * 1.f/16.f + s.z() * sc[4] + s.w() * sc[5] * 1.f/16.f) - dmin * smin;
+#endif
+
+ }
+#else
+ const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...15
+ const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION);
+
+ const int step = tid * K_QUANTS_PER_ITERATION;
+
+ uint16_t aux16[2];
+ const uint8_t * s = (const uint8_t *)aux16;
+
+ float tmp = 0;
+
+ for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
+ const uint8_t * q = x[i].qs + step;
+ const float * y = yy + i*QK_K + step;
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
+ const float d = (float)x[i].dm[0];
+ const float m = (float)x[i].dm[1];
+ float sum = 0.f;
+ for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
+ sum += y[j+ 0] * (d * s[0] * (q[j+ 0] & 0xF) - m * s[2])
+ + y[j+16] * (d * s[0] * (q[j+16] & 0xF) - m * s[2])
+ + y[j+32] * (d * s[1] * (q[j+ 0] >> 4) - m * s[3])
+ + y[j+48] * (d * s[1] * (q[j+16] >> 4) - m * s[3]);
+ }
+ tmp += sum;
+ }
+
+#endif
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (tid == 0) {
+ dst[row] = tmp;
+ }
+}
+
+/*
+DPCT1110:7: The total declared local variable size in device function
+dequantize_mul_mat_vec_q5_k exceeds 128 bytes and may cause high register
+pressure. Consult with your hardware vendor to find the total register size
+available and adjust the code, or use smaller sub-group size to avoid high
+register pressure.
+*/
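+// q5_K keeps the low 4 bits of each quant in qs and the fifth (high) bit in
+// qh; hm1/hm2 below select that bit, and a set bit adds 16 to the 4-bit value.
+// Per-value decode sketch, as performed by the inner loop:
+//   val = dall * sc * (q4 + (qh & hm ? 16 : 0)) - dmin * m
+// with sc/m being the 6-bit block scales/mins unpacked into aux[].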
+static void dequantize_mul_mat_vec_q5_k(const void *__restrict__ vx,
+ const float *__restrict__ yy,
+ float *__restrict__ dst,
+ const int ncols,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const int row = item_ct1.get_group(2);
+ const int num_blocks_per_row = ncols / QK_K;
+ const int ib0 = row*num_blocks_per_row;
+
+ const block_q5_K * x = (const block_q5_K *)vx + ib0;
+
+ float tmp = 0; // partial sum for thread in warp
+
+#if QK_K == 256
+ const uint16_t kmask1 = 0x3f3f;
+ const uint16_t kmask2 = 0x0f0f;
+ const uint16_t kmask3 = 0xc0c0;
+
+ const int tid = item_ct1.get_local_id(2) / 2; // 0...15
+ const int ix = item_ct1.get_local_id(2) % 2;
+
+ const int il = tid/4; // 0...3
+ const int ir = tid - 4*il;// 0...3
+ const int n = 2;
+
+ const int im = il/2; // 0 or 1. 0 computes 0,32 + 128,160, 1 computes 64,96 + 192,224
+ const int in = il%2;
+
+ const int l0 = n*(2*ir + in);
+ const int q_offset = 32*im + l0;
+ const int y_offset = 64*im + l0;
+
+ const uint8_t hm1 = 1 << (2*im);
+ const uint8_t hm2 = hm1 << 4;
+
+ uint16_t aux[4];
+ const uint8_t * sc = (const uint8_t *)aux;
+
+ uint16_t q16[8];
+ const uint8_t * q4 = (const uint8_t *)q16;
+
+ for (int i = ix; i < num_blocks_per_row; i += 2) {
+
+ const uint8_t * ql1 = x[i].qs + q_offset;
+ const uint8_t * qh = x[i].qh + l0;
+ const float * y1 = yy + i*QK_K + y_offset;
+ const float * y2 = y1 + 128;
+
+ const float dall = x[i].dm[0];
+ const float dmin = x[i].dm[1];
+
+ const uint16_t * a = (const uint16_t *)x[i].scales;
+ aux[0] = a[im+0] & kmask1;
+ aux[1] = a[im+2] & kmask1;
+ aux[2] = ((a[im+4] >> 0) & kmask2) | ((a[im+0] & kmask3) >> 2);
+ aux[3] = ((a[im+4] >> 4) & kmask2) | ((a[im+2] & kmask3) >> 2);
+
+ sycl::float4 sum = {0.f, 0.f, 0.f, 0.f};
+ float smin = 0;
+ const uint16_t * q1 = (const uint16_t *)ql1;
+ const uint16_t * q2 = q1 + 32;
+ q16[0] = q1[0] & 0x0f0f;
+ q16[1] = q1[8] & 0x0f0f;
+ q16[2] = (q1[0] >> 4) & 0x0f0f;
+ q16[3] = (q1[8] >> 4) & 0x0f0f;
+ q16[4] = q2[0] & 0x0f0f;
+ q16[5] = q2[8] & 0x0f0f;
+ q16[6] = (q2[0] >> 4) & 0x0f0f;
+ q16[7] = (q2[8] >> 4) & 0x0f0f;
+ for (int l = 0; l < n; ++l) {
+ sum.x() +=
+ y1[l + 0] * (q4[l + 0] + (qh[l + 0] & (hm1 << 0) ? 16 : 0)) +
+ y1[l + 16] * (q4[l + 2] + (qh[l + 16] & (hm1 << 0) ? 16 : 0));
+ sum.y() +=
+ y1[l + 32] * (q4[l + 4] + (qh[l + 0] & (hm1 << 1) ? 16 : 0)) +
+ y1[l + 48] * (q4[l + 6] + (qh[l + 16] & (hm1 << 1) ? 16 : 0));
+ sum.z() +=
+ y2[l + 0] * (q4[l + 8] + (qh[l + 0] & (hm2 << 0) ? 16 : 0)) +
+ y2[l + 16] * (q4[l + 10] + (qh[l + 16] & (hm2 << 0) ? 16 : 0));
+ sum.w() +=
+ y2[l + 32] * (q4[l + 12] + (qh[l + 0] & (hm2 << 1) ? 16 : 0)) +
+ y2[l + 48] * (q4[l + 14] + (qh[l + 16] & (hm2 << 1) ? 16 : 0));
+ smin += (y1[l] + y1[l+16]) * sc[2] + (y1[l+32] + y1[l+48]) * sc[3]
+ + (y2[l] + y2[l+16]) * sc[6] + (y2[l+32] + y2[l+48]) * sc[7];
+ }
+ tmp += dall * (sum.x() * sc[0] + sum.y() * sc[1] + sum.z() * sc[4] +
+ sum.w() * sc[5]) -
+ dmin * smin;
+ }
+
+#else
+    const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...15 or 0...7
+    const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION);  // 0...1 or 0...3
+ const int step = tid * K_QUANTS_PER_ITERATION;
+ const int im = step/8;
+ const int in = step%8;
+
+ for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
+ const uint8_t * q = x[i].qs + step;
+ const int8_t * s = x[i].scales;
+ const float * y = yy + i*QK_K + step;
+ const float d = x[i].d;
+ float sum = 0.f;
+ for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
+ const uint8_t h = x[i].qh[in+j] >> im;
+ sum += y[j+ 0] * d * s[0] * ((q[j+ 0] & 0xF) - ((h >> 0) & 1 ? 0 : 16))
+ + y[j+16] * d * s[1] * ((q[j+16] & 0xF) - ((h >> 2) & 1 ? 0 : 16))
+ + y[j+32] * d * s[2] * ((q[j+ 0] >> 4) - ((h >> 4) & 1 ? 0 : 16))
+ + y[j+48] * d * s[3] * ((q[j+16] >> 4) - ((h >> 6) & 1 ? 0 : 16));
+ }
+ tmp += sum;
+ }
+#endif
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
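+// q6_K packs each 6-bit quant as 4 low bits in ql plus 2 high bits in qh; the
+// inner loops below reassemble them and remove the 6-bit bias of 32. Decode
+// sketch for one value (the exact offsets depend on the thread indices
+// computed inside the kernel):
+//   q   = (ql & 0xF) | (high_2_bits << 4);  // 0..63
+//   val = d * scale * (q - 32);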
+static void dequantize_mul_mat_vec_q6_k(const void * __restrict__ vx, const float * __restrict__ yy, float * __restrict__ dst, const int ncols, int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+
+ static_assert(16%K_QUANTS_PER_ITERATION == 0, "16 must be divisible by K_QUANTS_PER_ITERATION");
+
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+    if (row >= nrows) return;
+
+ const int num_blocks_per_row = ncols / QK_K;
+ const int ib0 = row*num_blocks_per_row;
+
+ const block_q6_K * x = (const block_q6_K *)vx + ib0;
+
+#if QK_K == 256
+
+    const int tid =
+        item_ct1.get_local_id(2) / K_QUANTS_PER_ITERATION;  // 0...31 or 0...15
+    const int ix =
+        item_ct1.get_local_id(2) % K_QUANTS_PER_ITERATION;  // 0 or 0...1
+
+ const int step = 16/K_QUANTS_PER_ITERATION; // 16 or 8
+
+ const int im = tid/step; // 0 or 1. 0 computes 0..., 1 computes 128...
+ const int in = tid - step*im; // 0...15 or 0...7
+
+#if K_QUANTS_PER_ITERATION == 1
+ const int l0 = K_QUANTS_PER_ITERATION*in; // 0...15
+ const int is = 0;
+#else
+ const int l0 = 4 * in; // 0, 4, 8, ..., 28
+ const int is = in / 4;
+#endif
+ const int ql_offset = 64*im + l0;
+ const int qh_offset = 32*im + l0;
+ const int s_offset = 8*im + is;
+ const int y_offset = 128*im + l0;
+
+ float tmp = 0; // partial sum for thread in warp
+
+ for (int i = ix; i < num_blocks_per_row; i += K_QUANTS_PER_ITERATION) {
+
+ const float * y = yy + i * QK_K + y_offset;
+ const uint8_t * ql = x[i].ql + ql_offset;
+ const uint8_t * qh = x[i].qh + qh_offset;
+ const int8_t * s = x[i].scales + s_offset;
+
+ const float d = x[i].d;
+
+#if K_QUANTS_PER_ITERATION == 1
+ float sum = y[ 0] * s[0] * d * ((int8_t)((ql[ 0] & 0xF) | ((qh[ 0] & 0x03) << 4)) - 32)
+ + y[16] * s[1] * d * ((int8_t)((ql[16] & 0xF) | ((qh[16] & 0x03) << 4)) - 32)
+ + y[32] * s[2] * d * ((int8_t)((ql[32] & 0xF) | ((qh[ 0] & 0x0c) << 2)) - 32)
+ + y[48] * s[3] * d * ((int8_t)((ql[48] & 0xF) | ((qh[16] & 0x0c) << 2)) - 32)
+ + y[64] * s[4] * d * ((int8_t)((ql[ 0] >> 4) | ((qh[ 0] & 0x30) >> 0)) - 32)
+ + y[80] * s[5] * d * ((int8_t)((ql[16] >> 4) | ((qh[16] & 0x30) >> 0)) - 32)
+ + y[96] * s[6] * d * ((int8_t)((ql[32] >> 4) | ((qh[ 0] & 0xc0) >> 2)) - 32)
+ +y[112] * s[7] * d * ((int8_t)((ql[48] >> 4) | ((qh[16] & 0xc0) >> 2)) - 32);
+ tmp += sum;
+#else
+ float sum = 0;
+ for (int l = 0; l < 4; ++l) {
+ sum += y[l+ 0] * s[0] * d * ((int8_t)((ql[l+ 0] & 0xF) | (((qh[l] >> 0) & 3) << 4)) - 32)
+ + y[l+32] * s[2] * d * ((int8_t)((ql[l+32] & 0xF) | (((qh[l] >> 2) & 3) << 4)) - 32)
+ + y[l+64] * s[4] * d * ((int8_t)((ql[l+ 0] >> 4) | (((qh[l] >> 4) & 3) << 4)) - 32)
+ + y[l+96] * s[6] * d * ((int8_t)((ql[l+32] >> 4) | (((qh[l] >> 6) & 3) << 4)) - 32);
+ }
+ tmp += sum;
+#endif
+
+ }
+
+#else
+
+ const int tid = item_ct1.get_local_id(2)/(2*K_QUANTS_PER_ITERATION); // 0...7
+ const int ix = item_ct1.get_local_id(2)%(2*K_QUANTS_PER_ITERATION); // 0...3
+
+ const int step = tid * K_QUANTS_PER_ITERATION;
+
+ float tmp = 0; // partial sum for thread in warp
+
+ for (int i = ix; i < num_blocks_per_row; i += 2*K_QUANTS_PER_ITERATION) {
+
+ const float * y = yy + i * QK_K + step;
+ const uint8_t * ql = x[i].ql + step;
+ const uint8_t * qh = x[i].qh + step;
+ const int8_t * s = x[i].scales;
+
+        const float d = x[i].d;
+
+ float sum = 0;
+ for (int j = 0; j < K_QUANTS_PER_ITERATION; ++j) {
+ sum += y[j+ 0] * s[0] * d * ((int8_t)((ql[j+ 0] & 0xF) | ((qh[j] & 0x03) << 4)) - 32)
+ + y[j+16] * s[1] * d * ((int8_t)((ql[j+16] & 0xF) | ((qh[j] & 0x0c) << 2)) - 32)
+ + y[j+32] * s[2] * d * ((int8_t)((ql[j+ 0] >> 4) | ((qh[j] & 0x30) >> 0)) - 32)
+ + y[j+48] * s[3] * d * ((int8_t)((ql[j+16] >> 4) | ((qh[j] & 0xc0) >> 2)) - 32);
+ }
+ tmp += sum;
+
+ }
+
+#endif
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = QK_WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (tid == 0) {
+ dst[row] = tmp;
+ }
+}
+
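+// The launchers below assign one sub-group of WARP_SIZE work-items to each
+// matrix row, with GGML_SYCL_MMV_Y rows per work-group (the K-quant variants
+// use QK_WARP_SIZE and ny instead); work-groups are spread along the innermost
+// grid dimension, which permits the largest range.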
+static void dequantize_mul_mat_vec_q4_0_sycl_reorder(const void *vx, const dfloat *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ dequantize_mul_mat_vec_reorder<QK4_0, QR4_0, dequantize_q4_0_reorder>(
+ vx, y, dst, ncols, nrows, item_ct1);
+ });
+ }
+}
+
+
+static void dequantize_mul_mat_vec_q4_0_sycl(const void *vx, const dfloat *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ // the number of rows may exceed maximum grid size in the y or z dimensions, use the x dimension instead
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ dequantize_mul_mat_vec<QK4_0, QR4_0, dequantize_q4_0>(
+ vx, y, dst, ncols, nrows, item_ct1);
+ });
+ }
+}
+
+static void dequantize_mul_mat_vec_q4_1_sycl(const void *vx, const dfloat *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ dequantize_mul_mat_vec<QK4_1, QR4_1, dequantize_q4_1>(
+ vx, y, dst, ncols, nrows, item_ct1);
+ });
+ }
+}
+
+static void dequantize_mul_mat_vec_q5_0_sycl(const void *vx, const dfloat *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ dequantize_mul_mat_vec<QK5_0, QR5_0, dequantize_q5_0>(
+ vx, y, dst, ncols, nrows, item_ct1);
+ });
+ }
+}
+
+static void dequantize_mul_mat_vec_q5_1_sycl(const void *vx, const dfloat *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ dequantize_mul_mat_vec<QK5_1, QR5_1, dequantize_q5_1>(
+ vx, y, dst, ncols, nrows, item_ct1);
+ });
+ }
+}
+
+static void dequantize_mul_mat_vec_q8_0_sycl(const void *vx, const dfloat *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % GGML_SYCL_DMMV_X == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ dequantize_mul_mat_vec<QK8_0, QR8_0, dequantize_q8_0>(
+ vx, y, dst, ncols, nrows, item_ct1);
+ });
+ }
+}
+
+static void dequantize_mul_mat_vec_q2_K_sycl(const void *vx, const float *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int ny = 2; // very slightly faster than 1 even when K_QUANTS_PER_ITERATION = 2
+ const int block_num_y = (nrows + ny - 1) / ny;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] {
+ dequantize_mul_mat_vec_q2_k(vx, y, dst, ncols, nrows, item_ct1);
+ });
+}
+
+static void dequantize_mul_mat_vec_q3_K_sycl(const void *vx, const float *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int ny = 2 / K_QUANTS_PER_ITERATION;
+ const int block_num_y = (nrows + ny - 1) / ny;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] {
+ dequantize_mul_mat_vec_q3_k(vx, y, dst, ncols, nrows, item_ct1);
+ });
+}
+
+static void dequantize_mul_mat_vec_q4_K_sycl(const void *vx, const float *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int ny = 2 / K_QUANTS_PER_ITERATION;
+ const int block_num_y = (nrows + ny - 1) / ny;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] {
+ dequantize_mul_mat_vec_q4_k(vx, y, dst, ncols, nrows, item_ct1);
+ });
+}
+
+static void dequantize_mul_mat_vec_q5_K_sycl(const void *vx, const float *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const sycl::range<3> block_dims(1, 1, QK_WARP_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] {
+ dequantize_mul_mat_vec_q5_k(vx, y, dst, ncols, item_ct1);
+ });
+}
+
+static void dequantize_mul_mat_vec_q6_K_sycl(const void *vx, const float *y,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int ny = 2 / K_QUANTS_PER_ITERATION;
+ const int block_num_y = (nrows + ny - 1) / ny;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, ny, QK_WARP_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(QK_WARP_SIZE)]] {
+ dequantize_mul_mat_vec_q6_k(vx, y, dst, ncols, nrows, item_ct1);
+ });
+}
+
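+// Entry point for dequantize + mul_mat_vec: with GGML_SYCL_F16 enabled, the
+// f32 activations are first converted to fp16 for the quant types that have an
+// fp16 kernel, then the call is dispatched to the per-type launcher above. The
+// K-quant kernels always consume the f32 activations (src1_ddf_i) directly.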
+void ggml_sycl_op_dequantize_mul_mat_vec(
+ ggml_backend_sycl_context & ctx,
+ const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
+ const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
+ float *dst_dd_i, const int64_t row_low, const int64_t row_high,
+ const int64_t src1_ncols, const int64_t src1_padded_row_size,
+ const dpct::queue_ptr &stream) {
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t row_diff = row_high - row_low;
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ // on some GPUs it is faster to convert src1 to half and to use half precision intrinsics
+#ifdef GGML_SYCL_F16
+ ggml_sycl_pool_alloc<sycl::half> src1_dfloat_a(ctx.pool());
+ sycl::half *src1_dfloat = nullptr; // dfloat == half
+
+ bool src1_convert_f16 =
+ src0->type == GGML_TYPE_Q4_0 || src0->type == GGML_TYPE_Q4_1 ||
+ src0->type == GGML_TYPE_Q5_0 || src0->type == GGML_TYPE_Q5_1 ||
+ src0->type == GGML_TYPE_Q8_0 || src0->type == GGML_TYPE_F16;
+
+ if (src1_convert_f16) {
+ scope_op_debug_print scope_dbg_print(__func__, "/to_fp16_sycl", dst, /*num_src=*/2,
+ " : converting src1 to fp16");
+ src1_dfloat = src1_dfloat_a.alloc(ne00);
+ const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type, dst);
+ GGML_ASSERT(to_fp16_sycl != nullptr);
+ to_fp16_sycl(src1_ddf_i, src1_dfloat, ne00, stream);
+ }
+#else
+ const dfloat * src1_dfloat = (const dfloat *) src1_ddf_i; // dfloat == float, no conversion
+#endif // GGML_SYCL_F16
+
+ switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ if ((ggml_tensor_extra_gpu*)dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu*)dst->src[0]->extra)->optimized_feature.reorder) {
+ dequantize_mul_mat_vec_q4_0_sycl_reorder(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ } else {
+ dequantize_mul_mat_vec_q4_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ }
+ break;
+ case GGML_TYPE_Q4_1:
+ dequantize_mul_mat_vec_q4_1_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_0:
+ dequantize_mul_mat_vec_q5_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_1:
+ dequantize_mul_mat_vec_q5_1_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q8_0:
+ dequantize_mul_mat_vec_q8_0_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q2_K:
+ dequantize_mul_mat_vec_q2_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q3_K:
+ dequantize_mul_mat_vec_q3_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q4_K:
+ if ((ggml_tensor_extra_gpu *) dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) {
+ // reorder is currently not supported for dmmv
+            GGML_ABORT("Unimplemented dequantize case for q4_k reorder");
+ } else {
+ dequantize_mul_mat_vec_q4_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ }
+ break;
+ case GGML_TYPE_Q5_K:
+ dequantize_mul_mat_vec_q5_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q6_K:
+ dequantize_mul_mat_vec_q6_K_sycl(src0_dd_i, src1_ddf_i, dst_dd_i, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_F16:
+ convert_mul_mat_vec_f16_sycl(src0_dd_i, src1_dfloat, dst_dd_i, ne00, row_diff, stream);
+ break;
+ default:
+ printf("ggml_sycl_op_dequantize_mul_mat_vec unsupported GGML_TYPE %d\n", src0->type);
+ GGML_ABORT("fatal error");
+ }
+
+ GGML_UNUSED(src1);
+ GGML_UNUSED(dst);
+ GGML_UNUSED(src1_ddq_i);
+ GGML_UNUSED(src1_ncols);
+ GGML_UNUSED(src1_padded_row_size);
+ GGML_UNUSED(ctx);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp b/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp
new file mode 100644
index 0000000..bd83735
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/dmmv.hpp
@@ -0,0 +1,27 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_DMMV_HPP
+#define GGML_SYCL_DMMV_HPP
+
+#include "common.hpp"
+
+
+void ggml_sycl_op_dequantize_mul_mat_vec(
+ ggml_backend_sycl_context & ctx,
+ const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
+ const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
+ float *dst_dd_i, const int64_t row_low, const int64_t row_high,
+ const int64_t src1_ncols, const int64_t src1_padded_row_size,
+ const dpct::queue_ptr &stream);
+
+#endif // GGML_SYCL_DMMV_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp b/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp
new file mode 100644
index 0000000..ece66a7
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/dpct/helper.hpp
@@ -0,0 +1,3002 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_DPCT_HELPER_HPP
+#define GGML_SYCL_DPCT_HELPER_HPP
+
+#include <sycl/sycl.hpp>
+#include <sycl/half_type.hpp>
+#include <oneapi/mkl.hpp>
+
+#include <map>
+
+#include "ggml.h"
+
+#if defined(__linux__)
+#include <sys/mman.h>
+#elif defined(_WIN64)
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#include <windows.h>
+#else
+#error "Only support Windows and Linux."
+#endif
+
+#if defined(__linux__)
+#include <unistd.h>
+#include <sys/syscall.h>
+#endif
+
+#define DPCT_COMPATIBILITY_TEMP (900)
+
+#if defined(_MSC_VER)
+#define __dpct_align__(n) __declspec(align(n))
+#define __dpct_inline__ __forceinline
+#else
+#define __dpct_align__(n) __attribute__((aligned(n)))
+#define __dpct_inline__ __inline__ __attribute__((always_inline))
+#endif
+
+#if defined(_MSC_VER)
+#define __dpct_noinline__ __declspec(noinline)
+#else
+#define __dpct_noinline__ __attribute__((noinline))
+#endif
+
+inline std::string get_device_type_name(const sycl::device &Device) {
+ auto DeviceType = Device.get_info<sycl::info::device::device_type>();
+ switch (DeviceType) {
+ case sycl::info::device_type::cpu:
+ return "cpu";
+ case sycl::info::device_type::gpu:
+ return "gpu";
+ case sycl::info::device_type::host:
+ return "host";
+ case sycl::info::device_type::accelerator:
+ return "acc";
+ default:
+ return "unknown";
+ }
+}
+
+inline std::string get_device_backend_and_type(const sycl::device &device) {
+ std::stringstream device_type;
+ sycl::backend backend = device.get_backend();
+ device_type << backend << ":" << get_device_type_name(device);
+ return device_type.str();
+}
+
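+// Per-group argument pack for batched oneMKL GEMM calls: transpose ops,
+// alpha/beta values, (m, n, k) sizes, leading dimensions and the group size
+// (field roles inferred from the field names and gemm_batch-style usage).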
+template <typename Ts> struct matrix_info_t {
+ oneapi::mkl::transpose transpose_info[2];
+ Ts value_info[2];
+ std::int64_t size_info[3];
+ std::int64_t ld_info[3];
+ std::int64_t groupsize_info;
+};
+
+namespace dpct
+{
+ typedef sycl::queue *queue_ptr;
+ typedef sycl::event *event_ptr;
+ typedef char *device_ptr;
+ typedef uint8_t byte_t;
+ typedef sycl::buffer<byte_t> buffer_t;
+
+ /// SYCL default exception handler
+ inline auto exception_handler = [](sycl::exception_list exceptions)
+ {
+ for (std::exception_ptr const &e : exceptions)
+ {
+ try
+ {
+ std::rethrow_exception(e);
+ }
+ catch (sycl::exception const &e)
+ {
+ std::cerr << "Caught asynchronous SYCL exception:" << std::endl
+ << e.what() << std::endl
+ << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ }
+ }
+ };
+
+ enum error_code
+ {
+ success = 0,
+ default_error = 999
+ };
+
+ enum memcpy_direction
+ {
+ host_to_host,
+ host_to_device,
+ device_to_host,
+ device_to_device,
+ automatic
+ };
+
+ enum memory_region
+ {
+ global = 0, // device global memory
+ constant, // device constant memory
+ local, // device local memory
+ shared, // memory which can be accessed by host and device
+ };
+
+ enum class library_data_t : unsigned char
+ {
+ real_float = 0,
+ complex_float,
+ real_double,
+ complex_double,
+ real_half,
+ complex_half,
+ real_bfloat16,
+ complex_bfloat16,
+ real_int4,
+ complex_int4,
+ real_uint4,
+ complex_uint4,
+ real_int8,
+ complex_int8,
+ real_uint8,
+ complex_uint8,
+ real_int16,
+ complex_int16,
+ real_uint16,
+ complex_uint16,
+ real_int32,
+ complex_int32,
+ real_uint32,
+ complex_uint32,
+ real_int64,
+ complex_int64,
+ real_uint64,
+ complex_uint64,
+ real_int8_4,
+ real_int8_32,
+ real_uint8_4,
+ library_data_t_size
+ };
+
+ template <typename T>
+ struct DataType
+ {
+ using T2 = T;
+ };
+ template <typename T>
+ struct DataType<sycl::vec<T, 2>>
+ {
+ using T2 = std::complex<T>;
+ };
+
+ static void destroy_event(event_ptr event)
+ {
+ delete event;
+ }
+
+ static inline unsigned int get_tid()
+ {
+#if defined(__linux__)
+ return syscall(SYS_gettid);
+#elif defined(_WIN64)
+ return GetCurrentThreadId();
+#else
+#error "Only Windows and Linux are supported."
+#endif
+ }
+
+ namespace detail
+ {
+ static void get_version(const sycl::device &dev, int &major, int &minor)
+ {
+ // Version string has the following format:
+ // a. OpenCL<space><major.minor><space><vendor-specific-information>
+ // b. <major.minor>
+            // c. <AmdGcnArchName> e.g. gfx1030
+ std::string ver;
+ ver = dev.get_info<sycl::info::device::version>();
+ std::string::size_type i = 0;
+ while (i < ver.size()) {
+ if (isdigit(ver[i]))
+ break;
+ i++;
+ }
+ major = std::stoi(&(ver[i]));
+ while (i < ver.size()) {
+ if (ver[i] == '.')
+ break;
+ i++;
+ }
+ if (i < ver.size()) {
+ // a. and b.
+ i++;
+ minor = std::stoi(&(ver[i]));
+ } else {
+ // c.
+ minor = 0;
+ }
+ }
+
+ template <typename tag, typename T>
+ class generic_error_type
+ {
+ public:
+ generic_error_type() = default;
+ generic_error_type(T value) : value{value} {}
+ operator T() const { return value; }
+
+ private:
+ T value;
+ };
+
+ } // namespace detail
+
+    // Copied from DPCT header files.
+ /// dim3 is used to store 3 component dimensions.
+ class dim3 {
+ public:
+ unsigned x, y, z;
+
+ constexpr dim3(unsigned x = 1, unsigned y = 1, unsigned z = 1)
+ : x(x), y(y), z(z) {}
+
+ dim3(const sycl::id<3> &r) : dim3(r[2], r[1], r[0]) {}
+
+ operator sycl::range<3>() const { return sycl::range<3>(z, y, x); }
+    }; // class dim3
+
+ inline dim3 operator*(const dim3 &a, const dim3 &b) {
+ return dim3{a.x * b.x, a.y * b.y, a.z * b.z};
+ }
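+    // dim3 mirrors CUDA's dim3 with x as the fastest-moving dimension; note
+    // that the conversion to sycl::range<3> reverses the order, e.g.
+    // dim3(64, 2, 1) becomes sycl::range<3>(1, 2, 64).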
+    // End of copy from DPCT header files.
+
+
+ /// Pitched 2D/3D memory data.
+ class pitched_data
+ {
+ public:
+ pitched_data() : pitched_data(nullptr, 0, 0, 0) {}
+ pitched_data(void *data, size_t pitch, size_t x, size_t y)
+ : _data(data), _pitch(pitch), _x(x), _y(y) {}
+
+ void *get_data_ptr() { return _data; }
+ void set_data_ptr(void *data) { _data = data; }
+
+ size_t get_pitch() { return _pitch; }
+ void set_pitch(size_t pitch) { _pitch = pitch; }
+
+ size_t get_x() { return _x; }
+ void set_x(size_t x) { _x = x; }
+
+ size_t get_y() { return _y; }
+ void set_y(size_t y) { _y = y; }
+
+ private:
+ void *_data;
+ size_t _pitch, _x, _y;
+ };
+
+ class device_info
+ {
+ public:
+ // get interface
+ const char *get_name() const { return _name; }
+ char *get_name() { return _name; }
+ template <typename WorkItemSizesTy = sycl::range<3>,
+ std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::range<3>> ||
+ std::is_same_v<WorkItemSizesTy, int *>,
+ int> = 0>
+ auto get_max_work_item_sizes() const
+ {
+ if constexpr (std::is_same_v<WorkItemSizesTy, sycl::range<3>>)
+ return sycl::range<3>(_max_work_item_sizes_i[0],
+ _max_work_item_sizes_i[1],
+ _max_work_item_sizes_i[2]);
+ else
+ {
+ return _max_work_item_sizes_i;
+ }
+ }
+ template <typename WorkItemSizesTy = sycl::range<3>,
+ std::enable_if_t<std::is_same_v<WorkItemSizesTy, sycl::range<3>> ||
+ std::is_same_v<WorkItemSizesTy, int *>,
+ int> = 0>
+ auto get_max_work_item_sizes()
+ {
+ if constexpr (std::is_same_v<WorkItemSizesTy, sycl::range<3>>)
+ return sycl::range<3>(_max_work_item_sizes_i[0],
+ _max_work_item_sizes_i[1],
+ _max_work_item_sizes_i[2]);
+ else
+ {
+ return _max_work_item_sizes_i;
+ }
+ }
+ bool get_host_unified_memory() const { return _host_unified_memory; }
+ int get_major_version() const { return _major; }
+ int get_minor_version() const { return _minor; }
+ int get_integrated() const { return _integrated; }
+ int get_max_clock_frequency() const { return _frequency; }
+ int get_max_compute_units() const { return _max_compute_units; }
+ int get_max_work_group_size() const { return _max_work_group_size; }
+ int get_max_sub_group_size() const { return _max_sub_group_size; }
+ int get_max_work_items_per_compute_unit() const
+ {
+ return _max_work_items_per_compute_unit;
+ }
+ int get_max_register_size_per_work_group() const
+ {
+ return _max_register_size_per_work_group;
+ }
+ template <typename NDRangeSizeTy = size_t *,
+ std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
+ std::is_same_v<NDRangeSizeTy, int *>,
+ int> = 0>
+ auto get_max_nd_range_size() const
+ {
+ if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
+ return _max_nd_range_size;
+ else
+ return _max_nd_range_size_i;
+ }
+ template <typename NDRangeSizeTy = size_t *,
+ std::enable_if_t<std::is_same_v<NDRangeSizeTy, size_t *> ||
+ std::is_same_v<NDRangeSizeTy, int *>,
+ int> = 0>
+ auto get_max_nd_range_size()
+ {
+ if constexpr (std::is_same_v<NDRangeSizeTy, size_t *>)
+ return _max_nd_range_size;
+ else
+ return _max_nd_range_size_i;
+ }
+ size_t get_global_mem_size() const { return _global_mem_size; }
+ size_t get_local_mem_size() const { return _local_mem_size; }
+ size_t get_max_mem_alloc_size() const { return _max_mem_alloc_size; }
+        /// Returns the maximum clock rate of the device's global memory in kHz. If
+        /// the compiler does not support this API, the default value of 3200000 kHz is returned.
+        unsigned int get_memory_clock_rate() const { return _memory_clock_rate; }
+        /// Returns the maximum bus width between device and memory in bits. If
+        /// the compiler does not support this API, the default value of 64 bits is returned.
+ unsigned int get_memory_bus_width() const { return _memory_bus_width; }
+ uint32_t get_device_id() const { return _device_id; }
+ std::array<unsigned char, 16> get_uuid() const { return _uuid; }
+ /// Returns global memory cache size in bytes.
+ unsigned int get_global_mem_cache_size() const
+ {
+ return _global_mem_cache_size;
+ }
+
+ // set interface
+ void set_name(const char *name)
+ {
+ size_t length = strlen(name);
+ if (length < 256)
+ {
+ std::memcpy(_name, name, length + 1);
+ }
+ else
+ {
+ std::memcpy(_name, name, 255);
+ _name[255] = '\0';
+ }
+ }
+ void set_max_work_item_sizes(const sycl::range<3> max_work_item_sizes)
+ {
+ for (int i = 0; i < 3; ++i)
+ _max_work_item_sizes_i[i] = max_work_item_sizes[i];
+ }
+ [[deprecated]] void
+ set_max_work_item_sizes(const sycl::id<3> max_work_item_sizes)
+ {
+ for (int i = 0; i < 3; ++i)
+ {
+ _max_work_item_sizes_i[i] = max_work_item_sizes[i];
+ }
+ }
+ void set_host_unified_memory(bool host_unified_memory)
+ {
+ _host_unified_memory = host_unified_memory;
+ }
+ void set_major_version(int major) { _major = major; }
+ void set_minor_version(int minor) { _minor = minor; }
+ void set_integrated(int integrated) { _integrated = integrated; }
+ void set_max_clock_frequency(int frequency) { _frequency = frequency; }
+ void set_max_compute_units(int max_compute_units)
+ {
+ _max_compute_units = max_compute_units;
+ }
+ void set_global_mem_size(size_t global_mem_size)
+ {
+ _global_mem_size = global_mem_size;
+ }
+ void set_local_mem_size(size_t local_mem_size)
+ {
+ _local_mem_size = local_mem_size;
+ }
+ void set_max_mem_alloc_size(size_t max_mem_alloc_size)
+ {
+ _max_mem_alloc_size = max_mem_alloc_size;
+ }
+ void set_max_work_group_size(int max_work_group_size)
+ {
+ _max_work_group_size = max_work_group_size;
+ }
+ void set_max_sub_group_size(int max_sub_group_size)
+ {
+ _max_sub_group_size = max_sub_group_size;
+ }
+ void
+ set_max_work_items_per_compute_unit(int max_work_items_per_compute_unit)
+ {
+ _max_work_items_per_compute_unit = max_work_items_per_compute_unit;
+ }
+ void set_max_nd_range_size(int max_nd_range_size[])
+ {
+ for (int i = 0; i < 3; i++)
+ {
+ _max_nd_range_size[i] = max_nd_range_size[i];
+ _max_nd_range_size_i[i] = max_nd_range_size[i];
+ }
+ }
+ void set_memory_clock_rate(unsigned int memory_clock_rate)
+ {
+ _memory_clock_rate = memory_clock_rate;
+ }
+ void set_memory_bus_width(unsigned int memory_bus_width)
+ {
+ _memory_bus_width = memory_bus_width;
+ }
+ void
+ set_max_register_size_per_work_group(int max_register_size_per_work_group)
+ {
+ _max_register_size_per_work_group = max_register_size_per_work_group;
+ }
+ void set_device_id(uint32_t device_id)
+ {
+ _device_id = device_id;
+ }
+ void set_uuid(std::array<unsigned char, 16> uuid)
+ {
+ _uuid = std::move(uuid);
+ }
+ void set_global_mem_cache_size(unsigned int global_mem_cache_size)
+ {
+ _global_mem_cache_size = global_mem_cache_size;
+ }
+
+ private:
+ char _name[256];
+ int _max_work_item_sizes_i[3];
+ bool _host_unified_memory = false;
+ int _major;
+ int _minor;
+ int _integrated = 0;
+ int _frequency;
+        // Estimated default value: 3200000 kHz.
+        unsigned int _memory_clock_rate = 3200000;
+        // Estimated default value: 64 bits.
+        unsigned int _memory_bus_width = 64;
+ unsigned int _global_mem_cache_size;
+ int _max_compute_units;
+ int _max_work_group_size;
+ int _max_sub_group_size;
+ int _max_work_items_per_compute_unit;
+ int _max_register_size_per_work_group;
+ size_t _global_mem_size;
+ size_t _local_mem_size;
+ size_t _max_mem_alloc_size;
+ size_t _max_nd_range_size[3];
+ int _max_nd_range_size_i[3];
+ uint32_t _device_id;
+ std::array<unsigned char, 16> _uuid;
+ };
+
+ static int get_major_version(const sycl::device &dev)
+ {
+ int major, minor;
+ detail::get_version(dev, major, minor);
+ return major;
+ }
+
+ static int get_minor_version(const sycl::device &dev)
+ {
+ int major, minor;
+ detail::get_version(dev, major, minor);
+ return minor;
+ }
+
+ static void get_device_info(device_info &out, const sycl::device &dev)
+ {
+ device_info prop;
+ prop.set_name(dev.get_info<sycl::info::device::name>().c_str());
+
+ int major, minor;
+ detail::get_version(dev, major, minor);
+ prop.set_major_version(major);
+ prop.set_minor_version(minor);
+
+ prop.set_max_work_item_sizes(
+#if (__SYCL_COMPILER_VERSION && __SYCL_COMPILER_VERSION < 20220902)
+ // oneAPI DPC++ compiler older than 2022/09/02, where max_work_item_sizes
+ // is an enum class element
+ dev.get_info<sycl::info::device::max_work_item_sizes>());
+#else
+ // SYCL 2020-conformant code, max_work_item_sizes is a struct templated by
+ // an int
+ dev.get_info<sycl::info::device::max_work_item_sizes<3>>());
+#endif
+ prop.set_host_unified_memory(dev.has(sycl::aspect::usm_host_allocations));
+
+ prop.set_max_clock_frequency(
+ dev.get_info<sycl::info::device::max_clock_frequency>() * 1000);
+
+ prop.set_max_compute_units(
+ dev.get_info<sycl::info::device::max_compute_units>());
+ prop.set_max_work_group_size(
+ dev.get_info<sycl::info::device::max_work_group_size>());
+ prop.set_global_mem_size(dev.get_info<sycl::info::device::global_mem_size>());
+ prop.set_local_mem_size(dev.get_info<sycl::info::device::local_mem_size>());
+ prop.set_max_mem_alloc_size(dev.get_info<sycl::info::device::max_mem_alloc_size>());
+
+#if (defined(SYCL_EXT_INTEL_DEVICE_INFO) && SYCL_EXT_INTEL_DEVICE_INFO >= 6)
+ if (dev.has(sycl::aspect::ext_intel_memory_clock_rate))
+ {
+ unsigned int tmp =
+ dev.get_info<sycl::ext::intel::info::device::memory_clock_rate>();
+ if (tmp != 0)
+ prop.set_memory_clock_rate(1000 * tmp);
+ }
+ if (dev.has(sycl::aspect::ext_intel_memory_bus_width))
+ {
+ prop.set_memory_bus_width(
+ dev.get_info<sycl::ext::intel::info::device::memory_bus_width>());
+ }
+ if (dev.has(sycl::aspect::ext_intel_device_id))
+ {
+ prop.set_device_id(
+ dev.get_info<sycl::ext::intel::info::device::device_id>());
+ }
+ if (dev.has(sycl::aspect::ext_intel_device_info_uuid))
+ {
+ prop.set_uuid(dev.get_info<sycl::ext::intel::info::device::uuid>());
+ }
+#elif defined(_MSC_VER) && !defined(__clang__)
+#pragma message("get_device_info: querying memory_clock_rate and \
+ memory_bus_width are not supported by the compiler used. \
+ Use 3200000 kHz as memory_clock_rate default value. \
+ Use 64 bits as memory_bus_width default value.")
+#else
+#warning "get_device_info: querying memory_clock_rate and \
+ memory_bus_width are not supported by the compiler used. \
+ Use 3200000 kHz as memory_clock_rate default value. \
+ Use 64 bits as memory_bus_width default value."
+#endif
+
+ size_t max_sub_group_size = 1;
+ std::vector<size_t> sub_group_sizes =
+ dev.get_info<sycl::info::device::sub_group_sizes>();
+
+ for (const auto &sub_group_size : sub_group_sizes)
+ {
+ if (max_sub_group_size < sub_group_size)
+ max_sub_group_size = sub_group_size;
+ }
+
+ prop.set_max_sub_group_size(max_sub_group_size);
+
+ prop.set_max_work_items_per_compute_unit(
+ dev.get_info<sycl::info::device::max_work_group_size>());
+ int max_nd_range_size[] = {0x7FFFFFFF, 0x7FFFFFFF, 0x7FFFFFFF};
+ prop.set_max_nd_range_size(max_nd_range_size);
+
+        // Estimated max register size per work group; adjust the value
+        // according to device properties.
+ prop.set_max_register_size_per_work_group(65536);
+
+ prop.set_global_mem_cache_size(
+ dev.get_info<sycl::info::device::global_mem_cache_size>());
+ out = prop;
+ }
+
+ /// dpct device extension
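+    /// Wraps a sycl::device together with an in-order queue, an out-of-order
+    /// queue and a saved-queue slot, all created at construction; queue
+    /// bookkeeping is guarded by m_mutex.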
+ class device_ext : public sycl::device {
+ typedef std::mutex mutex_type;
+
+ public:
+ device_ext() : sycl::device() {}
+ ~device_ext() {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ clear_queues();
+ }
+ device_ext(const sycl::device &base) : sycl::device(base) {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ init_queues();
+ }
+
+ int is_native_atomic_supported() { return 0; }
+ int get_major_version() const { return dpct::get_major_version(*this); }
+
+ int get_minor_version() const { return dpct::get_minor_version(*this); }
+
+ int get_max_compute_units() const {
+ return get_device_info().get_max_compute_units();
+ }
+
+    /// Return the maximum clock frequency of this device in kHz.
+ int get_max_clock_frequency() const {
+ return get_device_info().get_max_clock_frequency();
+ }
+
+ int get_integrated() const { return get_device_info().get_integrated(); }
+
+ int get_max_sub_group_size() const {
+ return get_device_info().get_max_sub_group_size();
+ }
+
+ int get_max_register_size_per_work_group() const {
+ return get_device_info().get_max_register_size_per_work_group();
+ }
+
+ int get_max_work_group_size() const {
+ return get_device_info().get_max_work_group_size();
+ }
+
+ int get_mem_base_addr_align() const {
+ return get_info<sycl::info::device::mem_base_addr_align>();
+ }
+
+ size_t get_global_mem_size() const {
+ return get_device_info().get_global_mem_size();
+ }
+
+ size_t get_max_mem_alloc_size() const {
+ return get_device_info().get_max_mem_alloc_size();
+ }
+
+ /// Get the number of bytes of free and total memory on the SYCL device.
+    /// \param [out] free_memory The number of bytes of free memory on the SYCL device.
+    /// \param [out] total_memory The number of bytes of total memory on the SYCL device.
+ void get_memory_info(size_t &free_memory, size_t &total_memory) {
+ total_memory = get_device_info().get_global_mem_size();
+ const char *warning_info =
+ "get_memory_info: [warning] ext_intel_free_memory is not "
+ "supported (export/set ZES_ENABLE_SYSMAN=1 to support), "
+ "use total memory as free memory";
+#if (defined(__SYCL_COMPILER_VERSION) && __SYCL_COMPILER_VERSION >= 20221105)
+ if (!has(sycl::aspect::ext_intel_free_memory)) {
+ std::cerr << warning_info << std::endl;
+ free_memory = total_memory;
+ } else {
+ free_memory = get_info<sycl::ext::intel::info::device::free_memory>();
+ }
+#else
+ std::cerr << warning_info << std::endl;
+ free_memory = total_memory;
+#if defined(_MSC_VER) && !defined(__clang__)
+#pragma message("Querying the number of bytes of free memory is not supported")
+#else
+#warning "Querying the number of bytes of free memory is not supported"
+#endif
+#endif
+ }
+
+ void get_device_info(device_info &out) const {
+ dpct::get_device_info(out, *this);
+ }
+
+ device_info get_device_info() const {
+ device_info prop;
+ dpct::get_device_info(prop, *this);
+ return prop;
+ }
+
+ void reset() {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ clear_queues();
+ init_queues();
+ }
+
+ sycl::queue &in_order_queue() { return _q_in_order; }
+
+ sycl::queue &out_of_order_queue() { return _q_out_of_order; }
+
+ sycl::queue &default_queue() { return in_order_queue(); }
+
+ void queues_wait_and_throw() {
+ std::unique_lock<mutex_type> lock(m_mutex);
+ lock.unlock();
+ for (auto &q : _queues) {
+ q.wait_and_throw();
+ }
+        // Re-acquire the lock to guard the destruction of the queues and keep
+        // their reference counts safe.
+ lock.lock();
+ }
+
+ sycl::queue create_queue(bool enable_exception_handler = false) {
+ return create_in_order_queue(enable_exception_handler);
+ }
+
+ sycl::queue create_queue(sycl::device device,
+ bool enable_exception_handler = false) {
+ return create_in_order_queue(device, enable_exception_handler);
+ }
+
+ sycl::queue create_in_order_queue(bool enable_exception_handler = false) {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ return create_queue_impl(enable_exception_handler,
+ sycl::property::queue::in_order());
+ }
+
+ sycl::queue create_in_order_queue(sycl::device device,
+ bool enable_exception_handler = false) {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ return create_queue_impl(device, enable_exception_handler,
+ sycl::property::queue::in_order());
+ }
+
+ sycl::queue create_out_of_order_queue(
+ bool enable_exception_handler = false) {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ return create_queue_impl(enable_exception_handler);
+ }
+
+ void destroy_queue(sycl::queue queue) {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ _queues.erase(std::remove_if(_queues.begin(), _queues.end(),
+ [=](const sycl::queue &q) -> bool
+ {
+ return q == queue;
+ }),
+ _queues.end());
+ }
+ void set_saved_queue(sycl::queue q) {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ _saved_queue = q;
+ }
+ sycl::queue get_saved_queue() const {
+ std::lock_guard<mutex_type> lock(m_mutex);
+ return _saved_queue;
+ }
+
+ private:
+ void clear_queues() { _queues.clear(); }
+
+ void init_queues() {
+ _q_in_order =
+ create_queue_impl(true, sycl::property::queue::in_order());
+ _q_out_of_order = create_queue_impl(true);
+ _saved_queue = default_queue();
+ }
+
+ /// Caller should acquire resource \p m_mutex before calling this
+ /// function.
+ template <class... Properties>
+ sycl::queue create_queue_impl(bool enable_exception_handler,
+ Properties... properties) {
+ sycl::async_handler eh = {};
+ if (enable_exception_handler) {
+ eh = exception_handler;
+ }
+ _queues.push_back(sycl::queue(
+ *this, eh,
+ sycl::property_list(
+#ifdef DPCT_PROFILING_ENABLED
+ sycl::property::queue::enable_profiling(),
+#endif
+ properties...)));
+
+ return _queues.back();
+ }
+
+ template <class... Properties>
+ sycl::queue create_queue_impl(sycl::device device,
+ bool enable_exception_handler,
+ Properties... properties) {
+ sycl::async_handler eh = {};
+ if (enable_exception_handler) {
+ eh = exception_handler;
+ }
+ _queues.push_back(sycl::queue(
+ device, eh,
+ sycl::property_list(
+#ifdef DPCT_PROFILING_ENABLED
+ sycl::property::queue::enable_profiling(),
+#endif
+ properties...)));
+
+ return _queues.back();
+ }
+
+ void get_version(int &major, int &minor) const {
+ detail::get_version(*this, major, minor);
+ }
+ sycl::queue _q_in_order, _q_out_of_order;
+ sycl::queue _saved_queue;
+ std::vector<sycl::queue> _queues;
+ mutable mutex_type m_mutex;
+ };
+
+
+ /// device manager
+ class dev_mgr
+ {
+ public:
+ device_ext &current_device()
+ {
+ unsigned int dev_id = current_device_id();
+ check_id(dev_id);
+ return *_devs[dev_id];
+ }
+ device_ext &cpu_device() const
+ {
+ std::lock_guard<std::recursive_mutex> lock(m_mutex);
+ if (_cpu_device == -1)
+ {
+ throw std::runtime_error("no valid cpu device");
+ }
+ else
+ {
+ return *_devs[_cpu_device];
+ }
+ }
+ device_ext &get_device(unsigned int id) const
+ {
+ std::lock_guard<std::recursive_mutex> lock(m_mutex);
+ check_id(id);
+ return *_devs[id];
+ }
+ unsigned int current_device_id() const
+ {
+ std::lock_guard<std::recursive_mutex> lock(m_mutex);
+ auto it = _thread2dev_map.find(get_tid());
+ if (it != _thread2dev_map.end())
+ return it->second;
+ return DEFAULT_DEVICE_ID;
+ }
+
+ /// Select device with a device ID.
+ /// \param [in] id The id of the device which can
+ /// be obtained through get_device_id(const sycl::device).
+ void select_device(unsigned int id)
+ {
+ std::lock_guard<std::recursive_mutex> lock(m_mutex);
+ check_id(id);
+ _thread2dev_map[get_tid()] = id;
+ }
+ unsigned int device_count() { return _devs.size(); }
+
+ unsigned int get_device_id(const sycl::device &dev)
+ {
+ unsigned int id = 0;
+ for (auto &dev_item : _devs)
+ {
+ if (*dev_item == dev)
+ {
+ return id;
+ }
+ id++;
+ }
+ return -1;
+ }
+
+ inline std::string get_preferred_gpu_platform_name() {
+ std::string result;
+
+ std::string filter = "";
+ char* env = getenv("ONEAPI_DEVICE_SELECTOR");
+ if (env) {
+ if (std::strstr(env, "level_zero")) {
+ filter = "level-zero";
+ }
+ else if (std::strstr(env, "opencl")) {
+ filter = "opencl";
+ }
+ else if (std::strstr(env, "cuda")) {
+ filter = "cuda";
+ }
+ else if (std::strstr(env, "hip")) {
+ filter = "hip";
+ }
+ else {
+ throw std::runtime_error("invalid device filter: " + std::string(env));
+ }
+ } else {
+ auto default_device = sycl::device(sycl::default_selector_v);
+ auto default_platform_name = default_device.get_platform().get_info<sycl::info::platform::name>();
+
+ if (std::strstr(default_platform_name.c_str(), "Level-Zero") || default_device.is_cpu()) {
+ filter = "level-zero";
+ }
+ else if (std::strstr(default_platform_name.c_str(), "CUDA")) {
+ filter = "cuda";
+ }
+ else if (std::strstr(default_platform_name.c_str(), "HIP")) {
+ filter = "hip";
+ }
+ }
+
+ auto platform_list = sycl::platform::get_platforms();
+
+ for (const auto& platform : platform_list) {
+ auto devices = platform.get_devices();
+ auto gpu_dev = std::find_if(devices.begin(), devices.end(), [](const sycl::device& d) {
+ return d.is_gpu();
+ });
+
+ if (gpu_dev == devices.end()) {
+                // This platform has no GPU devices, skip it.
+ continue;
+ }
+
+ auto platform_name = platform.get_info<sycl::info::platform::name>();
+ std::string platform_name_low_case;
+ platform_name_low_case.resize(platform_name.size());
+
+ std::transform(
+ platform_name.begin(), platform_name.end(), platform_name_low_case.begin(), ::tolower);
+
+ if (platform_name_low_case.find(filter) == std::string::npos) {
+                // Platform name does not match the requested filter, skip it.
+ continue;
+ }
+
+ result = platform_name;
+ }
+
+ if (result.empty())
+            throw std::runtime_error("cannot find preferred GPU platform");
+
+ return result;
+ }
+
+ template <class DeviceSelector>
+ std::enable_if_t<
+ std::is_invocable_r_v<int, DeviceSelector, const sycl::device &>>
+ select_device(const DeviceSelector &selector = sycl::gpu_selector_v)
+ {
+ sycl::device selected_device = sycl::device(selector);
+ unsigned int selected_device_id = get_device_id(selected_device);
+ select_device(selected_device_id);
+ }
+
+ /// Returns the instance of device manager singleton.
+ static dev_mgr &instance()
+ {
+ static dev_mgr d_m;
+ return d_m;
+ }
+ dev_mgr(const dev_mgr &) = delete;
+ dev_mgr &operator=(const dev_mgr &) = delete;
+ dev_mgr(dev_mgr &&) = delete;
+ dev_mgr &operator=(dev_mgr &&) = delete;
+
+ private:
+ mutable std::recursive_mutex m_mutex;
+ static bool compare_dev(sycl::device &device1, sycl::device &device2)
+ {
+ sycl::backend backend1 = device1.get_backend();
+ sycl::backend backend2 = device2.get_backend();
+ // levelzero backends always come first
+ if(backend1 == sycl::backend::ext_oneapi_level_zero && backend2 != sycl::backend::ext_oneapi_level_zero) return true;
+ if(backend1 != sycl::backend::ext_oneapi_level_zero && backend2 == sycl::backend::ext_oneapi_level_zero) return false;
+ dpct::device_info prop1;
+ dpct::get_device_info(prop1, device1);
+ dpct::device_info prop2;
+ dpct::get_device_info(prop2, device2);
+ return prop1.get_max_compute_units() > prop2.get_max_compute_units();
+ }
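+        // Backend keys are ordered by a fixed priority: Level Zero GPU first,
+        // then OpenCL GPU, CUDA GPU, HIP GPU, OpenCL CPU and finally OpenCL
+        // accelerators (see convert_backend_index below).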
+ static int convert_backend_index(std::string & backend) {
+ if (backend == "ext_oneapi_level_zero:gpu") return 0;
+ if (backend == "opencl:gpu") return 1;
+ if (backend == "ext_oneapi_cuda:gpu") return 2;
+ if (backend == "ext_oneapi_hip:gpu") return 3;
+ if (backend == "opencl:cpu") return 4;
+ if (backend == "opencl:acc") return 5;
+ printf("convert_backend_index: can't handle backend=%s\n", backend.c_str());
+ GGML_ABORT("fatal error");
+ }
+ static bool compare_backend(std::string &backend1, std::string &backend2) {
+ return convert_backend_index(backend1) < convert_backend_index(backend2);
+ }
+ dev_mgr()
+ {
+ sycl::device default_device =
+ sycl::device(sycl::default_selector_v);
+ _devs.push_back(std::make_shared<device_ext>(default_device));
+
+ std::vector<sycl::device> sycl_all_devs;
+ // Collect other devices except for the default device.
+ if (default_device.is_cpu())
+ _cpu_device = 0;
+
+ auto Platforms = sycl::platform::get_platforms();
+ // Keep track of the number of devices per backend
+ std::map<sycl::backend, size_t> DeviceNums;
+ std::map<std::string, std::vector<sycl::device>> backend_devices;
+ auto preferred_platform_name = get_preferred_gpu_platform_name();
+
+ while (!Platforms.empty()) {
+ auto Platform = Platforms.back();
+ Platforms.pop_back();
+ auto platform_name = Platform.get_info<sycl::info::platform::name>();
+ if (platform_name.compare(preferred_platform_name) != 0) {
+ continue;
+ }
+ auto devices = Platform.get_devices();
+ std::string backend_type = get_device_backend_and_type(devices[0]);
+ for (const auto &device : devices) {
+ backend_devices[backend_type].push_back(device);
+ }
+ }
+
+ std::vector<std::string> keys;
+ for(auto it = backend_devices.begin(); it != backend_devices.end(); ++it) {
+ keys.push_back(it->first);
+ }
+ std::sort(keys.begin(), keys.end(), compare_backend);
+
+ for (auto &key : keys) {
+ std::vector<sycl::device> devs = backend_devices[key];
+ std::sort(devs.begin(), devs.end(), compare_dev);
+ for (const auto &dev : devs) {
+ sycl_all_devs.push_back(dev);
+ }
+ }
+
+ for (auto &dev : sycl_all_devs)
+ {
+ if (dev == default_device)
+ {
+ continue;
+ }
+ _devs.push_back(std::make_shared<device_ext>(dev));
+ if (_cpu_device == -1 && dev.is_cpu())
+ {
+ _cpu_device = _devs.size() - 1;
+ }
+ }
+ }
+ void check_id(unsigned int id) const
+ {
+ if (id >= _devs.size())
+ {
+ throw std::runtime_error("invalid device id");
+ }
+ }
+ std::vector<std::shared_ptr<device_ext>> _devs;
+        /// DEFAULT_DEVICE_ID is used if current_device_id() cannot find the current
+        /// thread id in _thread2dev_map, which means the default device should be
+        /// used for the current thread.
+ const unsigned int DEFAULT_DEVICE_ID = 0;
+ /// thread-id to device-id map.
+ std::map<unsigned int, unsigned int> _thread2dev_map;
+ int _cpu_device = -1;
+ };
+
+ static inline sycl::queue &get_default_queue()
+ {
+ return dev_mgr::instance().current_device().default_queue();
+ }
+
+ namespace detail
+ {
+ enum class pointer_access_attribute
+ {
+ host_only = 0,
+ device_only,
+ host_device,
+ end
+ };
+
+ static pointer_access_attribute get_pointer_attribute(sycl::queue &q,
+ const void *ptr)
+ {
+ switch (sycl::get_pointer_type(ptr, q.get_context()))
+ {
+ case sycl::usm::alloc::unknown:
+ return pointer_access_attribute::host_only;
+ case sycl::usm::alloc::device:
+ return pointer_access_attribute::device_only;
+ case sycl::usm::alloc::shared:
+ case sycl::usm::alloc::host:
+ return pointer_access_attribute::host_device;
+ }
+ }
+
+ template <typename ArgT>
+ inline constexpr std::uint64_t get_type_combination_id(ArgT Val)
+ {
+ static_assert((unsigned char)library_data_t::library_data_t_size <=
+ std::numeric_limits<unsigned char>::max() &&
+ "library_data_t size exceeds limit.");
+ static_assert(std::is_same_v<ArgT, library_data_t>, "Unsupported ArgT");
+ return (std::uint64_t)Val;
+ }
+
+ template <typename FirstT, typename... RestT>
+ inline constexpr std::uint64_t get_type_combination_id(FirstT FirstVal,
+ RestT... RestVal)
+ {
+ static_assert((std::uint8_t)library_data_t::library_data_t_size <=
+ std::numeric_limits<unsigned char>::max() &&
+ "library_data_t size exceeds limit.");
+ static_assert(sizeof...(RestT) <= 8 && "Too many parameters");
+ static_assert(std::is_same_v<FirstT, library_data_t>, "Unsupported FirstT");
+ return get_type_combination_id(RestVal...) << 8 | ((std::uint64_t)FirstVal);
+ }
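+        // The recursion above packs up to nine library_data_t tags into one
+        // 64-bit key, one byte per tag, first argument in the lowest byte.
+        // Illustrative expansion for two tags:
+        //   get_type_combination_id(a, b) == ((std::uint64_t)b << 8) | (std::uint64_t)a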
+
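+        // mem_mgr implements buffer-backed "virtual" device pointers: it
+        // reserves a large range of address space with no physical backing,
+        // hands out aligned sub-ranges of it as opaque device pointers, and
+        // maps each range back to the sycl::buffer that actually backs it.
+        // m_map is keyed by the end address of each allocation, so
+        // upper_bound(ptr) finds the allocation that contains ptr.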
+ class mem_mgr
+ {
+ mem_mgr()
+ {
+ // Reserved address space, no real memory allocation happens here.
+#if defined(__linux__)
+ mapped_address_space =
+ (byte_t *)mmap(nullptr, mapped_region_size, PROT_NONE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+#elif defined(_WIN64)
+ mapped_address_space = (byte_t *)VirtualAlloc(
+ NULL, // NULL specified as the base address parameter
+ mapped_region_size, // Size of allocation
+ MEM_RESERVE, // Allocate reserved pages
+ PAGE_NOACCESS); // Protection = no access
+#else
+#error "Only Windows and Linux are supported."
+#endif
+ next_free = mapped_address_space;
+ }
+
+ public:
+ using buffer_id_t = int;
+
+ struct allocation
+ {
+ buffer_t buffer;
+ byte_t *alloc_ptr;
+ size_t size;
+ };
+
+ ~mem_mgr()
+ {
+#if defined(__linux__)
+ munmap(mapped_address_space, mapped_region_size);
+#elif defined(_WIN64)
+ VirtualFree(mapped_address_space, 0, MEM_RELEASE);
+#else
+#error "Only Windows and Linux are supported."
+#endif
+ }
+
+ mem_mgr(const mem_mgr &) = delete;
+ mem_mgr &operator=(const mem_mgr &) = delete;
+ mem_mgr(mem_mgr &&) = delete;
+ mem_mgr &operator=(mem_mgr &&) = delete;
+
+ /// Allocate
+ void *mem_alloc(size_t size)
+ {
+ if (!size)
+ return nullptr;
+ std::lock_guard<std::mutex> lock(m_mutex);
+ if (next_free + size > mapped_address_space + mapped_region_size)
+ {
+ throw std::runtime_error("dpct_malloc: out of memory for virtual memory pool");
+ }
+ // Allocation
+ sycl::range<1> r(size);
+ buffer_t buf(r);
+ allocation A{buf, next_free, size};
+ // Map allocation to device pointer
+ void *result = next_free;
+ m_map.emplace(next_free + size, A);
+ // Update pointer to the next free space.
+ next_free += (size + extra_padding + alignment - 1) & ~(alignment - 1);
+
+ return result;
+ }
+
+ /// Deallocate
+ void mem_free(const void *ptr)
+ {
+ if (!ptr)
+ return;
+ std::lock_guard<std::mutex> lock(m_mutex);
+ auto it = get_map_iterator(ptr);
+ m_map.erase(it);
+ }
+
+ /// map: device pointer -> allocation(buffer, alloc_ptr, size)
+ allocation translate_ptr(const void *ptr)
+ {
+ std::lock_guard<std::mutex> lock(m_mutex);
+ auto it = get_map_iterator(ptr);
+ return it->second;
+ }
+
+            /// Check whether the pointer is a (virtual) device pointer.
+ bool is_device_ptr(const void *ptr) const
+ {
+ std::lock_guard<std::mutex> lock(m_mutex);
+ return (mapped_address_space <= ptr) &&
+ (ptr < mapped_address_space + mapped_region_size);
+ }
+
+ /// Returns the instance of memory manager singleton.
+ static mem_mgr &instance()
+ {
+ static mem_mgr m;
+ return m;
+ }
+
+ private:
+ std::map<byte_t *, allocation> m_map;
+ mutable std::mutex m_mutex;
+ byte_t *mapped_address_space;
+ byte_t *next_free;
+ const size_t mapped_region_size = 128ull * 1024 * 1024 * 1024;
+ const size_t alignment = 256;
+            /// This padding may be set to a positive value to debug
+            /// out-of-bounds accesses.
+ const size_t extra_padding = 0;
+
+ std::map<byte_t *, allocation>::iterator get_map_iterator(const void *ptr)
+ {
+ auto it = m_map.upper_bound(const_cast<byte_t *>(reinterpret_cast<const byte_t *>(ptr)));
+ if (it == m_map.end())
+ {
+ // Not a virtual pointer.
+                    throw std::runtime_error("cannot get buffer from non-virtual pointer");
+ }
+ const allocation &alloc = it->second;
+ if (ptr < alloc.alloc_ptr)
+ {
+                    // Out of bounds.
+                    // This may happen if there is a gap between allocations due to
+                    // alignment or extra padding and the pointer points into this gap.
+ throw std::runtime_error("invalid virtual pointer");
+ }
+ return it;
+ }
+ };
+
+ template <class T, memory_region Memory, size_t Dimension>
+ class accessor;
+ template <memory_region Memory, class T = byte_t>
+ class memory_traits
+ {
+ public:
+ static constexpr sycl::access::target target =
+ sycl::access::target::device;
+ static constexpr sycl::access_mode mode =
+ (Memory == constant) ? sycl::access_mode::read
+ : sycl::access_mode::read_write;
+ static constexpr size_t type_size = sizeof(T);
+ using element_t =
+ typename std::conditional<Memory == constant, const T, T>::type;
+ using value_t = typename std::remove_cv<T>::type;
+ template <size_t Dimension = 1>
+ using accessor_t = typename std::conditional<
+ Memory == local, sycl::local_accessor<value_t, Dimension>,
+ sycl::accessor<T, Dimension, mode, target>>::type;
+ using pointer_t = T *;
+ };
+
+ static inline void *dpct_malloc(size_t size, sycl::queue &q)
+ {
+ return sycl::malloc_device(size, q.get_device(), q.get_context());
+ }
+
+#define PITCH_DEFAULT_ALIGN(x) (((x) + 31) & ~(0x1F))
+ static inline void *dpct_malloc(size_t &pitch, size_t x, size_t y, size_t z,
+ sycl::queue &q)
+ {
+ pitch = PITCH_DEFAULT_ALIGN(x);
+ return dpct_malloc(pitch * y * z, q);
+ }
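+
+ // For example, a row width of x = 100 bytes is rounded up to pitch = 128 by
+ // PITCH_DEFAULT_ALIGN, so every row starts on a 32-byte boundary.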
+
+ /**
+ * @brief Sets \p value to the first \p size elements starting from \p dev_ptr in \p q.
+ * @tparam valueT The type of the element to be set.
+ * @param [in] q The queue in which the operation is done.
+ * @param [in] dev_ptr Pointer to the virtual device memory address.
+ * @param [in] value The value to be set.
+ * @param [in] size Number of elements to be set to the value.
+ * @return An event representing the memset operation.
+ */
+ template <typename valueT>
+ static inline sycl::event dpct_memset(sycl::queue &q, void *dev_ptr,
+ valueT value, size_t size)
+ {
+ return q.fill(dev_ptr, value, size);
+ }
+
+ /**
+ * @brief Sets \p value to the 3D memory region pointed by \p data in \p q.
+ * @tparam valueT The type of the element to be set.
+ * @param [in] q The queue in which the operation is done.
+ * @param [in] data Pointer to the pitched device memory region.
+ * @param [in] value The value to be set.
+ * @param [in] size 3D memory region by number of elements.
+ * @return An event list representing the memset operations.
+ */
+ template <typename valueT>
+ static inline std::vector<sycl::event>
+ dpct_memset(sycl::queue &q, pitched_data data, valueT value,
+ sycl::range<3> size)
+ {
+ std::vector<sycl::event> event_list;
+ size_t slice = data.get_pitch() * data.get_y();
+ unsigned char *data_surface = (unsigned char *)data.get_data_ptr();
+ for (size_t z = 0; z < size.get(2); ++z)
+ {
+ unsigned char *data_ptr = data_surface;
+ for (size_t y = 0; y < size.get(1); ++y)
+ {
+ event_list.push_back(dpct_memset(q, data_ptr, value, size.get(0)));
+ data_ptr += data.get_pitch();
+ }
+ data_surface += slice;
+ }
+ return event_list;
+ }
+
+ /**
+ * @brief Sets \p val to the pitched 2D memory region pointed by \p ptr in \p q.
+ * @tparam valueT The type of the element to be set.
+ * @param [in] q The queue in which the operation is done.
+ * @param [in] ptr Pointer to the virtual device memory.
+ * @param [in] pitch The pitch size by number of elements, including padding.
+ * @param [in] val The value to be set.
+ * @param [in] x The width of memory region by number of elements.
+ * @param [in] y The height of memory region by number of elements.
+ * @return An event list representing the memset operations.
+ */
+ template <typename valueT>
+ static inline std::vector<sycl::event>
+ dpct_memset(sycl::queue &q, void *ptr, size_t pitch, valueT val, size_t x,
+ size_t y)
+ {
+ return dpct_memset(q, pitched_data(ptr, pitch, x, 1), val,
+ sycl::range<3>(x, y, 1));
+ }
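+
+ // Usage sketch (illustrative): zero a 16x8 byte region inside a pitched
+ // allocation, where `pitch` came from the pitched dpct_malloc above.
+ //
+ // size_t pitch;
+ // void *p = dpct_malloc(pitch, 16, 8, 1, q);
+ // auto events = dpct_memset(q, p, pitch, (unsigned char)0, 16, 8);
+ // sycl::event::wait(events);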
+
+ static memcpy_direction deduce_memcpy_direction(sycl::queue &q, void *to_ptr,
+ const void *from_ptr,
+ memcpy_direction dir)
+ {
+ switch (dir)
+ {
+ case memcpy_direction::host_to_host:
+ case memcpy_direction::host_to_device:
+ case memcpy_direction::device_to_host:
+ case memcpy_direction::device_to_device:
+ return dir;
+ case memcpy_direction::automatic:
+ {
+ // table[to_attribute][from_attribute]
+ static const memcpy_direction
+ direction_table[static_cast<unsigned>(pointer_access_attribute::end)]
+ [static_cast<unsigned>(pointer_access_attribute::end)] =
+ {{memcpy_direction::host_to_host,
+ memcpy_direction::device_to_host,
+ memcpy_direction::host_to_host},
+ {memcpy_direction::host_to_device,
+ memcpy_direction::device_to_device,
+ memcpy_direction::device_to_device},
+ {memcpy_direction::host_to_host,
+ memcpy_direction::device_to_device,
+ memcpy_direction::device_to_device}};
+ return direction_table[static_cast<unsigned>(get_pointer_attribute(
+ q, to_ptr))][static_cast<unsigned>(get_pointer_attribute(q, from_ptr))];
+ }
+ default:
+ throw std::runtime_error("dpct_memcpy: invalid direction value");
+ }
+ }
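+
+ // With `automatic`, e.g., a plain malloc'd host source and a device USM
+ // destination resolve through the table to host_to_device, while two device
+ // pointers resolve to device_to_device.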
+
+ static sycl::event
+ dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
+ memcpy_direction direction,
+ const std::vector<sycl::event> &dep_events = {})
+ {
+ if (!size)
+ return sycl::event{};
+ GGML_UNUSED(direction);
+ return q.memcpy(to_ptr, from_ptr, size, dep_events);
+ }
+
+ // Compute the byte extent a strided copy actually touches, so it never exceeds the region.
+ static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
+ size_t pitch)
+ {
+ return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
+ }
+
+ static inline size_t get_offset(sycl::id<3> id, size_t slice,
+ size_t pitch)
+ {
+ return slice * id.get(2) + pitch * id.get(1) + id.get(0);
+ }
+
+ /// Copy a 3D matrix specified by \p size from the 3D matrix specified by \p from_ptr
+ /// and \p from_range to another specified by \p to_ptr and \p to_range.
+ static inline std::vector<sycl::event>
+ dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
+ sycl::range<3> to_range, sycl::range<3> from_range,
+ sycl::id<3> to_id, sycl::id<3> from_id,
+ sycl::range<3> size, memcpy_direction direction,
+ const std::vector<sycl::event> &dep_events = {})
+ {
+ // RAII for host pointer
+ class host_buffer
+ {
+ void *_buf;
+ size_t _size;
+ sycl::queue &_q;
+ const std::vector<sycl::event> &_deps; // events the deferred free depends on
+
+ public:
+ host_buffer(size_t size, sycl::queue &q,
+ const std::vector<sycl::event> &deps)
+ : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
+ void *get_ptr() const { return _buf; }
+ size_t get_size() const { return _size; }
+ ~host_buffer()
+ {
+ if (_buf)
+ {
+ _q.submit([&](sycl::handler &cgh)
+ {
+ cgh.depends_on(_deps);
+ cgh.host_task([buf = _buf] { std::free(buf); }); });
+ }
+ }
+ };
+ std::vector<sycl::event> event_list;
+
+ size_t to_slice = to_range.get(1) * to_range.get(0),
+ from_slice = from_range.get(1) * from_range.get(0);
+ unsigned char *to_surface =
+ (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
+ const unsigned char *from_surface =
+ (const unsigned char *)from_ptr +
+ get_offset(from_id, from_slice, from_range.get(0));
+
+ if (to_slice == from_slice && to_slice == size.get(1) * size.get(0))
+ {
+ return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
+ direction, dep_events)};
+ }
+ direction = deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
+ size_t size_slice = size.get(1) * size.get(0);
+ switch (direction)
+ {
+ case host_to_host:
+ for (size_t z = 0; z < size.get(2); ++z)
+ {
+ unsigned char *to_ptr = to_surface;
+ const unsigned char *from_ptr = from_surface;
+ if (to_range.get(0) == from_range.get(0) &&
+ to_range.get(0) == size.get(0))
+ {
+ event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
+ direction, dep_events));
+ }
+ else
+ {
+ for (size_t y = 0; y < size.get(1); ++y)
+ {
+ event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
+ direction, dep_events));
+ to_ptr += to_range.get(0);
+ from_ptr += from_range.get(0);
+ }
+ }
+ to_surface += to_slice;
+ from_surface += from_slice;
+ }
+ break;
+ case host_to_device:
+ {
+ host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
+ event_list);
+ std::vector<sycl::event> host_events;
+ if (to_slice == size_slice)
+ {
+ // Copy host data to a temp host buffer with the shape of target.
+ host_events =
+ dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
+ sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
+ host_to_host, dep_events);
+ }
+ else
+ {
+ // Copy host data to a temp host buffer with the shape of target.
+ host_events = dpct_memcpy(
+ q, buf.get_ptr(), from_surface, to_range, from_range,
+ sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
+ // The destination may contain padding bytes whose contents must be
+ // preserved, so prefill the temp buffer from the device first.
+ std::vector<sycl::event>{
+ dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
+ device_to_host, dep_events)});
+ }
+ // Copy from temp host buffer to device with only one submit.
+ event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
+ buf.get_size(), host_to_device,
+ host_events));
+ break;
+ }
+ case device_to_host:
+ {
+ host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
+ event_list);
+ // Copy from host temp buffer to host target with reshaping.
+ event_list = dpct_memcpy(
+ q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
+ sycl::id<3>(0, 0, 0), size, host_to_host,
+ // Copy from device to temp host buffer with only one submit.
+ std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
+ buf.get_size(),
+ device_to_host, dep_events)});
+ break;
+ }
+ case device_to_device:
+ event_list.push_back(q.submit([&](sycl::handler &cgh){
+ cgh.depends_on(dep_events);
+ cgh.parallel_for<class dpct_memcpy_3d_detail>(
+ size,
+ [=](sycl::id<3> id) {
+ to_surface[get_offset(id, to_slice, to_range.get(0))] =
+ from_surface[get_offset(id, from_slice, from_range.get(0))];
+ }); }));
+ break;
+ default:
+ throw std::runtime_error("dpct_memcpy: invalid direction value");
+ }
+ return event_list;
+ }
+
+ /// memcpy 2D/3D matrix specified by pitched_data.
+ static inline std::vector<sycl::event>
+ dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
+ pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
+ memcpy_direction direction = automatic)
+ {
+ return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
+ sycl::range<3>(to.get_pitch(), to.get_y(), 1),
+ sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
+ size, direction);
+ }
+
+ /// memcpy 2D matrix with pitch.
+ static inline std::vector<sycl::event>
+ dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
+ size_t to_pitch, size_t from_pitch, size_t x, size_t y,
+ memcpy_direction direction = automatic)
+ {
+ return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
+ sycl::range<3>(from_pitch, y, 1),
+ sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
+ sycl::range<3>(x, y, 1), direction);
+ }
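+
+ // Usage sketch (illustrative; `src` and `rows` stand for caller data): copy
+ // a tightly packed 64-byte-wide host image into a pitched device buffer.
+ //
+ // size_t pitch;
+ // void *dst = dpct_malloc(pitch, 64, rows, 1, q);
+ // auto events = dpct_memcpy(q, dst, src, /*to_pitch=*/pitch,
+ // /*from_pitch=*/64, /*x=*/64, /*y=*/rows, host_to_device);
+ // sycl::event::wait(events);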
+
+ namespace deprecated
+ {
+
+ template <typename T, sycl::usm::alloc AllocKind>
+ class usm_allocator
+ {
+ private:
+ using Alloc = sycl::usm_allocator<T, AllocKind>;
+ Alloc _impl;
+
+ public:
+ using value_type = typename std::allocator_traits<Alloc>::value_type;
+ using pointer = typename std::allocator_traits<Alloc>::pointer;
+ using const_pointer = typename std::allocator_traits<Alloc>::const_pointer;
+ using void_pointer = typename std::allocator_traits<Alloc>::void_pointer;
+ using const_void_pointer =
+ typename std::allocator_traits<Alloc>::const_void_pointer;
+ using reference = typename std::allocator_traits<Alloc>::value_type &;
+ using const_reference =
+ const typename std::allocator_traits<Alloc>::value_type &;
+ using difference_type =
+ typename std::allocator_traits<Alloc>::difference_type;
+ using size_type = typename std::allocator_traits<Alloc>::size_type;
+ using propagate_on_container_copy_assignment = typename std::allocator_traits<
+ Alloc>::propagate_on_container_copy_assignment;
+ using propagate_on_container_move_assignment = typename std::allocator_traits<
+ Alloc>::propagate_on_container_move_assignment;
+ using propagate_on_container_swap =
+ typename std::allocator_traits<Alloc>::propagate_on_container_swap;
+ using is_always_equal =
+ typename std::allocator_traits<Alloc>::is_always_equal;
+
+ template <typename U>
+ struct rebind
+ {
+ typedef usm_allocator<U, AllocKind> other;
+ };
+
+ usm_allocator() : _impl(dpct::get_default_queue()) {}
+ ~usm_allocator() {}
+ usm_allocator(const usm_allocator &other) : _impl(other._impl) {}
+ usm_allocator(usm_allocator &&other) : _impl(std::move(other._impl)) {}
+ pointer address(reference r) { return &r; }
+ const_pointer address(const_reference r) { return &r; }
+ pointer allocate(size_type cnt, const_void_pointer hint = nullptr)
+ {
+ return std::allocator_traits<Alloc>::allocate(_impl, cnt, hint);
+ }
+ void deallocate(pointer p, size_type cnt)
+ {
+ std::allocator_traits<Alloc>::deallocate(_impl, p, cnt);
+ }
+ size_type max_size() const
+ {
+ return std::allocator_traits<Alloc>::max_size(_impl);
+ }
+ bool operator==(const usm_allocator &other) const { return _impl == other._impl; }
+ bool operator!=(const usm_allocator &other) const { return _impl != other._impl; }
+ };
+
+ } // namespace deprecated
+
+ inline void dpct_free(void *ptr,
+ const sycl::queue &q)
+ {
+ if (ptr)
+ {
+ sycl::free(ptr, q.get_context());
+ }
+ }
+
+ template <typename T>
+ inline auto get_memory(const void *x)
+ {
+ T *new_x = reinterpret_cast<T *>(const_cast<void *>(x));
+ return new_x;
+ }
+
+ template <typename T>
+ inline typename DataType<T>::T2 get_value(const T *s, sycl::queue &q)
+ {
+ using Ty = typename DataType<T>::T2;
+ Ty s_h;
+ if (get_pointer_attribute(q, s) == pointer_access_attribute::device_only)
+ detail::dpct_memcpy(q, (void *)&s_h, (const void *)s, sizeof(T), device_to_host)
+ .wait();
+ else
+ s_h = *reinterpret_cast<const Ty *>(s);
+ return s_h;
+ }
+
+ } // namespace detail
+
+ template <typename T>
+ inline auto get_value(const T *s, sycl::queue &q)
+ {
+ return detail::get_value(s, q);
+ }
+
+ namespace detail
+ {
+ template <class Ta, class Tb, class Tc, class Ts>
+ inline void gemm_impl(sycl::queue & q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m,
+ int n, int k, const void * alpha, const void * a, int lda, const void * b, int ldb,
+ const void * beta, void * c, int ldc) {
+ Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
+ Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
+ auto data_a = get_memory<const Ta>(a);
+ auto data_b = get_memory<const Tb>(b);
+ auto data_c = get_memory<Tc>(c);
+ oneapi::mkl::blas::column_major::gemm(q, a_trans, b_trans, m, n, k, alpha_value, data_a,
+ lda, data_b, ldb, beta_value, data_c, ldc);
+ }
+
+ template <typename VecT, class BinaryOperation, class = void>
+ class vectorized_binary
+ {
+ public:
+ inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op)
+ {
+ VecT v4;
+ for (size_t i = 0; i < v4.size(); ++i)
+ {
+ v4[i] = binary_op(a[i], b[i]);
+ }
+ return v4;
+ }
+ };
+
+ template <typename VecT, class BinaryOperation>
+ class vectorized_binary<
+ VecT, BinaryOperation,
+ std::void_t<std::invoke_result_t<BinaryOperation, VecT, VecT>>>
+ {
+ public:
+ inline VecT operator()(VecT a, VecT b, const BinaryOperation binary_op)
+ {
+ return binary_op(a, b).template as<VecT>();
+ }
+ };
+
+ template <class Ta, class Tb, class Tc, class Ts>
+ inline void gemm_batch_impl(sycl::queue & q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans,
+ int m, int n, int k, const void * alpha, const void ** a, int lda, const void ** b,
+ int ldb, const void * beta, void ** c, int ldc, int batch_size,
+ matrix_info_t<float> * matrix_info) {
+ Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
+ Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
+
+ matrix_info->transpose_info[0] = a_trans;
+ matrix_info->transpose_info[1] = b_trans;
+ matrix_info->value_info[0] = alpha_value;
+ matrix_info->value_info[1] = beta_value;
+ matrix_info->size_info[0] = m;
+ matrix_info->size_info[1] = n;
+ matrix_info->size_info[2] = k;
+ matrix_info->ld_info[0] = lda;
+ matrix_info->ld_info[1] = ldb;
+ matrix_info->ld_info[2] = ldc;
+ matrix_info->groupsize_info = batch_size;
+
+ sycl::event e = oneapi::mkl::blas::column_major::gemm_batch(
+ q, matrix_info->transpose_info, matrix_info->transpose_info + 1,
+ matrix_info->size_info, matrix_info->size_info + 1, matrix_info->size_info + 2,
+ reinterpret_cast<Ts *>(matrix_info->value_info), reinterpret_cast<const Ta **>(a), matrix_info->ld_info,
+ reinterpret_cast<const Tb **>(b), matrix_info->ld_info + 1,
+ reinterpret_cast<Ts *>(matrix_info->value_info + 1), reinterpret_cast<Tc **>(c),
+ matrix_info->ld_info + 2, 1, &(matrix_info->groupsize_info));
+ }
+
+ template <class Ta, class Tb, class Tc, class Ts>
+ inline void gemm_batch_impl(sycl::queue & q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans,
+ int m, int n, int k, const void * alpha, const void * a, int lda,
+ long long int stride_a, const void * b, int ldb, long long int stride_b,
+ const void * beta, void * c, int ldc, long long int stride_c, int batch_size) {
+ Ts alpha_value = dpct::get_value(reinterpret_cast<const Ts *>(alpha), q);
+ Ts beta_value = dpct::get_value(reinterpret_cast<const Ts *>(beta), q);
+ auto data_a = get_memory<const Ta>(a);
+ auto data_b = get_memory<const Tb>(b);
+ auto data_c = get_memory<Tc>(c);
+ oneapi::mkl::blas::column_major::gemm_batch(q, a_trans, b_trans, m, n, k, alpha_value,
+ data_a, lda, stride_a, data_b, ldb, stride_b, beta_value,
+ data_c, ldc, stride_c, batch_size);
+ }
+
+ } // namespace detail
+
+ template <typename VecT, class BinaryOperation>
+ inline unsigned vectorized_binary(unsigned a, unsigned b,
+ const BinaryOperation binary_op)
+ {
+ sycl::vec<unsigned, 1> v0{a}, v1{b};
+ auto v2 = v0.as<VecT>();
+ auto v3 = v1.as<VecT>();
+ auto v4 =
+ detail::vectorized_binary<VecT, BinaryOperation>()(v2, v3, binary_op);
+ v0 = v4.template as<sycl::vec<unsigned, 1>>();
+ return v0;
+ }
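+
+ // Usage sketch (illustrative; a_packed and b_packed are caller values):
+ // lane-wise saturating subtract of four packed 8-bit values, in the spirit
+ // of CUDA's per-byte SIMD intrinsics; sub_sat is defined further down in
+ // this header.
+ //
+ // unsigned r = vectorized_binary<sycl::uchar4>(a_packed, b_packed, sub_sat());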
+
+ static void async_dpct_memcpy(void *to_ptr, const void *from_ptr, size_t size,
+ memcpy_direction direction = automatic,
+ sycl::queue &q = dpct::get_default_queue())
+ {
+ detail::dpct_memcpy(q, to_ptr, from_ptr, size, direction);
+ }
+
+ static inline unsigned int select_device(unsigned int id)
+ {
+ dev_mgr::instance().select_device(id);
+ return id;
+ }
+
+ template <typename T>
+ T permute_sub_group_by_xor(sycl::sub_group g, T x, unsigned int mask,
+ unsigned int logical_sub_group_size = 32)
+ {
+ unsigned int id = g.get_local_linear_id();
+ unsigned int start_index =
+ id / logical_sub_group_size * logical_sub_group_size;
+ unsigned int target_offset = (id % logical_sub_group_size) ^ mask;
+ return sycl::select_from_group(g, x,
+ target_offset < logical_sub_group_size
+ ? start_index + target_offset
+ : id);
+ }
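+
+ // With logical_sub_group_size = 8 and mask = 1 lanes swap pairwise
+ // (0<->1, 2<->3, ...), the butterfly pattern behind sub-group reductions.
+ // Illustrative reduction over 8-lane groups (`sg` is the kernel's
+ // sycl::sub_group):
+ //
+ // for (unsigned m = 4; m > 0; m >>= 1)
+ // x += permute_sub_group_by_xor(sg, x, m, 8);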
+
+ template <typename T1, typename T2>
+ using dot_product_acc_t = std::conditional_t<
+ std::is_unsigned_v<T1> && std::is_unsigned_v<T2>,
+ uint32_t,
+ int32_t>;
+
+ template <typename T>
+ sycl::vec<T, 4> extract_and_sign_or_zero_extend4(T val) {
+ return sycl::vec<T, 1>(val)
+ .template as<sycl::vec<
+ std::conditional_t<std::is_signed_v<T>, int8_t, uint8_t>,
+ 4>>()
+ .template convert<T>();
+ }
+
+ template <typename T1, typename T2, typename T3>
+ inline auto dp4a(T1 a, T2 b, T3 c) {
+ dot_product_acc_t<T1, T2> res = c;
+ auto va = extract_and_sign_or_zero_extend4(a);
+ auto vb = extract_and_sign_or_zero_extend4(b);
+ res += va[0] * vb[0];
+ res += va[1] * vb[1];
+ res += va[2] * vb[2];
+ res += va[3] * vb[3];
+ return res;
+ }
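+
+ // Worked example: with int arguments a = 0x01020304 and b = 0x01010101,
+ // the bytes are read as little-endian lanes {4,3,2,1} and {1,1,1,1}, so
+ // dp4a(a, b, 0) = 4*1 + 3*1 + 2*1 + 1*1 = 10.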
+
+ struct sub_sat
+ {
+ template <typename T>
+ auto operator()(const T x, const T y) const
+ {
+ return sycl::sub_sat(x, y);
+ }
+ };
+
+ template <typename S, typename T>
+ inline T vectorized_min(T a, T b)
+ {
+ sycl::vec<T, 1> v0{a}, v1{b};
+ auto v2 = v0.template as<S>();
+ auto v3 = v1.template as<S>();
+ auto v4 = sycl::min(v2, v3);
+ v0 = v4.template as<sycl::vec<T, 1>>();
+ return v0;
+ }
+
+ inline float pow(const float a, const int b) { return sycl::pown(a, b); }
+ inline double pow(const double a, const int b) { return sycl::pown(a, b); }
+ inline float pow(const float a, const float b) { return sycl::pow(a, b); }
+ inline double pow(const double a, const double b) { return sycl::pow(a, b); }
+ template <typename T, typename U>
+ inline typename std::enable_if_t<std::is_floating_point_v<T>, T>
+ pow(const T a, const U b)
+ {
+ return sycl::pow(a, static_cast<T>(b));
+ }
+ template <typename T, typename U>
+ inline typename std::enable_if_t<!std::is_floating_point_v<T>, double>
+ pow(const T a, const U b)
+ {
+ return sycl::pow(static_cast<double>(a), static_cast<double>(b));
+ }
+
+ inline double min(const double a, const float b)
+ {
+ return sycl::fmin(a, static_cast<double>(b));
+ }
+ inline double min(const float a, const double b)
+ {
+ return sycl::fmin(static_cast<double>(a), b);
+ }
+ inline float min(const float a, const float b) { return sycl::fmin(a, b); }
+ inline double min(const double a, const double b) { return sycl::fmin(a, b); }
+ inline std::uint32_t min(const std::uint32_t a, const std::int32_t b)
+ {
+ return sycl::min(a, static_cast<std::uint32_t>(b));
+ }
+ inline std::uint32_t min(const std::int32_t a, const std::uint32_t b)
+ {
+ return sycl::min(static_cast<std::uint32_t>(a), b);
+ }
+ inline std::int32_t min(const std::int32_t a, const std::int32_t b)
+ {
+ return sycl::min(a, b);
+ }
+ inline std::uint32_t min(const std::uint32_t a, const std::uint32_t b)
+ {
+ return sycl::min(a, b);
+ }
+ inline std::uint64_t min(const std::uint64_t a, const std::int64_t b)
+ {
+ return sycl::min(a, static_cast<std::uint64_t>(b));
+ }
+ inline std::uint64_t min(const std::int64_t a, const std::uint64_t b)
+ {
+ return sycl::min(static_cast<std::uint64_t>(a), b);
+ }
+ inline std::int64_t min(const std::int64_t a, const std::int64_t b)
+ {
+ return sycl::min(a, b);
+ }
+ inline std::uint64_t min(const std::uint64_t a, const std::uint64_t b)
+ {
+ return sycl::min(a, b);
+ }
+ inline std::uint64_t min(const std::uint64_t a, const std::int32_t b)
+ {
+ return sycl::min(a, static_cast<std::uint64_t>(b));
+ }
+ inline std::uint64_t min(const std::int32_t a, const std::uint64_t b)
+ {
+ return sycl::min(static_cast<std::uint64_t>(a), b);
+ }
+ inline std::uint64_t min(const std::uint64_t a, const std::uint32_t b)
+ {
+ return sycl::min(a, static_cast<std::uint64_t>(b));
+ }
+ inline std::uint64_t min(const std::uint32_t a, const std::uint64_t b)
+ {
+ return sycl::min(static_cast<std::uint64_t>(a), b);
+ }
+ // max function overloads.
+ // For floating-point types, `float` or `double` arguments are acceptable.
+ // For integer types, `std::uint32_t`, `std::int32_t`, `std::uint64_t` or
+ // `std::int64_t` type arguments are acceptable.
+ inline double max(const double a, const float b)
+ {
+ return sycl::fmax(a, static_cast<double>(b));
+ }
+ inline double max(const float a, const double b)
+ {
+ return sycl::fmax(static_cast<double>(a), b);
+ }
+ inline float max(const float a, const float b) { return sycl::fmax(a, b); }
+ inline double max(const double a, const double b) { return sycl::fmax(a, b); }
+ inline std::uint32_t max(const std::uint32_t a, const std::int32_t b)
+ {
+ return sycl::max(a, static_cast<std::uint32_t>(b));
+ }
+ inline std::uint32_t max(const std::int32_t a, const std::uint32_t b)
+ {
+ return sycl::max(static_cast<std::uint32_t>(a), b);
+ }
+ inline std::int32_t max(const std::int32_t a, const std::int32_t b)
+ {
+ return sycl::max(a, b);
+ }
+ inline std::uint32_t max(const std::uint32_t a, const std::uint32_t b)
+ {
+ return sycl::max(a, b);
+ }
+ inline std::uint64_t max(const std::uint64_t a, const std::int64_t b)
+ {
+ return sycl::max(a, static_cast<std::uint64_t>(b));
+ }
+ inline std::uint64_t max(const std::int64_t a, const std::uint64_t b)
+ {
+ return sycl::max(static_cast<std::uint64_t>(a), b);
+ }
+ inline std::int64_t max(const std::int64_t a, const std::int64_t b)
+ {
+ return sycl::max(a, b);
+ }
+ inline std::uint64_t max(const std::uint64_t a, const std::uint64_t b)
+ {
+ return sycl::max(a, b);
+ }
+ inline std::uint64_t max(const std::uint64_t a, const std::int32_t b)
+ {
+ return sycl::max(a, static_cast<std::uint64_t>(b));
+ }
+ inline std::uint64_t max(const std::int32_t a, const std::uint64_t b)
+ {
+ return sycl::max(static_cast<std::uint64_t>(a), b);
+ }
+ inline std::uint64_t max(const std::uint64_t a, const std::uint32_t b)
+ {
+ return sycl::max(a, static_cast<std::uint64_t>(b));
+ }
+ inline std::uint64_t max(const std::uint32_t a, const std::uint64_t b)
+ {
+ return sycl::max(static_cast<std::uint64_t>(a), b);
+ }
+
+ inline void
+ has_capability_or_fail(const sycl::device &dev,
+ const std::initializer_list<sycl::aspect> &props)
+ {
+ for (const auto &it : props)
+ {
+ if (dev.has(it))
+ continue;
+ switch (it)
+ {
+ case sycl::aspect::fp64:
+ throw std::runtime_error("'double' is not supported in '" +
+ dev.get_info<sycl::info::device::name>() +
+ "' device");
+ break;
+ case sycl::aspect::fp16:
+ throw std::runtime_error("'half' is not supported in '" +
+ dev.get_info<sycl::info::device::name>() +
+ "' device");
+ break;
+ default:
+#define __SYCL_ASPECT(ASPECT, ID) \
+ case sycl::aspect::ASPECT: \
+ return #ASPECT;
+#define __SYCL_ASPECT_DEPRECATED(ASPECT, ID, MESSAGE) __SYCL_ASPECT(ASPECT, ID)
+#define __SYCL_ASPECT_DEPRECATED_ALIAS(ASPECT, ID, MESSAGE)
+ auto getAspectNameStr = [](sycl::aspect AspectNum) -> std::string
+ {
+ switch (AspectNum)
+ {
+#include <sycl/info/aspects.def>
+#include <sycl/info/aspects_deprecated.def>
+ default:
+ return "unknown aspect";
+ }
+ };
+#undef __SYCL_ASPECT_DEPRECATED_ALIAS
+#undef __SYCL_ASPECT_DEPRECATED
+#undef __SYCL_ASPECT
+ throw std::runtime_error(
+ "'" + getAspectNameStr(it) + "' is not supported in '" +
+ dev.get_info<sycl::info::device::name>() + "' device");
+ }
+ break;
+ }
+ }
+
+ static inline unsigned int get_current_device_id()
+ {
+ return dev_mgr::instance().current_device_id();
+ }
+
+ static inline device_ext &get_current_device()
+ {
+ return dev_mgr::instance().current_device();
+ }
+
+ static inline device_ext &get_device(unsigned int id)
+ {
+ return dev_mgr::instance().get_device(id);
+ }
+
+ static inline sycl::queue &get_in_order_queue()
+ {
+ return dev_mgr::instance().current_device().in_order_queue();
+ }
+
+ static sycl::event
+ dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr, size_t size,
+ memcpy_direction direction,
+ const std::vector<sycl::event> &dep_events = {})
+ {
+ if (!size)
+ return sycl::event{};
+ GGML_UNUSED(direction);
+ return q.memcpy(to_ptr, from_ptr, size, dep_events);
+ }
+
+ // Compute the byte extent a strided copy actually touches, so it never exceeds the region.
+ static inline size_t get_copy_range(sycl::range<3> size, size_t slice,
+ size_t pitch)
+ {
+ return slice * (size.get(2) - 1) + pitch * (size.get(1) - 1) + size.get(0);
+ }
+
+ static inline size_t get_offset(sycl::id<3> id, size_t slice,
+ size_t pitch)
+ {
+ return slice * id.get(2) + pitch * id.get(1) + id.get(0);
+ }
+
+ /// Copy a 3D matrix specified by \p size from the 3D matrix specified by \p from_ptr
+ /// and \p from_range to another specified by \p to_ptr and \p to_range.
+ static inline std::vector<sycl::event>
+ dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
+ sycl::range<3> to_range, sycl::range<3> from_range,
+ sycl::id<3> to_id, sycl::id<3> from_id,
+ sycl::range<3> size, memcpy_direction direction,
+ const std::vector<sycl::event> &dep_events = {})
+ {
+ // RAII for host pointer
+ class host_buffer
+ {
+ void *_buf;
+ size_t _size;
+ sycl::queue &_q;
+ const std::vector<sycl::event> &_deps; // events the deferred free depends on
+
+ public:
+ host_buffer(size_t size, sycl::queue &q,
+ const std::vector<sycl::event> &deps)
+ : _buf(std::malloc(size)), _size(size), _q(q), _deps(deps) {}
+ void *get_ptr() const { return _buf; }
+ size_t get_size() const { return _size; }
+ ~host_buffer()
+ {
+ if (_buf)
+ {
+ _q.submit([&](sycl::handler &cgh)
+ {
+ cgh.depends_on(_deps);
+ cgh.host_task([buf = _buf] { std::free(buf); }); });
+ }
+ }
+ };
+ std::vector<sycl::event> event_list;
+
+ size_t to_slice = to_range.get(1) * to_range.get(0),
+ from_slice = from_range.get(1) * from_range.get(0);
+ unsigned char *to_surface =
+ (unsigned char *)to_ptr + get_offset(to_id, to_slice, to_range.get(0));
+ const unsigned char *from_surface =
+ (const unsigned char *)from_ptr +
+ get_offset(from_id, from_slice, from_range.get(0));
+
+ if (to_slice == from_slice && to_slice == size.get(1) * size.get(0))
+ {
+ return {dpct_memcpy(q, to_surface, from_surface, to_slice * size.get(2),
+ direction, dep_events)};
+ }
+ direction = detail::deduce_memcpy_direction(q, to_ptr, from_ptr, direction);
+ size_t size_slice = size.get(1) * size.get(0);
+ switch (direction)
+ {
+ case host_to_host:
+ for (size_t z = 0; z < size.get(2); ++z)
+ {
+ unsigned char *to_ptr = to_surface;
+ const unsigned char *from_ptr = from_surface;
+ if (to_range.get(0) == from_range.get(0) &&
+ to_range.get(0) == size.get(0))
+ {
+ event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size_slice,
+ direction, dep_events));
+ }
+ else
+ {
+ for (size_t y = 0; y < size.get(1); ++y)
+ {
+ event_list.push_back(dpct_memcpy(q, to_ptr, from_ptr, size.get(0),
+ direction, dep_events));
+ to_ptr += to_range.get(0);
+ from_ptr += from_range.get(0);
+ }
+ }
+ to_surface += to_slice;
+ from_surface += from_slice;
+ }
+ break;
+ case host_to_device:
+ {
+ host_buffer buf(get_copy_range(size, to_slice, to_range.get(0)), q,
+ event_list);
+ std::vector<sycl::event> host_events;
+ if (to_slice == size_slice)
+ {
+ // Copy host data to a temp host buffer with the shape of target.
+ host_events =
+ dpct_memcpy(q, buf.get_ptr(), from_surface, to_range, from_range,
+ sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size,
+ host_to_host, dep_events);
+ }
+ else
+ {
+ // Copy host data to a temp host buffer with the shape of target.
+ host_events = dpct_memcpy(
+ q, buf.get_ptr(), from_surface, to_range, from_range,
+ sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0), size, host_to_host,
+ // The destination may contain padding bytes that must be preserved,
+ // so prefill the temp buffer from the device first.
+ std::vector<sycl::event>{
+ dpct_memcpy(q, buf.get_ptr(), to_surface, buf.get_size(),
+ device_to_host, dep_events)});
+ }
+ // Copy from temp host buffer to device with only one submit.
+ event_list.push_back(dpct_memcpy(q, to_surface, buf.get_ptr(),
+ buf.get_size(), host_to_device,
+ host_events));
+ break;
+ }
+ case device_to_host:
+ {
+ host_buffer buf(get_copy_range(size, from_slice, from_range.get(0)), q,
+ event_list);
+ // Copy from host temp buffer to host target with reshaping.
+ event_list = dpct_memcpy(
+ q, to_surface, buf.get_ptr(), to_range, from_range, sycl::id<3>(0, 0, 0),
+ sycl::id<3>(0, 0, 0), size, host_to_host,
+ // Copy from device to temp host buffer with only one submit.
+ std::vector<sycl::event>{dpct_memcpy(q, buf.get_ptr(), from_surface,
+ buf.get_size(),
+ device_to_host, dep_events)});
+ break;
+ }
+ case device_to_device:
+ event_list.push_back(q.submit([&](sycl::handler &cgh)
+ {
+ cgh.depends_on(dep_events);
+ cgh.parallel_for<class dpct_memcpy_3d_detail>(
+ size,
+ [=](sycl::id<3> id) {
+ to_surface[get_offset(id, to_slice, to_range.get(0))] =
+ from_surface[get_offset(id, from_slice, from_range.get(0))];
+ }); }));
+ break;
+ default:
+ throw std::runtime_error("dpct_memcpy: invalid direction value");
+ }
+ return event_list;
+ }
+
+ /// memcpy 2D/3D matrix specified by pitched_data.
+ static inline std::vector<sycl::event>
+ dpct_memcpy(sycl::queue &q, pitched_data to, sycl::id<3> to_id,
+ pitched_data from, sycl::id<3> from_id, sycl::range<3> size,
+ memcpy_direction direction = automatic)
+ {
+ return dpct_memcpy(q, to.get_data_ptr(), from.get_data_ptr(),
+ sycl::range<3>(to.get_pitch(), to.get_y(), 1),
+ sycl::range<3>(from.get_pitch(), from.get_y(), 1), to_id, from_id,
+ size, direction);
+ }
+
+ /// memcpy 2D matrix with pitch.
+ static inline std::vector<sycl::event>
+ dpct_memcpy(sycl::queue &q, void *to_ptr, const void *from_ptr,
+ size_t to_pitch, size_t from_pitch, size_t x, size_t y,
+ memcpy_direction direction = automatic)
+ {
+ return dpct_memcpy(q, to_ptr, from_ptr, sycl::range<3>(to_pitch, y, 1),
+ sycl::range<3>(from_pitch, y, 1),
+ sycl::id<3>(0, 0, 0), sycl::id<3>(0, 0, 0),
+ sycl::range<3>(x, y, 1), direction);
+ }
+
+ inline void gemm(sycl::queue & q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m, int n,
+ int k, const void * alpha, const void * a, library_data_t a_type, int lda, const void * b,
+ library_data_t b_type, int ldb, const void * beta, void * c, library_data_t c_type, int ldc,
+ library_data_t scaling_type) {
+ if (scaling_type == library_data_t::real_float &&
+ c_type == library_data_t::complex_float)
+ {
+ scaling_type = library_data_t::complex_float;
+ }
+ else if (scaling_type == library_data_t::real_double &&
+ c_type == library_data_t::complex_double)
+ {
+ scaling_type = library_data_t::complex_double;
+ }
+
+ std::uint64_t key =
+ detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
+ switch (key)
+ {
+ case detail::get_type_combination_id(
+ library_data_t::real_float, library_data_t::real_float,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_impl<float, float, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_double, library_data_t::real_double,
+ library_data_t::real_double, library_data_t::real_double):
+ {
+ detail::gemm_impl<double, double, double, double>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::complex_float, library_data_t::complex_float,
+ library_data_t::complex_float, library_data_t::complex_float):
+ {
+ detail::gemm_impl<std::complex<float>, std::complex<float>,
+ std::complex<float>, std::complex<float>>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::complex_double, library_data_t::complex_double,
+ library_data_t::complex_double, library_data_t::complex_double):
+ {
+ detail::gemm_impl<std::complex<double>, std::complex<double>,
+ std::complex<double>, std::complex<double>>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_half, library_data_t::real_half):
+ {
+ detail::gemm_impl<sycl::half, sycl::half, sycl::half,
+ sycl::half>(q, a_trans, b_trans, m, n, k, alpha, a,
+ lda, b, ldb, beta, c, ldc);
+ break;
+ }
+#ifdef __INTEL_MKL__
+ case detail::get_type_combination_id(
+ library_data_t::real_bfloat16, library_data_t::real_bfloat16,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_impl<sycl::half, sycl::half, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_half, library_data_t::real_float):
+ {
+ float alpha_value =
+ dpct::get_value(reinterpret_cast<const float *>(alpha), q);
+ float beta_value =
+ dpct::get_value(reinterpret_cast<const float *>(beta), q);
+ sycl::half alpha_half(alpha_value);
+ sycl::half beta_half(beta_value);
+ detail::gemm_impl<sycl::half, sycl::half, sycl::half,
+ sycl::half>(q, a_trans, b_trans, m, n, k, &alpha_half,
+ a, lda, b, ldb, &beta_half, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_int8, library_data_t::real_int8,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_impl<std::int8_t, std::int8_t, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_bfloat16, library_data_t::real_bfloat16,
+ library_data_t::real_bfloat16, library_data_t::real_float):
+ {
+ detail::gemm_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_int8, library_data_t::real_int8,
+ library_data_t::real_int32, library_data_t::real_int32):
+ {
+ float alpha_float =
+ dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
+ float beta_float =
+ dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
+ detail::gemm_impl<std::int8_t, std::int8_t, std::int32_t, float>(
+ q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc);
+ break;
+ }
+#endif // __INTEL_MKL__
+ default:
+ throw std::runtime_error("the combination of data type is unsupported");
+ }
+ } // gemm()
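+
+ // Usage sketch (illustrative; d_a, d_b and d_c are device pointers owned by
+ // the caller): single-precision C = alpha*A*B + beta*C on 4x4 column-major
+ // matrices.
+ //
+ // float alpha = 1.0f, beta = 0.0f;
+ // gemm(q, oneapi::mkl::transpose::nontrans, oneapi::mkl::transpose::nontrans,
+ // 4, 4, 4, &alpha, d_a, library_data_t::real_float, 4,
+ // d_b, library_data_t::real_float, 4, &beta,
+ // d_c, library_data_t::real_float, 4, library_data_t::real_float);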
+
+ /// Computes a batch of matrix-matrix product with general matrices.
+ /// \param [in] q The queue where the routine should be executed.
+ /// \param [in] a_trans Specifies the operation applied to A.
+ /// \param [in] b_trans Specifies the operation applied to B.
+ /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
+ /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
+ /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
+ /// \param [in] alpha Scaling factor for the matrix-matrix product.
+ /// \param [in] a Input matrix A.
+ /// \param [in] a_type Data type of the matrix A.
+ /// \param [in] lda Leading dimension of A.
+ /// \param [in] b Input matrix B.
+ /// \param [in] b_type Data type of the matrix B.
+ /// \param [in] ldb Leading dimension of B.
+ /// \param [in] beta Scaling factor for matrix C.
+ /// \param [in, out] c Input/Output matrix C.
+ /// \param [in] c_type Data type of the matrix C.
+ /// \param [in] ldc Leading dimension of C.
+ /// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
+ /// \param [in] scaling_type Data type of the scaling factors.
+ inline void gemm_batch(sycl::queue & q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m,
+ int n, int k, const void * alpha, const void * a[], library_data_t a_type, int lda,
+ const void * b[], library_data_t b_type, int ldb, const void * beta, void * c[],
+ library_data_t c_type, int ldc, int batch_size, library_data_t scaling_type,
+ matrix_info_t<float> * matrix_info) {
+ std::uint64_t key =
+ detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
+ switch (key)
+ {
+ case detail::get_type_combination_id(
+ library_data_t::real_float, library_data_t::real_float,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<float, float, float, float>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb,
+ beta, c, ldc, batch_size, matrix_info);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_double, library_data_t::real_double,
+ library_data_t::real_double, library_data_t::real_double):
+ {
+ detail::gemm_batch_impl<double, double, double, double>(q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb,
+ beta, c, ldc, batch_size, matrix_info);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_half, library_data_t::real_half):
+ {
+ detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info);
+ break;
+ }
+#ifdef __INTEL_MKL__
+ case detail::get_type_combination_id(
+ library_data_t::real_bfloat16, library_data_t::real_bfloat16,
+ library_data_t::real_bfloat16, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_bfloat16, library_data_t::real_bfloat16,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info);
+ break;
+ }
+#endif
+ case detail::get_type_combination_id(
+ library_data_t::real_int8, library_data_t::real_int8,
+ library_data_t::real_int32, library_data_t::real_int32):
+ {
+ float alpha_float =
+ dpct::get_value(reinterpret_cast<const std::int32_t *>(alpha), q);
+ float beta_float =
+ dpct::get_value(reinterpret_cast<const std::int32_t *>(beta), q);
+ detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t, float>(
+ q, a_trans, b_trans, m, n, k, &alpha_float, a, lda, b, ldb, &beta_float, c, ldc, batch_size,
+ matrix_info);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_int8, library_data_t::real_int8,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, b, ldb, beta, c, ldc, batch_size, matrix_info);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_half, library_data_t::real_float):
+ {
+ float alpha_value =
+ dpct::get_value(reinterpret_cast<const float *>(alpha), q);
+ float beta_value =
+ dpct::get_value(reinterpret_cast<const float *>(beta), q);
+ sycl::half alpha_half(alpha_value);
+ sycl::half beta_half(beta_value);
+ detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
+ q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, b, ldb, &beta_half, c, ldc, batch_size, matrix_info);
+ break;
+ }
+ default:
+ throw std::runtime_error("the combination of data type is unsupported");
+ }
+ }
+
+ /// Computes a batch of matrix-matrix product with general matrices.
+ /// \param [in] q The queue where the routine should be executed.
+ /// \param [in] a_trans Specifies the operation applied to A.
+ /// \param [in] b_trans Specifies the operation applied to B.
+ /// \param [in] m Specifies the number of rows of the matrix op(A) and of the matrix C.
+ /// \param [in] n Specifies the number of columns of the matrix op(B) and of the matrix C.
+ /// \param [in] k Specifies the number of columns of the matrix op(A) and the number of rows of the matrix op(B).
+ /// \param [in] alpha Scaling factor for the matrix-matrix product.
+ /// \param [in] a Input matrix A.
+ /// \param [in] a_type Data type of the matrix A.
+ /// \param [in] lda Leading dimension of A.
+ /// \param [in] stride_a Stride between the different A matrices.
+ /// \param [in] b Input matrix B.
+ /// \param [in] b_type Data type of the matrix B.
+ /// \param [in] ldb Leading dimension of B.
+ /// \param [in] stride_b Stride between the different B matrices.
+ /// \param [in] beta Scaling factor for matrix C.
+ /// \param [in, out] c Input/Output matrix C.
+ /// \param [in] c_type Data type of the matrix C.
+ /// \param [in] ldc Leading dimension of C.
+ /// \param [in] stride_c Stride between the different C matrices.
+ /// \param [in] batch_size Specifies the number of matrix multiply operations to perform.
+ /// \param [in] scaling_type Data type of the scaling factors.
+ inline void gemm_batch(sycl::queue & q, oneapi::mkl::transpose a_trans, oneapi::mkl::transpose b_trans, int m,
+ int n, int k, const void * alpha, const void * a, library_data_t a_type, int lda,
+ long long int stride_a, const void * b, library_data_t b_type, int ldb,
+ long long int stride_b, const void * beta, void * c, library_data_t c_type, int ldc,
+ long long int stride_c, int batch_size, library_data_t scaling_type) {
+ if (scaling_type == library_data_t::real_float &&
+ c_type == library_data_t::complex_float)
+ {
+ scaling_type = library_data_t::complex_float;
+ }
+ else if (scaling_type == library_data_t::real_double &&
+ c_type == library_data_t::complex_double)
+ {
+ scaling_type = library_data_t::complex_double;
+ }
+
+ std::uint64_t key =
+ detail::get_type_combination_id(a_type, b_type, c_type, scaling_type);
+ switch (key)
+ {
+ case detail::get_type_combination_id(
+ library_data_t::real_float, library_data_t::real_float,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<float, float, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_double, library_data_t::real_double,
+ library_data_t::real_double, library_data_t::real_double):
+ {
+ detail::gemm_batch_impl<double, double, double, double>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::complex_float, library_data_t::complex_float,
+ library_data_t::complex_float, library_data_t::complex_float):
+ {
+ detail::gemm_batch_impl<std::complex<float>, std::complex<float>,
+ std::complex<float>, std::complex<float>>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::complex_double, library_data_t::complex_double,
+ library_data_t::complex_double, library_data_t::complex_double):
+ {
+ detail::gemm_batch_impl<std::complex<double>, std::complex<double>,
+ std::complex<double>, std::complex<double>>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_half, library_data_t::real_half):
+ {
+ detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half,
+ sycl::half>(q, a_trans, b_trans, m, n, k, alpha,
+ a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+#ifdef __INTEL_MKL__
+ case detail::get_type_combination_id(
+ library_data_t::real_bfloat16, library_data_t::real_bfloat16,
+ library_data_t::real_bfloat16, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c,
+ batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_bfloat16, library_data_t::real_bfloat16,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<oneapi::mkl::bfloat16, oneapi::mkl::bfloat16, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b, beta, c, ldc, stride_c,
+ batch_size);
+ break;
+ }
+#endif
+ case detail::get_type_combination_id(
+ library_data_t::real_int8, library_data_t::real_int8,
+ library_data_t::real_int32, library_data_t::real_int32):
+ {
+ detail::gemm_batch_impl<std::int8_t, std::int8_t, std::int32_t,
+ std::int32_t>(q, a_trans, b_trans, m, n, k, alpha,
+ a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_int8, library_data_t::real_int8,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<std::int8_t, std::int8_t, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_float, library_data_t::real_float):
+ {
+ detail::gemm_batch_impl<sycl::half, sycl::half, float, float>(
+ q, a_trans, b_trans, m, n, k, alpha, a, lda, stride_a, b, ldb, stride_b,
+ beta, c, ldc, stride_c, batch_size);
+ break;
+ }
+ case detail::get_type_combination_id(
+ library_data_t::real_half, library_data_t::real_half,
+ library_data_t::real_half, library_data_t::real_float):
+ {
+ float alpha_value =
+ dpct::get_value(reinterpret_cast<const float *>(alpha), q);
+ float beta_value =
+ dpct::get_value(reinterpret_cast<const float *>(beta), q);
+ sycl::half alpha_half(alpha_value);
+ sycl::half beta_half(beta_value);
+ detail::gemm_batch_impl<sycl::half, sycl::half, sycl::half, sycl::half>(
+ q, a_trans, b_trans, m, n, k, &alpha_half, a, lda, stride_a, b, ldb, stride_b,
+ &beta_half, c, ldc, stride_c, batch_size);
+ break;
+ }
+ default:
+ throw std::runtime_error("the combination of data type is unsupported");
+ }
+ }
+
+ static inline void
+ async_dpct_memcpy(void *to_ptr, size_t to_pitch, const void *from_ptr,
+ size_t from_pitch, size_t x, size_t y,
+ memcpy_direction direction = automatic,
+ sycl::queue &q = get_default_queue())
+ {
+ detail::dpct_memcpy(q, to_ptr, from_ptr, to_pitch, from_pitch, x, y,
+ direction);
+ }
+
+ using err0 = detail::generic_error_type<struct err0_tag, int>;
+ using err1 = detail::generic_error_type<struct err1_tag, int>;
+
+ static inline void dpct_free(void *ptr, sycl::queue &q = get_default_queue()) {
+ detail::dpct_free(ptr, q);
+ }
+
+ /// dpct accessor used as device function parameter.
+ template <class T, memory_region Memory, size_t Dimension> class accessor;
+ template <class T, memory_region Memory> class accessor<T, Memory, 3> {
+ public:
+ using memory_t = detail::memory_traits<Memory, T>;
+ using element_t = typename memory_t::element_t;
+ using pointer_t = typename memory_t::pointer_t;
+ using accessor_t = typename memory_t::template accessor_t<3>;
+ accessor(pointer_t data, const sycl::range<3> &in_range)
+ : _data(data), _range(in_range) {}
+ template <memory_region M = Memory>
+ accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
+ : accessor(acc, acc.get_range()) {}
+ accessor(const accessor_t &acc, const sycl::range<3> &in_range)
+ : accessor(acc.get_pointer(), in_range) {}
+ accessor<T, Memory, 2> operator[](size_t index) const {
+ sycl::range<2> sub(_range.get(1), _range.get(2));
+ return accessor<T, Memory, 2>(_data + index * sub.size(), sub);
+ }
+
+ pointer_t get_ptr() const { return _data; }
+
+ private:
+ pointer_t _data;
+ sycl::range<3> _range;
+ };
+ template <class T, memory_region Memory> class accessor<T, Memory, 2> {
+ public:
+ using memory_t = detail::memory_traits<Memory, T>;
+ using element_t = typename memory_t::element_t;
+ using pointer_t = typename memory_t::pointer_t;
+ using accessor_t = typename memory_t::template accessor_t<2>;
+ accessor(pointer_t data, const sycl::range<2> &in_range)
+ : _data(data), _range(in_range) {}
+ template <memory_region M = Memory>
+ accessor(typename std::enable_if<M != local, const accessor_t>::type &acc)
+ : accessor(acc, acc.get_range()) {}
+ accessor(const accessor_t &acc, const sycl::range<2> &in_range)
+ : accessor(acc.get_pointer(), in_range) {}
+
+ pointer_t operator[](size_t index) const {
+ return _data + _range.get(1) * index;
+ }
+
+ pointer_t get_ptr() const { return _data; }
+
+ private:
+ pointer_t _data;
+ sycl::range<2> _range;
+ };
+
+ namespace detail {
+ /// Device variable with address space of shared, global or constant.
+ template <class T, memory_region Memory, size_t Dimension> class device_memory {
+ public:
+ using accessor_t =
+ typename detail::memory_traits<Memory,
+ T>::template accessor_t<Dimension>;
+ using value_t = typename detail::memory_traits<Memory, T>::value_t;
+ using dpct_accessor_t = dpct::accessor<T, Memory, Dimension>;
+
+ device_memory() : device_memory(sycl::range<Dimension>(1)) {}
+
+ /// Constructor of 1-D array with initializer list
+ device_memory(const sycl::range<Dimension> &in_range,
+ std::initializer_list<value_t> &&init_list)
+ : device_memory(in_range) {
+ assert(init_list.size() <= in_range.size());
+ _host_ptr = (value_t *)std::malloc(_size);
+ std::memset(_host_ptr, 0, _size);
+ std::memcpy(_host_ptr, init_list.begin(), init_list.size() * sizeof(T));
+ }
+
+ /// Constructor of 2-D array with initializer list
+ template <size_t D = Dimension>
+ device_memory(
+ const typename std::enable_if<D == 2, sycl::range<2>>::type &in_range,
+ std::initializer_list<std::initializer_list<value_t>> &&init_list)
+ : device_memory(in_range) {
+ assert(init_list.size() <= in_range[0]);
+ _host_ptr = (value_t *)std::malloc(_size);
+ std::memset(_host_ptr, 0, _size);
+ auto tmp_data = _host_ptr;
+ for (auto sub_list : init_list) {
+ assert(sub_list.size() <= in_range[1]);
+ std::memcpy(tmp_data, sub_list.begin(),
+ sub_list.size() * sizeof(T));
+ tmp_data += in_range[1];
+ }
+ }
+
+ /// Constructor with range
+ device_memory(const sycl::range<Dimension> &range_in)
+ : _size(range_in.size() * sizeof(T)), _range(range_in),
+ _reference(false), _host_ptr(nullptr), _device_ptr(nullptr) {
+ static_assert(
+ (Memory == global) || (Memory == constant) || (Memory == shared),
+ "device memory region should be global, constant or shared");
+ // Make sure the mem_mgr and dev_mgr singletons are destructed later
+ // than this object.
+ detail::mem_mgr::instance();
+ dev_mgr::instance();
+ }
+
+ /// Constructor from individual dimension extents.
+ template <class... Args>
+ device_memory(Args... Arguments)
+ : device_memory(sycl::range<Dimension>(Arguments...)) {}
+
+ ~device_memory() {
+ if (_device_ptr && !_reference)
+ dpct::dpct_free(_device_ptr);
+ if (_host_ptr)
+ std::free(_host_ptr);
+ }
+
+ /// Allocate memory with the default queue, and initialize it if an
+ /// initial value was provided.
+ void init() { init(dpct::get_default_queue()); }
+ /// Allocate memory with the specified queue, and initialize it if an
+ /// initial value was provided.
+ void init(sycl::queue &q) {
+ if (_device_ptr)
+ return;
+ if (!_size)
+ return;
+ allocate_device(q);
+ if (_host_ptr)
+ detail::dpct_memcpy(q, _device_ptr, _host_ptr, _size,
+ host_to_device);
+ }
+
+ /// Rebind the variable to the existing device pointer \p src of \p size bytes.
+ void assign(value_t *src, size_t size) {
+ this->~device_memory();
+ new (this) device_memory(src, size);
+ }
+
+ /// Get the memory pointer of the memory object: a virtual pointer when
+ /// USM is not used, and a device pointer when it is.
+ value_t *get_ptr() { return get_ptr(get_default_queue()); }
+ /// Get the memory pointer of the memory object: a virtual pointer when
+ /// USM is not used, and a device pointer when it is.
+ value_t *get_ptr(sycl::queue &q) {
+ init(q);
+ return _device_ptr;
+ }
+
+ /// Get the device memory object size in bytes.
+ size_t get_size() { return _size; }
+
+ template <size_t D = Dimension>
+ typename std::enable_if<D == 1, T>::type &operator[](size_t index) {
+ init();
+ return _device_ptr[index];
+ }
+
+ /// Get a dpct::accessor carrying dimension info for the device memory
+ /// object, used when USM is enabled and the dimension is greater than 1.
+ template <size_t D = Dimension>
+ typename std::enable_if<D != 1, dpct_accessor_t>::type
+ get_access([[maybe_unused]] sycl::handler &cgh) {
+ return dpct_accessor_t((T *)_device_ptr, _range);
+ }
+
+ private:
+ device_memory(value_t *memory_ptr, size_t size)
+ : _size(size), _range(size / sizeof(T)), _reference(true),
+ _device_ptr(memory_ptr) {}
+
+ void allocate_device(sycl::queue &q) {
+ #ifndef DPCT_USM_LEVEL_NONE
+ if (Memory == shared) {
+ _device_ptr = (value_t *)sycl::malloc_shared(_size, q.get_device(),
+ q.get_context());
+ return;
+ }
+ #ifdef SYCL_EXT_ONEAPI_USM_DEVICE_READ_ONLY
+ if (Memory == constant) {
+ _device_ptr = (value_t *)sycl::malloc_device(
+ _size, q.get_device(), q.get_context(),
+ sycl::ext::oneapi::property::usm::device_read_only());
+ return;
+ }
+ #endif
+ #endif
+ _device_ptr = (value_t *)detail::dpct_malloc(_size, q);
+ }
+
+ size_t _size;
+ sycl::range<Dimension> _range;
+ bool _reference;
+ value_t *_host_ptr;
+ value_t *_device_ptr;
+ };
+ template <class T, memory_region Memory>
+ class device_memory<T, Memory, 0> : public device_memory<T, Memory, 1> {
+ public:
+ using base = device_memory<T, Memory, 1>;
+ using value_t = typename base::value_t;
+ using accessor_t =
+ typename detail::memory_traits<Memory, T>::template accessor_t<0>;
+
+ /// Constructor with initial value.
+ device_memory(const value_t &val) : base(sycl::range<1>(1), {val}) {}
+
+ /// Default constructor
+ device_memory() : base(1) {}
+ };
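+
+ // Note: the 0-dimensional specialization above models a single scalar as a
+ // one-element 1-D region, so it reuses the allocation and pointer machinery
+ // of the 1-D base class.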
+ } // namespace detail
+
+ template <class T, size_t Dimension>
+ using global_memory = detail::device_memory<T, global, Dimension>;
+ template <class T, size_t Dimension>
+ using constant_memory = detail::device_memory<T, constant, Dimension>;
+ template <class T, size_t Dimension>
+ using shared_memory = detail::device_memory<T, shared, Dimension>;
+
+
+ template <typename T,
+ sycl::access::address_space addressSpace =
+ sycl::access::address_space::global_space,
+ sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
+ sycl::memory_scope memoryScope = sycl::memory_scope::device>
+ inline T atomic_fetch_add(T *addr, T operand) {
+ auto atm =
+ sycl::atomic_ref<T, memoryOrder, memoryScope, addressSpace>(addr[0]);
+ return atm.fetch_add(operand);
+ }
+
+ template <sycl::access::address_space addressSpace =
+ sycl::access::address_space::global_space,
+ sycl::memory_order memoryOrder = sycl::memory_order::relaxed,
+ sycl::memory_scope memoryScope = sycl::memory_scope::device,
+ typename T1, typename T2>
+ inline T1 atomic_fetch_add(T1 *addr, T2 operand) {
+ auto atm =
+ sycl::atomic_ref<T1, memoryOrder, memoryScope, addressSpace>(addr[0]);
+ return atm.fetch_add(operand);
+ }
+
+ template <typename T, sycl::access::address_space addressSpace =
+ sycl::access::address_space::global_space>
+ inline T atomic_fetch_add(T *addr, T operand,
+ sycl::memory_order memoryOrder) {
+ switch (memoryOrder) {
+ case sycl::memory_order::relaxed:
+ return atomic_fetch_add<T, addressSpace, sycl::memory_order::relaxed,
+ sycl::memory_scope::device>(addr, operand);
+ case sycl::memory_order::acq_rel:
+ return atomic_fetch_add<T, addressSpace, sycl::memory_order::acq_rel,
+ sycl::memory_scope::device>(addr, operand);
+ case sycl::memory_order::seq_cst:
+ return atomic_fetch_add<T, addressSpace, sycl::memory_order::seq_cst,
+ sycl::memory_scope::device>(addr, operand);
+ default:
+ assert(false && "Invalid memory_order for atomics. Valid memory_order for "
+ "atomics are: sycl::memory_order::relaxed, "
+ "sycl::memory_order::acq_rel, sycl::memory_order::seq_cst!");
+ return T(0); // unreachable when assert() is enabled; avoids falling off a
+ // non-void function under NDEBUG
+ }
+ }
+
+ template <sycl::access::address_space addressSpace =
+ sycl::access::address_space::global_space,
+ typename T1, typename T2>
+ inline T1 atomic_fetch_add(T1 *addr, T2 operand,
+ sycl::memory_order memoryOrder) {
+ return atomic_fetch_add<T1, addressSpace>(addr, operand, memoryOrder);
+ }
+
+ inline unsigned int byte_level_permute(
+ unsigned int a, unsigned int b, unsigned int s) {
+ unsigned int ret;
+ ret = ((((std::uint64_t)b << 32 | a) >> (s & 0x7) * 8) & 0xff) |
+ (((((std::uint64_t)b << 32 | a) >> ((s >> 4) & 0x7) * 8) & 0xff)
+ << 8) |
+ (((((std::uint64_t)b << 32 | a) >> ((s >> 8) & 0x7) * 8) & 0xff)
+ << 16) |
+ (((((std::uint64_t)b << 32 | a) >> ((s >> 12) & 0x7) * 8) & 0xff)
+ << 24);
+ return ret;
+ }
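+
+ // Example: each 4-bit nibble of the selector s picks one byte of the 64-bit
+ // value (b << 32 | a), least-significant nibble first:
+ // byte_level_permute(0x33221100u, 0x77665544u, 0x3210) == 0x33221100u (identity)
+ // byte_level_permute(0x33221100u, 0x77665544u, 0x5432) == 0x55443322u (shifted by two bytes)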
+
+ inline uint32_t byte_level_permute_custom(
+ uint32_t low32, uint32_t high32, uint32_t sel, int mode = 0) {
+ constexpr uint16_t lookup[6][4] = {
+ {0x3210, 0x4321, 0x5432, 0x6543}, // Forward 4-byte extract
+ {0x5670, 0x6701, 0x7012, 0x0123}, // Backward 4-byte extract
+ {0x0000, 0x1111, 0x2222, 0x3333}, // Replicate 8-bit values
+ {0x3210, 0x3211, 0x3222, 0x3333}, // Edge clamp left
+ {0x0000, 0x1110, 0x2210, 0x3210}, // Edge clamp right
+ {0x1010, 0x3232, 0x1010, 0x3232} // Replicate 16-bit values
+ };
+
+ if (mode >= 1 && mode <= 6) {
+ return byte_level_permute(low32, high32, lookup[mode - 1][sel & 0x3]);
+ } else if (!mode) {
+ return byte_level_permute(low32, high32, sel);
+ }
+ return 0;
+ }
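+
+ // Example: mode 1 ("forward 4-byte extract") with sel == 1 maps to selector
+ // 0x4321, i.e. bytes 1..4 of the combined value:
+ // byte_level_permute_custom(0x33221100u, 0x77665544u, 1, 1) == 0x44332211u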
+
+} // COPY from DPCT header files
+
+#endif // GGML_SYCL_DPCT_HELPER_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp b/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp
new file mode 100644
index 0000000..00d54b8
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/element_wise.cpp
@@ -0,0 +1,1216 @@
+#include "common.hpp"
+#include "ggml-sycl/presets.hpp"
+#include "ggml.h"
+#include "element_wise.hpp"
+
+#define SYCL_GLOBAL_ID_LOOP(K, ITEM) \
+ for (auto i = ITEM.get_global_id(0); i < (size_t)K; i += ITEM.get_global_range(0))
+
+#define SYCL_LOCAL_ID_CALC(ITEM, IDX) \
+ (ITEM.get_local_range(IDX) * ITEM.get_group(IDX) + ITEM.get_local_id(IDX))
+
+
+static void acc_f32(const float * x, const float * y, float * dst, const int ne,
+ const int ne10, const int ne11, const int ne12,
+ const int nb1, const int nb2, int offset, const sycl::nd_item<1> &item_ct1) {
+ const int i = SYCL_LOCAL_ID_CALC(item_ct1, 0);
+ if (i >= ne) {
+ return;
+ }
+ int src1_idx = i - offset;
+ int oz = src1_idx / nb2;
+ int oy = (src1_idx - (oz * nb2)) / nb1;
+ int ox = src1_idx % nb1;
+ if (src1_idx >= 0 && ox < ne10 && oy < ne11 && oz < ne12) {
+ dst[i] = x[i] + y[ox + oy * ne10 + oz * ne10 * ne11];
+ } else {
+ dst[i] = x[i];
+ }
+}
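+
+// Note: ne10/ne11/ne12 describe the src1 view, and nb1/nb2/offset are given in
+// float elements here (the caller converts the byte values from op_params);
+// elements that fall outside the src1 window are copied through from x unchanged.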
+
+/* Unary OP funcs */
+template<typename T>
+static __dpct_inline__ T op_sgn(T x) {
+ return x > static_cast<T>(0.f) ? static_cast<T>(1.f) : ((x < static_cast<T>(0.f) ? static_cast<T>(-1.f) : static_cast<T>(0.f)));
+}
+
+template<typename T>
+static __dpct_inline__ T op_abs(T x) {
+ return sycl::fabs(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_elu(T x) {
+ return (x > static_cast<T>(0.f)) ? x : sycl::expm1(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_gelu(T x) {
+ const T GELU_COEF_A = static_cast<T>(0.044715f);
+ const T SQRT_2_OVER_PI = static_cast<T>(0.79788456080286535587989211986876f);
+ return static_cast<T>(0.5f) * x *
+ (static_cast<T>(1.0f) +
+ sycl::tanh(SQRT_2_OVER_PI * x * (static_cast<T>(1.0f) + GELU_COEF_A * x * x)));
+}
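+
+// Note: op_gelu is the tanh approximation of GELU,
+// 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))).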
+
+template<typename T>
+static __dpct_inline__ T op_silu(T x) {
+ return x / (static_cast<T>(1.0f) + sycl::native::exp(-x));
+}
+
+template<typename T>
+static __dpct_inline__ T op_gelu_quick(T x) {
+ const T GELU_QUICK_COEF_LOCAL = static_cast<T>(-1.702f);
+ return x * (static_cast<T>(1.0f) / (static_cast<T>(1.0f) + sycl::native::exp(GELU_QUICK_COEF_LOCAL * x)));
+}
+
+template<typename T>
+static __dpct_inline__ T op_gelu_erf(T x) {
+ const T SQRT_2_INV = static_cast<T>(0.70710678118654752440084436210484f);
+ return static_cast<T>(0.5f) * x * (static_cast<T>(1.0f) + sycl::erf(x * SQRT_2_INV));
+}
+
+template<typename T>
+static __dpct_inline__ T op_tanh(T x) {
+ return sycl::tanh(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_relu(T x) {
+ return sycl::fmax(x, static_cast<T>(0));
+}
+
+template<typename T>
+static __dpct_inline__ T op_sigmoid(T x) {
+ return static_cast<T>(1.0f) / (static_cast<T>(1.0f) + sycl::native::exp(-x));
+}
+
+template<typename T>
+static __dpct_inline__ T op_sqrt(T x) {
+ return sycl::sqrt(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_sin(T x) {
+ return sycl::sin(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_cos(T x) {
+ return sycl::cos(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_hardsigmoid(T x) {
+ return sycl::fmin(static_cast<T>(1.0f), sycl::fmax(static_cast<T>(0.0f), (x + static_cast<T>(3.0f)) / static_cast<T>(6.0f)));
+}
+
+template<typename T>
+static __dpct_inline__ T op_hardswish(T x) {
+ return x * sycl::fmin(static_cast<T>(1.0f), sycl::fmax(static_cast<T>(0.0f), (x + static_cast<T>(3.0f)) / static_cast<T>(6.0f)));
+}
+
+template<typename T>
+static __dpct_inline__ T op_exp(T x) {
+ return sycl::exp(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_log(T x) {
+ if (x <= static_cast<T>(0)) {
+ return neg_infinity<T>();
+ }
+ return sycl::log(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_softplus(T x) {
+ const float xf = (float) x;
+ const float ax = sycl::fabs(xf);
+ const float m = sycl::fmax(xf, 0.0f);
+ const float y = m + sycl::log1p(sycl::exp(-ax));
+ return (T) y;
+}
+
+template<typename T>
+static __dpct_inline__ T op_neg(T x) {
+ return -x;
+}
+
+template<typename T>
+static __dpct_inline__ T op_step(T x) {
+ return (x > static_cast<T>(0.0f)) ? static_cast<T>(1.0f) : static_cast<T>(0.0f);
+}
+
+template<typename T>
+static __dpct_inline__ T op_leaky_relu(T x, float negative_slope) {
+ T neg_slope_T = static_cast<T>(negative_slope);
+ return sycl::fmax(x, static_cast<T>(0)) +
+ sycl::fmin(x, static_cast<T>(0.0f)) * neg_slope_T;
+}
+
+template<typename T>
+static __dpct_inline__ T op_sqr(T x) {
+ return x * x;
+}
+
+template<typename T>
+static __dpct_inline__ T op_clamp(T x, float min_val, float max_val) {
+ return x < static_cast<T>(min_val) ? static_cast<T>(min_val) : (x > static_cast<T>(max_val) ? static_cast<T>(max_val) : x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_floor(T x) {
+ return sycl::floor(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_ceil(T x) {
+ return sycl::ceil(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_round(T x) {
+ return sycl::round(x);
+}
+
+template<typename T>
+static __dpct_inline__ T op_trunc(T x) {
+ return sycl::trunc(x);
+}
+
+template<typename T, typename F>
+static void unary_op_generic_kernel(
+ const T * x,
+ T * dst,
+ const int k,
+ const int64_t ne0, const int64_t ne1, const int64_t ne2, const int64_t ne3,
+ const size_t nb0, const size_t nb1, const size_t nb2, const size_t nb3,
+ const size_t nbd0, const size_t nbd1, const size_t nbd2, const size_t nbd3,
+ const sycl::nd_item<1> & item_ct1,
+ F func) {
+
+ (void) ne3;
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ const int64_t i0 = i % ne0;
+ const int64_t i1 = (i / ne0) % ne1;
+ const int64_t i2 = (i / (ne0*ne1)) % ne2;
+ const int64_t i3 = i / (ne0*ne1*ne2);
+
+ const char * src_base = (const char *) x;
+ char * dst_base = (char *) dst;
+
+ const T * srcp = (const T *)(src_base + i0*nb0 + i1*nb1 + i2*nb2 + i3*nb3 );
+ T * dstp = (T *)(dst_base + i0*nbd0 + i1*nbd1 + i2*nbd2 + i3*nbd3);
+
+ *dstp = func(*srcp);
+ }
+}
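+
+// Note: the generic kernel above decomposes the flat index i into the four
+// tensor coordinates (i0, i1, i2, i3) and applies per-tensor byte strides, so
+// both the source and the destination may be non-contiguous.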
+
+template<typename T>
+static void unary_op_sqrt_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_sqrt(x[i]);
+ }
+}
+
+template<typename T>
+static void unary_op_sin_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_sin(x[i]);
+ }
+}
+
+template<typename T>
+static void unary_op_cos_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_cos(x[i]);
+ }
+}
+
+template<typename T>
+static void unary_op_log_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_log(x[i]);
+ }
+}
+
+
+template<typename T>
+static void unary_op_leaky_relu_kernel(const T * x, T * dst, const int k, float negative_slope, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_leaky_relu(x[i], negative_slope);
+ }
+}
+
+template<typename T>
+static void unary_op_sqr_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_sqr(x[i]);
+ }
+}
+
+template<typename T>
+static void unary_op_clamp_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1, float min_val, float max_val) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_clamp(x[i], min_val, max_val);
+ }
+}
+
+template<typename T>
+static void unary_op_floor_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_floor(x[i]);
+ }
+}
+
+template<typename T>
+static void unary_op_ceil_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_ceil(x[i]);
+ }
+}
+
+template<typename T>
+static void unary_op_round_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_round(x[i]);
+ }
+}
+
+template<typename T>
+static void unary_op_trunc_kernel(const T * x, T * dst, const int k, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = op_trunc(x[i]);
+ }
+}
+
+template<typename T>
+static void upscale(const T *x, T *dst, const int nb00, const int nb01,
+ const int nb02, const int nb03, const int ne10, const int ne11,
+ const int ne12, const int ne13, const float sf0, const float sf1,
+ const float sf2, const float sf3, const sycl::nd_item<1> &item_ct1) {
+ int index = item_ct1.get_local_id(0) +
+ item_ct1.get_group(0) * item_ct1.get_local_range(0);
+ if (index >= ne10 * ne11 * ne12 * ne13) {
+ return;
+ }
+ // operation
+ int i10 = index % ne10;
+ int i11 = (index / ne10) % ne11;
+ int i12 = (index / (ne10 * ne11)) % ne12;
+ int i13 = (index / (ne10 * ne11 * ne12)) % ne13;
+
+ int i00 = static_cast<int>(i10 / sf0);
+ int i01 = static_cast<int>(i11 / sf1);
+ int i02 = static_cast<int>(i12 / sf2);
+ int i03 = static_cast<int>(i13 / sf3);
+
+ dst[index] = *(const T *)((const char *)x + i03 * nb03 + i02 * nb02 + i01 * nb01 + i00 * nb00);
+}
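+
+// Note: this is nearest-neighbour upscaling: each destination coordinate is
+// divided by its scale factor to find the source element, e.g. with sf0 == 2
+// destination columns 0 and 1 both read source column 0.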
+
+template<typename T>
+static void clamp(const T * x, T * dst, const float min, const float max, const int k,
+ const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = x[i] < static_cast<T>(min) ? static_cast<T>(min) : (x[i] > static_cast<T>(max) ? static_cast<T>(max) : x[i]);
+ }
+}
+
+template<typename T>
+static void gated_op_fused_geglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ const int64_t j0 = (i / n) * o0 + (i % n);
+ const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);
+ dst[i] = op_gelu(x[j0]) * g[j1];
+ }
+}
+
+template<typename T>
+static void gated_op_fused_reglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ const int64_t j0 = (i / n) * o0 + (i % n);
+ const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);
+ dst[i] = op_relu(x[j0]) * g[j1];
+ }
+}
+
+template<typename T>
+static void gated_op_fused_swiglu(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ const int64_t j0 = (i / n) * o0 + (i % n);
+ const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);
+ dst[i] = op_silu(x[j0]) * g[j1];
+ }
+}
+
+template<typename T>
+static void gated_op_fused_geglu_erf(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ const int64_t j0 = (i / n) * o0 + (i % n);
+ const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);
+ dst[i] = op_gelu_erf(x[j0]) * g[j1];
+ }
+}
+
+template<typename T>
+static void gated_op_fused_geglu_quick(const T * x, const T * g, T * dst, const uint64_t k, const uint64_t n, const uint64_t o0, const uint64_t o1, const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ const int64_t j0 = (i / n) * o0 + (i % n);
+ const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);
+ dst[i] = op_gelu_quick(x[j0]) * g[j1];
+ }
+}
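+
+// Note on the gated kernels above: n is the row width in elements and o0/o1
+// are the row strides (in elements) of the value and gate inputs, so j0/j1
+// address element (i % n) of row (i / n). For a single fused input tensor the
+// dispatcher below passes x and g as pointers into the same buffer, offset by nc.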
+
+namespace ggml_sycl_detail {
+static void acc_f32_sycl(const float *x, const float *y, float *dst,
+ const int n_elements, const int ne10, const int ne11,
+ const int ne12, const int nb1, const int nb2,
+ const int offset, queue_ptr stream) {
+ int num_blocks = ceil_div(n_elements, SYCL_ACC_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) *
+ sycl::range<1>(SYCL_ACC_BLOCK_SIZE),
+ sycl::range<1>(SYCL_ACC_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ acc_f32(x, y, dst, n_elements, ne10, ne11, ne12, nb1, nb2, offset,
+ item_ct1);
+ });
+}
+
+template<typename T>
+static void arange_kernel(T * dst, const int k, T start, T step,
+ const sycl::nd_item<1> &item_ct1) {
+ SYCL_GLOBAL_ID_LOOP(k, item_ct1) {
+ dst[i] = start + static_cast<T>(i) * step;
+ }
+}
+
+template<typename T>
+static void upscale_sycl(const T *x, T *dst, const int nb00, const int nb01,
+ const int nb02, const int nb03, const int ne10, const int ne11,
+ const int ne12, const int ne13, const float sf0, const float sf1,
+ const float sf2, const float sf3, queue_ptr stream) {
+ int dst_size = ne10 * ne11 * ne12 * ne13;
+ int num_blocks = ceil_div(dst_size, SYCL_UPSCALE_BLOCK_SIZE);
+ sycl::range<1> gridDim(num_blocks * SYCL_UPSCALE_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(gridDim, sycl::range<1>(SYCL_UPSCALE_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) {
+ upscale(x, dst, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, item_ct1);
+ });
+}
+
+template<typename KernelInvoker, typename... Args>
+static inline void dispatch_ggml_sycl_op_unary(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ kernel_invoker(data_pts.src, data_pts.dst, (int)ggml_nelements(dst->src[0]), main_stream, std::forward<Args>(args)...);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ kernel_invoker(data_pts.src, data_pts.dst, (int)ggml_nelements(dst->src[0]), main_stream, std::forward<Args>(args)...);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ }
+}
+
+template<typename KernelInvoker, typename... Args>
+static inline void dispatch_ggml_sycl_op_fused_glu(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+ const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2;
+ GGML_ASSERT(dst->ne[0] == nc);
+ GGML_ASSERT(ggml_is_contiguous_1(dst->src[0]));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+ const int32_t swapped = ((const int32_t *) dst->op_params)[1];
+ void * src0_d = src0->data;
+ void * src1_d = src1 ? src1->data : src0->data;
+ const int64_t src0_o = src0->nb[1];
+ const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1];
+ void * dst_d = dst->data;
+ if (src1) {
+ GGML_ASSERT(ggml_is_contiguous_1(src1));
+ GGML_ASSERT(src1->nb[0] == ggml_element_size(src1));
+ GGML_ASSERT(src1->ne[0] == nc);
+ GGML_ASSERT(src0->type == src1->type);
+ }
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ sycl::half * src0_p = (sycl::half *) src0_d;
+ sycl::half * src1_p = (sycl::half *) src1_d;
+
+ if (!src1) {
+ src0_p += swapped ? nc : 0;
+ src1_p += swapped ? 0 : nc;
+ }
+ kernel_invoker(src0_p,
+ src1_p,
+ (sycl::half *) dst_d,
+ ggml_nelements(dst),
+ nc,
+ src0_o / sizeof(sycl::half),
+ src1_o / sizeof(sycl::half),
+ main_stream,
+ std::forward<Args>(args)...);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ float * src0_p = (float *) src0_d;
+ float * src1_p = (float *) src1_d;
+
+ if (!src1) {
+ src0_p += swapped ? nc : 0;
+ src1_p += swapped ? 0 : nc;
+ }
+
+ kernel_invoker(src0_p,
+ src1_p,
+ (float *) dst_d,
+ ggml_nelements(dst),
+ nc,
+ src0_o / sizeof(float),
+ src1_o / sizeof(float),
+ main_stream,
+ std::forward<Args>(args)...);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ }
+}
+
+template<typename KernelInvoker, typename... Args>
+static inline void dispatch_ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst, KernelInvoker kernel_invoker, Args&&... args) {
+#if defined (GGML_SYCL_F16)
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+#else
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+#endif
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ const float sf0 = (float) dst->ne[0] / dst->src[0]->ne[0];
+ const float sf1 = (float) dst->ne[1] / dst->src[0]->ne[1];
+ const float sf2 = (float) dst->ne[2] / dst->src[0]->ne[2];
+ const float sf3 = (float) dst->ne[3] / dst->src[0]->ne[3];
+ switch (dst->type) {
+#if defined (GGML_SYCL_F16)
+ case GGML_TYPE_F16:
+ {
+ auto data_pts = cast_data<sycl::half>(dst);
+ kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->nb[0], (int)dst->src[0]->nb[1], (int)dst->src[0]->nb[2],
+ (int)dst->src[0]->nb[3], (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], sf0, sf1, sf2, sf3,
+ main_stream, std::forward<Args>(args)...);
+ break;
+ }
+#endif
+ case GGML_TYPE_F32:
+ {
+ auto data_pts = cast_data<float>(dst);
+ kernel_invoker(data_pts.src, data_pts.dst, (int)dst->src[0]->nb[0], (int)dst->src[0]->nb[1], (int)dst->src[0]->nb[2],
+ (int)dst->src[0]->nb[3], (int)dst->ne[0], (int)dst->ne[1], (int)dst->ne[2], (int)dst->ne[3], sf0, sf1, sf2, sf3,
+ main_stream, std::forward<Args>(args)...);
+ break;
+ }
+ default:
+ GGML_ABORT("GGML tensor type not supported!\n");
+ }
+}
+
+template<typename F>
+static inline void ggml_sycl_op_unary(
+ ggml_backend_sycl_context & ctx, ggml_tensor * dst, F func) {
+
+ ggml_tensor * src0 = dst->src[0];
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+ const int64_t ne2 = dst->ne[2];
+ const int64_t ne3 = dst->ne[3];
+
+ const size_t nb0 = src0->nb[0];
+ const size_t nb1 = src0->nb[1];
+ const size_t nb2 = src0->nb[2];
+ const size_t nb3 = src0->nb[3];
+
+ const size_t nbd0 = dst->nb[0];
+ const size_t nbd1 = dst->nb[1];
+ const size_t nbd2 = dst->nb[2];
+ const size_t nbd3 = dst->nb[3];
+
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [=](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+
+ const int num_blocks = ceil_div(k_elements, 256);
+
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256),
+ sycl::range<1>(256)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_generic_kernel(
+ src, dst_ptr, k_elements,
+ ne0, ne1, ne2, ne3,
+ nb0, nb1, nb2, nb3,
+ nbd0, nbd1, nbd2, nbd3,
+ item_ct1,
+ func
+ );
+ });
+ });
+}
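+
+// Note: ops routed through ggml_sycl_op_unary use the strided generic kernel
+// and therefore tolerate non-contiguous tensors, while the ops that call
+// dispatch_ggml_sycl_op_unary with a flat-index kernel (sqrt, sin, cos, log,
+// ...) assume contiguous data.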
+
+
+static inline void ggml_sycl_op_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ float start, stop, step;
+ memcpy(&start, dst->op_params, sizeof(float));
+ memcpy(&stop, (float *) dst->op_params + 1, sizeof(float));
+ memcpy(&step, (float *) dst->op_params + 2, sizeof(float));
+ (void) stop; // the number of elements in dst already encodes stop
+ dpct::queue_ptr stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ float * dst_ptr = (float *)dst->data;
+ const int k = (int)ggml_nelements(dst);
+ const int num_blocks = ceil_div(k, SYCL_ARANGE_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_ARANGE_BLOCK_SIZE),
+ sycl::range<1>(SYCL_ARANGE_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ arange_kernel(dst_ptr, k, start, step, item_ct1);
+ });
+}
+
+} // namespace ggml_sycl_detail
+
+
+
+static inline void ggml_sycl_op_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_sgn(x);
+ });
+}
+
+
+static inline void ggml_sycl_op_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_abs(x);
+ });
+}
+
+static inline void ggml_sycl_op_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_elu(x);
+ });
+}
+static inline void ggml_sycl_op_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_silu(x);
+ });
+}
+
+static inline void ggml_sycl_op_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_gelu(x);
+ });
+}
+
+static inline void ggml_sycl_op_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_gelu_quick(x);
+ });
+}
+
+static inline void ggml_sycl_op_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_gelu_erf(x);
+ });
+}
+
+static inline void ggml_sycl_op_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_tanh(x);
+ });
+}
+
+static inline void ggml_sycl_op_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_relu(x);
+ });
+}
+
+static inline void ggml_sycl_op_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_hardsigmoid(x);
+ });
+}
+
+static inline void ggml_sycl_op_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_hardswish(x);
+ });
+}
+
+static inline void ggml_sycl_op_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_exp(x);
+ });
+}
+
+static inline void ggml_sycl_op_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, SYCL_EXP_BLOCK_SIZE); // Using EXP block size
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_EXP_BLOCK_SIZE),
+ sycl::range<1>(SYCL_EXP_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_log_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_softplus(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_softplus(x);
+ });
+}
+
+static inline void ggml_sycl_op_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_neg(x);
+ });
+}
+
+
+static inline void ggml_sycl_op_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_step(x);
+ });
+}
+
+static inline void ggml_sycl_op_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_sigmoid(x);
+ });
+}
+
+static inline void ggml_sycl_op_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, SYCL_SQRT_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SQRT_BLOCK_SIZE),
+ sycl::range<1>(SYCL_SQRT_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_sqrt_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, SYCL_SIN_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIN_BLOCK_SIZE),
+ sycl::range<1>(SYCL_SIN_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_sin_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, SYCL_SIN_BLOCK_SIZE); // Using SIN block size
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SIN_BLOCK_SIZE),
+ sycl::range<1>(SYCL_SIN_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_cos_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ float negative_slope;
+ memcpy(&negative_slope, dst->op_params, sizeof(float));
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream, float slope) {
+ const int num_blocks = ceil_div(k_elements, SYCL_RELU_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_RELU_BLOCK_SIZE),
+ sycl::range<1>(SYCL_RELU_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_leaky_relu_kernel(src, dst_ptr, k_elements, slope, item_ct1);
+ });
+ }, negative_slope);
+}
+
+static inline void ggml_sycl_op_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, SYCL_SQR_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_SQR_BLOCK_SIZE),
+ sycl::range<1>(SYCL_SQR_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_sqr_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_upscale(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int nb00, int nb01, int nb02, int nb03,
+ int ne10, int ne11, int ne12, int ne13, float sf0, float sf1, float sf2, float sf3,
+ queue_ptr stream) {
+ ggml_sycl_detail::upscale_sycl(src, dst_ptr, nb00, nb01, nb02, nb03, ne10, ne11, ne12, ne13, sf0, sf1, sf2, sf3, stream);
+ });
+}
+
+static inline void ggml_sycl_op_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ float min_val;
+ float max_val;
+ memcpy(&min_val, dst->op_params, sizeof(float));
+ memcpy(&max_val, (float *) dst->op_params + 1, sizeof(float));
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream, float min_arg, float max_arg) {
+ const int num_blocks = ceil_div(k_elements, SYCL_CLAMP_BLOCK_SIZE);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(SYCL_CLAMP_BLOCK_SIZE),
+ sycl::range<1>(SYCL_CLAMP_BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ clamp(src, dst_ptr, min_arg, max_arg, k_elements, item_ct1);
+ });
+ }, min_val, max_val);
+}
+
+static inline void ggml_sycl_op_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, 256);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256),
+ sycl::range<1>(256)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_floor_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::ggml_sycl_op_unary(ctx, dst, [](auto x) {
+ return op_ceil(x);
+ });
+}
+
+static inline void ggml_sycl_op_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, 256);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256),
+ sycl::range<1>(256)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_round_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_unary(ctx, dst,
+ [](const auto* src, auto* dst_ptr, int k_elements, queue_ptr stream) {
+ const int num_blocks = ceil_div(k_elements, 256);
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks) * sycl::range<1>(256),
+ sycl::range<1>(256)),
+ [=](sycl::nd_item<1> item_ct1) {
+ unary_op_trunc_kernel(src, dst_ptr, k_elements, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_acc(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->ne[3] == 1); // only tensors up to 3D are supported
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ const float * src1_dd = static_cast<const float*>(dst->src[1]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ int nb1 = dst->op_params[0] / 4; // stride in float elements (op_params stores bytes)
+ int nb2 = dst->op_params[1] / 4; // stride in float elements (op_params stores bytes)
+ // int nb3 = dst->op_params[2] / 4; // unused
+ int offset = dst->op_params[3] / 4; // offset in float elements (op_params stores bytes)
+
+ ggml_sycl_detail::acc_f32_sycl(src0_dd, src1_dd, dst_dd, (int)ggml_nelements(dst), (int)dst->src[1]->ne[0], (int)dst->src[1]->ne[1], (int)dst->src[1]->ne[2], nb1, nb2, offset, main_stream);
+}
+
+static inline void ggml_sycl_op_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst,
+ [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) {
+ const uint32_t num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE);
+ main_stream->parallel_for(
+ sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) {
+ gated_op_fused_geglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst,
+ [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) {
+ const uint32_t num_blocks = ceil_div((uint32_t)k, SYCL_RELU_BLOCK_SIZE); // Using RELU block size for reglu
+ main_stream->parallel_for(
+ sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), sycl::range<1>(SYCL_RELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) {
+ gated_op_fused_reglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst,
+ [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) {
+ const uint32_t num_blocks = ceil_div((uint32_t)k, SYCL_SILU_BLOCK_SIZE); // Using SILU block size for swiglu
+ main_stream->parallel_for(
+ sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), sycl::range<1>(SYCL_SILU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) {
+ gated_op_fused_swiglu(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1);
+ });
+ });
+}
+
+__dpct_inline__ float ggml_sycl_op_swiglu_oai_single(float x, float g, float alpha = 1.702f, float limit = 7.0f) {
+ x = sycl::fmin(x, limit);
+ g = sycl::fmax(sycl::fmin(g, limit), -limit);
+
+ float out_glu = x / (1.0f + sycl::native::exp(-x * alpha));
+ out_glu = out_glu * (1.0f + g);
+ return out_glu;
+}
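+
+// Example of the clamping above with the default limit of 7.0f: x = 10.0f is
+// clamped to 7.0f (upper bound only), while g = -10.0f is clamped to -7.0f
+// (both bounds apply to the gate).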
+
+
+template <typename T>
+static void swiglu_oai_kernel(const T * x, const T * g, T * dst, const int64_t k,
+ const int64_t n, const int64_t o0, const int64_t o1,
+ float alpha, float limit, sycl::nd_item<3> item_ct1) {
+ const int64_t i = int64_t(item_ct1.get_local_range(2)) * item_ct1.get_group(2) + item_ct1.get_local_id(2);
+
+ if (i >= k) {
+ return;
+ }
+
+ const int64_t j0 = (i / n) * o0 + (i % n);
+ const int64_t j1 = o0 == o1 ? j0 : (i / n) * o1 + (i % n);
+
+ float xi = x[j0];
+ float gi = g[j1];
+
+ dst[i] = ggml_sycl_op_swiglu_oai_single(xi, gi, alpha, limit);
+}
+
+template <typename T>
+static void swiglu_oai_sycl(const T * x,
+ const T * g,
+ T * dst,
+ const int64_t k,
+ const int64_t n,
+ const int64_t o0,
+ const int64_t o1,
+ const float alpha,
+ const float limit,
+ dpct::queue_ptr stream) {
+ const int64_t num_blocks = (k + SYCL_GLU_BLOCK_SIZE - 1) / SYCL_GLU_BLOCK_SIZE;
+ stream->parallel_for(sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) * sycl::range<3>(1, 1, SYCL_GLU_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_GLU_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ swiglu_oai_kernel(x, g, dst, k, n, o0, o1, alpha, limit, item_ct1);
+ });
+}
+
+void ggml_sycl_op_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+ void * src0_d = src0->data;
+ void * src1_d = src1 ? src1->data : src0->data;
+ const int64_t src0_o = src0->nb[1];
+ const int64_t src1_o = src1 ? src1->nb[1] : src0->nb[1];
+ void * dst_d = dst->data;
+ const int64_t nc = src1 ? src0->ne[0] : src0->ne[0] / 2;
+ dpct::queue_ptr stream = ctx.stream();
+
+ GGML_ASSERT(ggml_is_contiguous_1(src0));
+ GGML_ASSERT(src0->nb[0] == ggml_element_size(src0));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(src0->type == dst->type);
+ GGML_ASSERT(dst->ne[0] == nc);
+ GGML_ASSERT(ggml_nrows(dst) == ggml_nrows(src0));
+
+ if (src1) {
+ GGML_ASSERT(ggml_is_contiguous_1(src1));
+ GGML_ASSERT(src1->nb[0] == ggml_element_size(src1));
+ GGML_ASSERT(src1->ne[0] == nc);
+ GGML_ASSERT(src0->type == src1->type);
+ }
+
+ //const int32_t swapped = ((const int32_t *) dst->op_params)[1];
+ const int32_t swapped = ggml_get_op_params_i32(dst, 1);
+ const float alpha = ggml_get_op_params_f32(dst, 2);
+ const float limit = ggml_get_op_params_f32(dst, 3);
+
+ float * src0_p = (float *) src0_d;
+ float * src1_p = (float *) src1_d;
+
+ if (!src1) {
+ src0_p += swapped ? nc : 0;
+ src1_p += swapped ? 0 : nc;
+ }
+
+ swiglu_oai_sycl(src0_p, src1_p, (float *)dst_d, ggml_nelements(dst), nc, src0_o / sizeof(float), src1_o / sizeof(float), alpha, limit, stream);
+}
+
+static inline void ggml_sycl_op_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst,
+ [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) {
+ const uint32_t num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE);
+ main_stream->parallel_for(
+ sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) {
+ gated_op_fused_geglu_erf(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1);
+ });
+ });
+}
+
+static inline void ggml_sycl_op_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_sycl_detail::dispatch_ggml_sycl_op_fused_glu(ctx, dst,
+ [](const auto* x_ptr, const auto* g_ptr, auto* dst_ptr, uint64_t k, uint64_t n, uint64_t o0, uint64_t o1, queue_ptr main_stream) {
+ const uint32_t num_blocks = ceil_div(k, SYCL_GELU_BLOCK_SIZE);
+ main_stream->parallel_for(
+ sycl::nd_range<1>((num_blocks * sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), sycl::range<1>(SYCL_GELU_BLOCK_SIZE)), [=](sycl::nd_item<1> item_ct1) {
+ gated_op_fused_geglu_quick(x_ptr, g_ptr, dst_ptr, k, n, o0, o1, item_ct1);
+ });
+ });
+}
+
+
+void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_sqrt(ctx, dst);
+}
+
+void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_sin(ctx, dst);
+}
+
+void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_cos(ctx, dst);
+}
+
+void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_acc(ctx, dst);
+}
+
+void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_gelu(ctx, dst);
+}
+
+void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_silu(ctx, dst);
+}
+
+void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_gelu_quick(ctx, dst);
+}
+
+void ggml_sycl_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_gelu_erf(ctx, dst);
+}
+
+void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_tanh(ctx, dst);
+}
+
+void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_relu(ctx, dst);
+}
+
+void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_sigmoid(ctx, dst);
+}
+
+void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_hardsigmoid(ctx, dst);
+}
+
+void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_hardswish(ctx, dst);
+}
+
+void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_exp(ctx, dst);
+}
+
+void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_log(ctx, dst);
+}
+
+void ggml_sycl_softplus(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_softplus(ctx, dst);
+}
+
+void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_neg(ctx, dst);
+}
+
+void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_step(ctx, dst);
+}
+
+void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_leaky_relu(ctx, dst);
+}
+
+void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_sqr(ctx, dst);
+}
+
+void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_upscale(ctx, dst);
+}
+
+
+void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_clamp(ctx, dst);
+}
+
+void ggml_sycl_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_sgn(ctx, dst);
+}
+
+void ggml_sycl_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_abs(ctx, dst);
+}
+
+void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_elu(ctx, dst);
+}
+
+void ggml_sycl_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_geglu(ctx, dst);
+}
+
+void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_reglu(ctx, dst);
+}
+
+void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_swiglu(ctx, dst);
+}
+
+void ggml_sycl_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_swiglu_oai(ctx, dst);
+}
+
+void ggml_sycl_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_geglu_erf(ctx, dst);
+}
+
+void ggml_sycl_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_geglu_quick(ctx, dst);
+}
+
+void ggml_sycl_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/0);
+ ggml_sycl_detail::ggml_sycl_op_arange(ctx, dst);
+}
+
+void ggml_sycl_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_floor(ctx, dst);
+}
+
+void ggml_sycl_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_ceil(ctx, dst);
+}
+
+void ggml_sycl_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_round(ctx, dst);
+}
+
+void ggml_sycl_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_trunc(ctx, dst);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp b/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp
new file mode 100644
index 0000000..7c71974
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/element_wise.hpp
@@ -0,0 +1,96 @@
+#ifndef GGML_SYCL_ELEMENTWISE_HPP
+#define GGML_SYCL_ELEMENTWISE_HPP
+
+#include "common.hpp"
+#include "ggml.h"
+#include <limits> // For std::numeric_limits
+
+#define SYCL_GLU_BLOCK_SIZE 256
+
+template <typename T>
+T neg_infinity() {
+ return -std::numeric_limits<T>::infinity();
+}
+
+template<typename T_Dst, typename T_Src = T_Dst>
+struct typed_data {
+ const T_Src * src;
+ T_Dst * dst;
+};
+
+template<typename T_Dst, typename T_Src = T_Dst>
+typed_data<T_Dst, T_Src> cast_data(ggml_tensor * dst) {
+ return {
+ /* .src = */ static_cast<const T_Src *>(dst->src[0]->data),
+ /* .dst = */ static_cast<T_Dst *>(dst->data)
+ };
+}
+
+const float GELU_QUICK_COEF = -1.702f;
+
+
+void ggml_sycl_sqrt(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_sin(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_cos(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_acc(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_gelu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_silu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_gelu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_swiglu_oai(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_gelu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_tanh(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_sigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_hardsigmoid(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_hardswish(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_exp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_log(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_softplus(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_neg(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_step(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_leaky_relu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_sqr(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_upscale(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_clamp(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_sgn(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_abs(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_elu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_geglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_reglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_swiglu(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_geglu_erf(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_geglu_quick(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_floor(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_ceil(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_round(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+void ggml_sycl_trunc(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_arange(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_ELEMENTWISE_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/gemm.hpp b/llama.cpp/ggml/src/ggml-sycl/gemm.hpp
new file mode 100644
index 0000000..dcf6c7a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/gemm.hpp
@@ -0,0 +1,90 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_GEMM_HPP
+#define GGML_SYCL_GEMM_HPP
+
+#include "ggml-sycl.h"
+
+#if GGML_SYCL_DNNL
+
+#include "dnnl.hpp"
+#include "dnnl_sycl.hpp"
+
+class DnnlGemmWrapper {
+public:
+ using dt = dnnl::memory::data_type;
+ using tag = dnnl::memory::format_tag;
+
+ template<typename T>
+ static constexpr dt to_dt() {
+ if constexpr (std::is_same_v<T, float>) return dt::f32;
+ else if constexpr (std::is_same_v<T, sycl::half>) return dt::f16;
+ else static_assert(sizeof(T) == 0, "unsupported data type for DnnlGemmWrapper::to_dt");
+ }
+
+ static void gemm(ggml_backend_sycl_context & ctx, int m, int n, int k,
+ const void * a, dt at, dnnl_dim_t stra0, dnnl_dim_t stra1, dnnl_dim_t stra2,
+ const void * b, dt bt, dnnl_dim_t strb0, dnnl_dim_t strb1, dnnl_dim_t strb2,
+ void * c, dt ct, const queue_ptr & q, dnnl_dim_t batches_a, dnnl_dim_t batches_b) {
+
+ auto stream = ctx.stream_dnnl(q);
+ auto eng = ctx.engine_dnnl(q);
+
+ dnnl::memory::dims a_dims = {batches_a, m, k };
+ dnnl::memory::dims a_strides = {stra2, stra1, stra0};
+ const auto a_in_md = dnnl::memory::desc(a_dims, at, a_strides);
+
+ dnnl::memory::dims b_dims = {batches_b, k, n };
+ dnnl::memory::dims b_strides = {strb2, strb0, strb1};
+ const auto b_in_md = dnnl::memory::desc(b_dims, bt, b_strides);
+
+ dnnl::memory::dims c_dims = { std::max(batches_a, batches_b), m, n};
+ dnnl::memory::dims c_strides = {m*n, 1, m };
+ const auto c_md = dnnl::memory::desc(c_dims, ct, c_strides);
+ dnnl::primitive_attr primitive_attr;
+ primitive_attr.set_scratchpad_mode(dnnl::scratchpad_mode::user);
+
+#ifdef GGML_SYCL_F16
+ primitive_attr.set_fpmath_mode(dnnl::fpmath_mode::f16);
+#endif
+
+ auto a_mem = dnnl::memory(a_in_md, eng, const_cast<void*>(a));
+ auto b_mem = dnnl::memory(b_in_md, eng, const_cast<void*>(b));
+ auto matmul_pd = dnnl::matmul::primitive_desc(eng, a_in_md, b_in_md, c_md, primitive_attr);
+ auto c_mem = dnnl::memory(matmul_pd.dst_desc(), eng, c);
+
+ auto scratchpad_md = matmul_pd.scratchpad_desc();
+ auto scratchpad_mem = ctx.get_scratchpad_mem(scratchpad_md, eng, q);
+
+ auto matmul_prim = dnnl::matmul(matmul_pd);
+
+ std::unordered_map<int, dnnl::memory> matmul_args;
+ matmul_args.insert({ DNNL_ARG_SRC, a_mem });
+ matmul_args.insert({ DNNL_ARG_WEIGHTS, b_mem });
+
+ matmul_args.insert({ DNNL_ARG_DST, c_mem });
+ matmul_args.insert({ DNNL_ARG_SCRATCHPAD, scratchpad_mem });
+
+ matmul_prim.execute(stream, matmul_args);
+ }
+
+ static void row_gemm(ggml_backend_sycl_context & ctx, int m, int n, int k,
+ const void * a, dt at, const void * b, dt bt, void * c, dt ct, const queue_ptr & q) {
+
+ gemm(ctx, m, n, k, a, at, 1, k, k * m, b, bt, 1, k, n * k, c, ct, q, 1, 1);
+ }
+};
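+
+// Usage sketch (assuming device-resident f32 buffers a, b, c and a queue_ptr q
+// owned by ctx):
+// DnnlGemmWrapper::row_gemm(ctx, m, n, k,
+// a, DnnlGemmWrapper::to_dt<float>(),
+// b, DnnlGemmWrapper::to_dt<float>(),
+// c, DnnlGemmWrapper::to_dt<float>(), q);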
+
+#endif
+
+#endif // GGML_SYCL_GEMM_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/getrows.cpp b/llama.cpp/ggml/src/ggml-sycl/getrows.cpp
new file mode 100644
index 0000000..03f8dd9
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/getrows.cpp
@@ -0,0 +1,215 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "ggml-impl.h"
+#include "common.hpp"
+#include "dequantize.hpp"
+#include "getrows.hpp"
+
+
+template<int qk, int qr, dequantize_kernel_t dequantize_kernel, typename dst_t>
+static void k_get_rows(
+ const void * src0, const int32_t * src1, dst_t * dst,
+ int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
+ /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
+ /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
+ /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
+ size_t s10, size_t s11, size_t s12,
+ const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {
+
+ const int i00 = (item_ct1.get_group(2) * item_ct1.get_local_range(2) +
+ item_ct1.get_local_id(2)) *
+ 2;
+ const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
+ item_ct1.get_local_id(1);
+ const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
+ item_ct1.get_local_id(0)) /
+ ne12;
+ const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
+ item_ct1.get_local_id(0)) %
+ ne12;
+
+ if (i00 >= ne00) {
+ return;
+ }
+
+ const int i01 = src1[i10*s10 + i11*s11 + i12*s12];
+
+ dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
+ const void * src0_row = (const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03;
+
+ const int ib = i00/qk; // block index
+ const int iqs = (i00%qk)/qr; // quant index
+ const int iybs = i00 - i00%qk; // dst block start index
+ const int y_offset = qr == 1 ? 1 : qk/2;
+
+ // dequantize
+ dfloat2 v;
+ dequantize_kernel(src0_row, ib, iqs, v);
+
+ dst_row[iybs + iqs + 0] = v.x();
+ dst_row[iybs + iqs + y_offset] = v.y();
+}
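+
+// Note: each work-item of k_get_rows writes two destination elements (i00
+// advances in steps of two) because dequantize_kernel produces a dfloat2 pair;
+// the launcher below asserts ne00 % 2 == 0 accordingly.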
+
+template<typename src0_t, typename dst_t>
+static void k_get_rows_float(
+ const src0_t * src0, const int32_t * src1, dst_t * dst,
+ int64_t ne00, /*int64_t ne01, int64_t ne02, int64_t ne03,*/
+ /*int64_t ne10, int64_t ne11,*/ int64_t ne12, /*int64_t ne13,*/
+ /*size_t s0,*/ size_t s1, size_t s2, size_t s3,
+ /*size_t nb00,*/ size_t nb01, size_t nb02, size_t nb03,
+ size_t s10, size_t s11, size_t s12,
+ const sycl::nd_item<3> &item_ct1/*, size_t s13*/) {
+
+ const int i00 = item_ct1.get_group(2) * item_ct1.get_local_range(2) +
+ item_ct1.get_local_id(2);
+ const int i10 = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
+ item_ct1.get_local_id(1);
+ const int i11 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
+ item_ct1.get_local_id(0)) /
+ ne12;
+ const int i12 = (item_ct1.get_group(0) * item_ct1.get_local_range(0) +
+ item_ct1.get_local_id(0)) %
+ ne12;
+
+ if (i00 >= ne00) {
+ return;
+ }
+
+ const int i01 = src1[i10*s10 + i11*s11 + i12*s12];
+
+ dst_t * dst_row = dst + i10*s1 + i11*s2 + i12*s3;
+ const src0_t * src0_row = (const src0_t *)((const char *)src0 + i01*nb01 + i11*nb02 + i12*nb03);
+
+ dst_row[i00] = src0_row[i00];
+}
+
+template <int qk, int qr, dequantize_kernel_t dq>
+static void get_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor *src0, const ggml_tensor *src1,
+ ggml_tensor *dst, const void *src0_dd,
+ const int32_t *src1_dd, float *dst_dd,
+ queue_ptr stream) {
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
+ const int block_num_x = (ne00 + 2*SYCL_GET_ROWS_BLOCK_SIZE - 1) / (2*SYCL_GET_ROWS_BLOCK_SIZE);
+ const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);
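+    // Sketch of the launch geometry: dim 2 walks the row elements in pairs (each
+    // work-item dequantizes two values), dim 1 walks the ne10 indices in src1, and
+    // dim 0 is flattened over ne11*ne12, which k_get_rows splits back into
+    // i11 = idx / ne12 and i12 = idx % ne12.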
+
+ // strides in elements
+ //const size_t s0 = nb0 / ggml_element_size(dst);
+ const size_t s1 = nb1 / ggml_element_size(dst);
+ const size_t s2 = nb2 / ggml_element_size(dst);
+ const size_t s3 = nb3 / ggml_element_size(dst);
+
+ const size_t s10 = nb10 / ggml_element_size(src1);
+ const size_t s11 = nb11 / ggml_element_size(src1);
+ const size_t s12 = nb12 / ggml_element_size(src1);
+ //const size_t s13 = nb13 / ggml_element_size(src1);
+
+ GGML_ASSERT(ne00 % 2 == 0);
+
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_get_rows<qk, qr, dq>(
+ src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
+ s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
+ });
+
+ GGML_UNUSED(dst);
+ GGML_UNUSED(ctx);
+}
+
+template <typename src0_t>
+static void get_rows_sycl_float(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
+ const ggml_tensor *src1, ggml_tensor *dst,
+ const src0_t *src0_dd, const int32_t *src1_dd,
+ float *dst_dd, queue_ptr stream) {
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const sycl::range<3> block_dims(1, 1, SYCL_GET_ROWS_BLOCK_SIZE);
+ const int block_num_x = (ne00 + SYCL_GET_ROWS_BLOCK_SIZE - 1) / SYCL_GET_ROWS_BLOCK_SIZE;
+ const sycl::range<3> block_nums(ne11 * ne12, ne10, block_num_x);
+
+ // strides in elements
+ //const size_t s0 = nb0 / ggml_element_size(dst);
+ const size_t s1 = nb1 / ggml_element_size(dst);
+ const size_t s2 = nb2 / ggml_element_size(dst);
+ const size_t s3 = nb3 / ggml_element_size(dst);
+
+ const size_t s10 = nb10 / ggml_element_size(src1);
+ const size_t s11 = nb11 / ggml_element_size(src1);
+ const size_t s12 = nb12 / ggml_element_size(src1);
+ //const size_t s13 = nb13 / ggml_element_size(src1);
+
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_get_rows_float(src0_dd, src1_dd, dst_dd, ne00, ne12, s1, s2,
+ s3, nb01, nb02, nb03, s10, s11, s12, item_ct1);
+ });
+ }
+
+ GGML_UNUSED(dst);
+ GGML_UNUSED(ctx);
+}
+
+void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[1]->type == GGML_TYPE_I32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ GGML_ASSERT(dst->src[0]->nb[0] == ggml_type_size(dst->src[0]->type));
+ GGML_ASSERT(dst->src[1]->nb[0] == ggml_type_size(dst->src[1]->type));
+ GGML_ASSERT(dst->nb[0] == ggml_type_size(dst->type));
+
+ const int32_t * src1_i32 = (const int32_t *) dst->src[1]->data;
+ /* TODO: Refactor and remove duplicates */
+ switch (dst->src[0]->type) {
+ case GGML_TYPE_F16:
+ get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const sycl::half *)dst->src[0]->data,
+ src1_i32, (float *)dst->data, ctx.stream());
+ break;
+ case GGML_TYPE_F32:
+ get_rows_sycl_float(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
+ src1_i32, (float *)dst->data, ctx.stream());
+ break;
+ case GGML_TYPE_Q4_0:
+ get_rows_sycl<QK4_0, QR4_0, dequantize_q4_0>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
+ src1_i32, (float *)dst->data, ctx.stream());
+ break;
+ case GGML_TYPE_Q4_1:
+ get_rows_sycl<QK4_1, QR4_1, dequantize_q4_1>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
+ src1_i32, (float *)dst->data, ctx.stream());
+ break;
+ case GGML_TYPE_Q5_0:
+ get_rows_sycl<QK5_0, QR5_0, dequantize_q5_0>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
+ src1_i32, (float *)dst->data, ctx.stream());
+ break;
+ case GGML_TYPE_Q5_1:
+ get_rows_sycl<QK5_1, QR5_1, dequantize_q5_1>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
+ src1_i32, (float *)dst->data, ctx.stream());
+ break;
+ case GGML_TYPE_Q8_0:
+ get_rows_sycl<QK8_0, QR8_0, dequantize_q8_0>(ctx, dst->src[0], dst->src[1], dst, (const float *)dst->src[0]->data,
+ src1_i32, (float *)dst->data, ctx.stream());
+ break;
+ default:
+ // TODO: k-quants
+ GGML_LOG_ERROR("%s: unsupported type: %s\n", __func__, ggml_type_name(dst->src[0]->type));
+ GGML_ABORT("fatal error");
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/getrows.hpp b/llama.cpp/ggml/src/ggml-sycl/getrows.hpp
new file mode 100644
index 0000000..1c560cd
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/getrows.hpp
@@ -0,0 +1,20 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_GETROWS_HPP
+#define GGML_SYCL_GETROWS_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor *dst);
+
+#endif // GGML_SYCL_GETROWS_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp b/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp
new file mode 100644
index 0000000..0614d7e
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/ggml-sycl.cpp
@@ -0,0 +1,5079 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include <algorithm>
+#include <assert.h>
+#include <atomic>
+#include <cinttypes>
+#include <cstddef>
+#include <cstdint>
+#include <cstdlib>
+#include <float.h>
+#include <limits>
+#include <stdint.h>
+#include <stdio.h>
+#include <vector>
+#include <cmath>
+#include <iostream>
+#include <fstream>
+#include <stdio.h>
+#include <stdlib.h>
+#include <regex>
+
+#include <sycl/sycl.hpp>
+#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC
+# include <sycl/ext/oneapi/experimental/async_alloc/async_alloc.hpp>
+#endif
+#include <sycl/half_type.hpp>
+
+#include "ggml-sycl.h"
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-sycl/add-id.hpp"
+#include "ggml-sycl/backend.hpp"
+#include "ggml-sycl/common.hpp"
+#include "ggml-sycl/element_wise.hpp"
+#include "ggml-sycl/norm.hpp"
+#include "ggml-sycl/presets.hpp"
+#include "ggml-sycl/gemm.hpp"
+#include "ggml-sycl/set_rows.hpp"
+#include "ggml-sycl/set.hpp"
+#include "ggml-sycl/sycl_hw.hpp"
+#include "ggml-sycl/getrows.hpp"
+#include "ggml-sycl/repeat_back.hpp"
+#include "ggml-sycl/quantize.hpp"
+#include "ggml-sycl/ssm_conv.hpp"
+#include "ggml.h"
+
+static bool g_sycl_loaded = false;
+int g_ggml_sycl_debug = 0;
+int g_ggml_sycl_disable_optimize = 0;
+int g_ggml_sycl_disable_graph = 0;
+int g_ggml_sycl_disable_dnn = 0;
+int g_ggml_sycl_prioritize_dmmv = 0;
+int g_ggml_sycl_use_async_mem_op = 0;
+
+static ggml_sycl_device_info ggml_sycl_init() {
+ ggml_sycl_device_info info = {};
+
+ info.device_count = dpct::dev_mgr::instance().device_count();
+ if (info.device_count == 0) {
+ GGML_LOG_ERROR("%s: failed to initialize: %s\n", GGML_SYCL_NAME, __func__);
+ return info;
+ }
+
+ GGML_ASSERT(info.device_count <= GGML_SYCL_MAX_DEVICES);
+
+ int64_t total_vram = 0;
+/* This is a bit misleading; reserved for later */
+// #if defined(SYCL_USE_XMX)
+// GGML_LOG_INFO("%s: SYCL_USE_XMX: yes\n", __func__);
+// #else
+// GGML_LOG_INFO("%s: SYCL_USE_XMX: no\n", __func__);
+// #endif
+ for (int i = 0; i < info.device_count; ++i) {
+ info.devices[i].vmm = 0;
+ dpct::device_info prop;
+ sycl::device device = dpct::dev_mgr::instance().get_device(i);
+
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info(
+ prop, device)));
+
+ info.default_tensor_split[i] = total_vram;
+ total_vram += prop.get_global_mem_size();
+
+ info.devices[i].cc =
+ 100 * prop.get_major_version() + 10 * prop.get_minor_version();
+ info.devices[i].nsm = prop.get_max_compute_units();
+ info.devices[i].opt_feature.reorder = device.ext_oneapi_architecture_is(syclex::arch_category::intel_gpu);
+ info.devices[i].smpbo = prop.get_local_mem_size();
+
+ info.max_work_group_sizes[i] = prop.get_max_work_group_size();
+ }
+
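+    // At this point default_tensor_split[i] holds the cumulative VRAM of devices [0, i);
+    // normalize it to a fraction of the total so the default split is proportional to VRAM.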
+ for (int id = 0; id < info.device_count; ++id) {
+ info.default_tensor_split[id] /= total_vram;
+ }
+ return info;
+}
+
+const ggml_sycl_device_info & ggml_sycl_info() {
+ static ggml_sycl_device_info info = ggml_sycl_init();
+ return info;
+}
+
+static void print_device_detail(int id, sycl::device &device, std::string device_type) {
+
+ dpct::device_info prop;
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ dpct::get_device_info(prop, device)));
+
+ std::string version;
+ version += std::to_string(prop.get_major_version());
+ version += ".";
+ version += std::to_string(prop.get_minor_version());
+
+ device_type = std::regex_replace(device_type, std::regex("ext_oneapi_"), "");
+ std::string name = std::string(prop.get_name());
+ name = std::regex_replace(name, std::regex("\\(R\\)"), "");
+ name = std::regex_replace(name, std::regex("\\(TM\\)"), "");
+
+ auto global_mem_size = prop.get_global_mem_size()/1000000;
+ GGML_LOG_INFO("|%2d|%19s|%39s|%7s|%7d|%8d|%5d|%6luM|%21s|\n", id, device_type.c_str(),
+ name.c_str(), version.c_str(), prop.get_max_compute_units(),
+ prop.get_max_work_group_size(), prop.get_max_sub_group_size(),
+ global_mem_size, device.get_info<sycl::info::device::driver_version>().c_str());
+}
+
+static void print_device_opt_feature(int device_count) {
+ GGML_LOG_INFO("SYCL Optimization Feature:\n");
+ GGML_LOG_INFO(
+ "|ID| Device Type|Reorder|\n");
+ GGML_LOG_INFO(
+ "|--|-------------------|-------|\n");
+ std::map<std::string, size_t> DeviceNums;
+ for (int id = 0; id < device_count; ++id) {
+ sycl::device device = dpct::dev_mgr::instance().get_device(id);
+ std::string backend_type = get_device_backend_and_type(device);
+ int type_id = DeviceNums[backend_type]++;
+ std::stringstream device_type;
+ device_type << "[" << backend_type << ":" << std::to_string(type_id)
+ << "]";
+ std::string device_type_s = device_type.str();
+ device_type_s = std::regex_replace(device_type_s, std::regex("ext_oneapi_"), "");
+ GGML_LOG_INFO("|%2d|%19s|%7s|\n", id, device_type_s.c_str(),
+ ggml_sycl_info().devices[id].opt_feature.reorder ? "Y": "N");
+ }
+}
+
+void ggml_backend_sycl_print_sycl_devices() {
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_print_sycl_devices\n");
+ int device_count = dpct::dev_mgr::instance().device_count();
+ std::map<std::string, size_t> DeviceNums;
+ GGML_LOG_INFO("Found %d SYCL devices:\n", device_count);
+
+ GGML_LOG_INFO(
+ "| | | | "
+ " |Max | |Max |Global | |\n");
+ GGML_LOG_INFO(
+ "| | | | "
+ " |compute|Max work|sub |mem | |\n");
+ GGML_LOG_INFO(
+ "|ID| Device Type| "
+ "Name|Version|units |group |group|size | Driver version|\n");
+ GGML_LOG_INFO(
+ "|--|-------------------|---------------------------------------|------"
+ "-|-------|--------|-----|-------|---------------------|\n");
+
+ for (int id = 0; id < device_count; ++id) {
+ sycl::device device = dpct::dev_mgr::instance().get_device(id);
+ std::string backend_type = get_device_backend_and_type(device);
+ int type_id = DeviceNums[backend_type]++;
+ std::stringstream device_type;
+ device_type << "[" << backend_type << ":" << std::to_string(type_id)
+ << "]";
+ print_device_detail(id, device, device_type.str());
+ }
+
+ print_device_opt_feature(device_count);
+}
+
+static inline int get_sycl_env(const char *env_name, int default_val) {
+ char *user_device_string = getenv(env_name);
+ int user_number = default_val;
+
+ unsigned n;
+ if (user_device_string != NULL &&
+ sscanf(user_device_string, " %u", &n) == 1) {
+ user_number = (int)n;
+ } else {
+ user_number = default_val;
+ }
+ return user_number;
+}
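+// Example: with GGML_SYCL_DEBUG=1 in the environment, get_sycl_env("GGML_SYCL_DEBUG", 0)
+// returns 1; if the variable is unset or not numeric, the default value is returned.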
+
+static void ggml_check_sycl() try {
+ static bool initialized = false;
+
+ if (!initialized) {
+ g_ggml_sycl_debug = get_sycl_env("GGML_SYCL_DEBUG", 0);
+ g_ggml_sycl_disable_optimize = get_sycl_env("GGML_SYCL_DISABLE_OPT", 0);
+ g_ggml_sycl_disable_graph = get_sycl_env("GGML_SYCL_DISABLE_GRAPH", 1);
+ g_ggml_sycl_disable_dnn = get_sycl_env("GGML_SYCL_DISABLE_DNN", 0);
+ g_ggml_sycl_prioritize_dmmv = get_sycl_env("GGML_SYCL_PRIORITIZE_DMMV", 0);
+ GGML_SYCL_DEBUG("[SYCL] call ggml_check_sycl\n");
+ GGML_LOG_INFO("Running with Environment Variables:\n");
+ GGML_LOG_INFO(" GGML_SYCL_DEBUG: %d\n", g_ggml_sycl_debug);
+ GGML_LOG_INFO(" GGML_SYCL_DISABLE_OPT: %d\n", g_ggml_sycl_disable_optimize);
+#ifdef GGML_SYCL_GRAPH
+ GGML_LOG_INFO(" GGML_SYCL_DISABLE_GRAPH: %d\n", g_ggml_sycl_disable_graph);
+#else
+ GGML_LOG_INFO(" GGML_SYCL_DISABLE_GRAPH: graph disabled by compile flag\n");
+#endif
+#if GGML_SYCL_DNNL
+ GGML_LOG_INFO(" GGML_SYCL_DISABLE_DNN: %d\n", g_ggml_sycl_disable_dnn);
+#else
+ GGML_LOG_INFO(" GGML_SYCL_DISABLE_DNN: DNN disabled by compile flag\n");
+#endif
+ GGML_LOG_INFO(" GGML_SYCL_PRIORITIZE_DMMV: %d\n", g_ggml_sycl_prioritize_dmmv);
+ GGML_LOG_INFO("Build with Macros:\n");
+#if defined(GGML_SYCL_FORCE_MMQ)
+ GGML_LOG_INFO(" GGML_SYCL_FORCE_MMQ: yes\n");
+#else
+ GGML_LOG_INFO(" GGML_SYCL_FORCE_MMQ: no\n");
+#endif
+#if defined(GGML_SYCL_F16)
+ GGML_LOG_INFO(" GGML_SYCL_F16: yes\n");
+#else
+ GGML_LOG_INFO(" GGML_SYCL_F16: no\n");
+#endif
+
+/* DO NOT REMOVE; kept for a future XMX optimization.
+#if defined(SYCL_USE_XMX)
+ fprintf(stderr, "%s: SYCL_USE_XMX: yes\n", __func__);
+#else
+ fprintf(stderr, "%s: SYCL_USE_XMX: no\n", __func__);
+#endif
+*/
+    // Currently, async malloc / free is only used when graphs are enabled, as it is required for the calls to be
+    // properly recorded. As this SYCL extension matures it may be beneficial to enable it as the default path and in
+    // other places.
+#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC
+ g_ggml_sycl_use_async_mem_op = !g_ggml_sycl_disable_graph;
+ if (g_ggml_sycl_use_async_mem_op) {
+ for (unsigned int i = 0; i < dpct::dev_mgr::instance().device_count(); ++i) {
+ if (!dpct::dev_mgr::instance().get_device(i).has(sycl::aspect::ext_oneapi_async_memory_alloc)) {
+ g_ggml_sycl_use_async_mem_op = 0;
+ break;
+ }
+ }
+ }
+#endif
+ if (CHECK_TRY_ERROR(g_all_sycl_device_count =
+ dpct::dev_mgr::instance().device_count()) != 0) {
+ initialized = true;
+ g_sycl_loaded = false;
+ return;
+ }
+ GGML_ASSERT(g_all_sycl_device_count <= GGML_SYCL_MAX_DEVICES);
+
+ initialized = true;
+ g_sycl_loaded = true;
+ ggml_backend_sycl_print_sycl_devices();
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+/*
+device_index: device index from 0 to n-1 (contiguous numbers).
+  It is used to select/set the device in the SYCL backend's internal data structures.
+*/
+inline void check_allow_gpu_index(const int device_index) {
+ if (device_index >= ggml_sycl_info().device_count) {
+ char error_buf[256];
+ snprintf(
+ error_buf,
+ sizeof(error_buf),
+ "%s error: device_index:%d is out of range: [0-%d]",
+ __func__,
+ device_index,
+ ggml_sycl_info().device_count - 1);
+ GGML_LOG_ERROR("%s\n", error_buf);
+ assert(false);
+ }
+}
+
+GGML_API void ggml_backend_sycl_get_gpu_list(int *id_list, int max_len) try {
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_gpu_list\n");
+    for (int i = 0; i < max_len; i++) {
+        id_list[i] = -1;
+    }
+
+    for (int i = 0; i < ggml_sycl_info().device_count; i++) {
+        if (i >= max_len) {
+            break;
+        }
+        id_list[i] = i;
+    }
+ return;
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+// sycl buffer
+
+struct ggml_backend_sycl_buffer_context {
+ int device;
+ void * dev_ptr = nullptr;
+ queue_ptr stream;
+ std::string name;
+ optimize_feature opt_feature;
+ std::vector<ggml_tensor_extra_gpu *> tensor_extras;
+
+ ggml_backend_sycl_buffer_context(int device, void * dev_ptr, queue_ptr stream) :
+ device(device), dev_ptr(dev_ptr), stream(stream) {
+ check_allow_gpu_index(device);
+ name = (GGML_SYCL_NAME + std::to_string(device));
+ opt_feature = ggml_sycl_info().devices[device].opt_feature;
+ }
+
+ ~ggml_backend_sycl_buffer_context() {
+ if (dev_ptr != nullptr) {
+ ggml_sycl_set_device(device);
+ SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(dev_ptr, *stream)));
+ }
+
+ //release extra used by tensors
+ for (ggml_tensor_extra_gpu * extra : tensor_extras) {
+ release_extra_gpu(extra);
+ }
+
+ }
+};
+
+static const char * ggml_backend_sycl_buffer_type_get_name(ggml_backend_buffer_type_t buft);
+
+static bool ggml_backend_buffer_is_sycl(ggml_backend_buffer_t buffer) {
+ return buffer->buft->iface.get_name == ggml_backend_sycl_buffer_type_get_name;
+}
+
+static void
+ggml_backend_sycl_buffer_free_buffer(ggml_backend_buffer_t buffer) try {
+ ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context;
+ ggml_sycl_set_device(ctx->device);
+
+ delete ctx;
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void * ggml_backend_sycl_buffer_get_base(ggml_backend_buffer_t buffer) {
+ ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context;
+ return ctx->dev_ptr;
+}
+
+static enum ggml_status
+ggml_backend_sycl_buffer_init_tensor(ggml_backend_buffer_t buffer,
+ ggml_tensor *tensor) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor, "\n").c_str());
+ ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *)buffer->context;
+
+ if (tensor->view_src != NULL) {
+ assert(tensor->view_src->buffer->buft == buffer->buft);
+ return GGML_STATUS_SUCCESS;
+ }
+ if ((tensor->type == GGML_TYPE_Q4_0 || tensor->type == GGML_TYPE_Q4_K || tensor->type == GGML_TYPE_Q6_K) &&
+ !g_ggml_sycl_disable_optimize) {
+ ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{};
+ tensor->extra = extra;
+ ctx->tensor_extras.push_back(extra); //used to release it when destroy ctx.
+ }
+
+ if (ggml_is_quantized(tensor->type)) {
+ // initialize padding to 0 to avoid possible NaN values
+ size_t original_size = ggml_nbytes(tensor);
+ size_t padded_size = ggml_backend_buft_get_alloc_size(buffer->buft, tensor);
+
+ if (padded_size > original_size && tensor->view_src == nullptr) {
+ SYCL_CHECK(CHECK_TRY_ERROR(ctx->stream->memset(
+ (char *)tensor->data + original_size, 0,
+ padded_size - original_size).wait()));
+ }
+ }
+ return GGML_STATUS_SUCCESS;
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_buffer_set_tensor(ggml_backend_buffer_t buffer,
+ ggml_tensor *tensor,
+ const void *data, size_t offset,
+ size_t size) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str());
+ GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset);
+ ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context;
+ ggml_sycl_set_device(ctx->device);
+ auto stream = &(dpct::dev_mgr::instance().get_device(ctx->device).default_queue());
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::dev_mgr::instance().get_device(ctx->device).queues_wait_and_throw()));
+#ifndef _WIN32
+    // Note: stage the mmap()-ed data in a host buffer, then copy it to the device; this is a workaround for an mmap() issue on PVC GPUs.
+    // This function is called while loading the model from disk. Replacing the dynamic allocation with a persistent buffer would not save time and would risk leaking memory.
+ char * host_buf = (char *) malloc(size);
+ memcpy(host_buf, data, size);
+ SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy((char *) tensor->data + offset, host_buf, size).wait()));
+ free(host_buf);
+#else
+ SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy((char *) tensor->data + offset, data, size).wait()));
+#endif
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_buffer_get_tensor(ggml_backend_buffer_t buffer,
+ const ggml_tensor *tensor,
+ void *data, size_t offset,
+ size_t size) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str());
+ GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset);
+ ggml_backend_sycl_buffer_context * ctx = ( ggml_backend_sycl_buffer_context *)buffer->context;
+
+ ggml_sycl_set_device(ctx->device);
+ auto stream = dpct::dev_mgr::instance().get_device(ctx->device).default_queue();
+
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ stream.memcpy(data, (const char *)tensor->data + offset, size)
+ .wait()));
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void dev2dev_memcpy(sycl::queue &q_dst, sycl::queue &q_src, void *ptr_dst,
+ const void *ptr_src, size_t size) {
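+    // Stage the copy through a host buffer: direct device-to-device copies across GPUs
+    // are a known issue (see the TODO at the call site), so copy device -> host on the
+    // source queue, then host -> device on the destination queue.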
+ char *host_buf = (char *)malloc(size);
+ q_src.memcpy(host_buf, (const char *)ptr_src, size).wait();
+ q_dst.memcpy((char *)ptr_dst, host_buf, size).wait();
+ free(host_buf);
+}
+
+static bool
+ggml_backend_sycl_buffer_cpy_tensor(ggml_backend_buffer_t buffer,
+ const ggml_tensor *src,
+ ggml_tensor *dst) try {
+ bool is_cpy_supported = ggml_backend_buffer_is_sycl(src->buffer);
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": dst", dst).c_str());
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" src", src).c_str());
+ GGML_SYCL_DEBUG(" is_cpy_supported=%d\n", is_cpy_supported);
+ if (is_cpy_supported) {
+ ggml_backend_sycl_buffer_context * src_ctx = (ggml_backend_sycl_buffer_context *)src->buffer->context;
+ ggml_backend_sycl_buffer_context * dst_ctx = (ggml_backend_sycl_buffer_context *)dst->buffer->context;
+
+ ggml_sycl_set_device(src_ctx->device);
+ /*
+ DPCT1009:198: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ dpct::dev_mgr::instance().get_device(src_ctx->device).queues_wait_and_throw()));
+ ggml_sycl_set_device(dst_ctx->device);
+ /*
+ DPCT1009:199: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ dpct::dev_mgr::instance().get_device(dst_ctx->device).queues_wait_and_throw()));
+ /*
+ DPCT1009:200: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+
+ queue_ptr stream_dst = dst_ctx->stream;
+ queue_ptr stream_src = src_ctx->stream;
+ size_t size = ggml_nbytes(src);
+
+        // TODO: dirty workaround for a known issue: device-to-device copies across GPUs fail.
+ dev2dev_memcpy(*stream_dst, *stream_src, dst->data, src->data, size);
+
+// TODO: known issue: device-to-device copies across GPUs error out. Re-enable this path once the issue is fixed. DO NOT remove.
+#if 0
+ SYCL_CHECK(CHECK_TRY_ERROR((*stream).memcpy(
+ (char *)dst->data, (const char *)src->data, size).wait()));
+
+ /*
+ DPCT1009:201: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ dpct::dev_mgr::instance().get_device(dst_ctx->device).queues_wait_and_throw()));
+#endif
+ return true;
+ }
+ return false;
+ GGML_UNUSED(buffer);
+} catch (const sycl::exception & exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_buffer_clear(ggml_backend_buffer_t buffer,
+ uint8_t value) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s: size=%zu\n", __func__, buffer->size);
+ ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context;
+
+ ggml_sycl_set_device(ctx->device);
+ queue_ptr stream = ctx->stream;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(dpct::get_current_device().queues_wait_and_throw()));
+
+ SYCL_CHECK(CHECK_TRY_ERROR((*stream)
+ .memset(ctx->dev_ptr, value, buffer->size)
+ .wait()));
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_buffer_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value,
+ size_t offset, size_t size) {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str());
+ GGML_SYCL_DEBUG(" size=%zu offset=%zu value=%u\n", size, offset, value);
+ ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context;
+ SYCL_CHECK(ggml_sycl_set_device(ctx->device));
+ auto stream = &(dpct::dev_mgr::instance().get_device(ctx->device).default_queue());
+ if (size == 0) {
+ return; // Nothing to do
+ }
+ if (tensor->data == nullptr) {
+ GGML_ABORT("Error: Tensor data pointer is null.\n");
+ }
+ void * target_ptr = static_cast<char *>(tensor->data) + offset;
+ SYCL_CHECK(CHECK_TRY_ERROR((*stream).memset(target_ptr, value, size)));
+ SYCL_CHECK(CHECK_TRY_ERROR((*stream).wait()));
+}
+
+static void ggml_backend_sycl_buffer_reset(ggml_backend_buffer_t buffer) {
+ GGML_SYCL_DEBUG("[SYCL] call %s\n", __func__);
+ if (buffer == nullptr) {
+ return;
+ }
+
+ ggml_backend_sycl_buffer_context * ctx = (ggml_backend_sycl_buffer_context *) buffer->context;
+
+ if (ctx != nullptr) {
+ for (ggml_tensor_extra_gpu * extra : ctx->tensor_extras) {
+ release_extra_gpu(extra);
+ }
+ ctx->tensor_extras.clear(); // reset the tensor_extras vector
+ }
+}
+
+static const ggml_backend_buffer_i ggml_backend_sycl_buffer_interface = {
+ /* .free_buffer = */ ggml_backend_sycl_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_sycl_buffer_get_base,
+ /* .init_tensor = */ ggml_backend_sycl_buffer_init_tensor,
+ /* .memset_tensor = */ ggml_backend_sycl_buffer_memset_tensor,
+ /* .set_tensor = */ ggml_backend_sycl_buffer_set_tensor,
+ /* .get_tensor = */ ggml_backend_sycl_buffer_get_tensor,
+ /* .cpy_tensor = */ ggml_backend_sycl_buffer_cpy_tensor,
+ /* .clear = */ ggml_backend_sycl_buffer_clear,
+ /* .reset = */ ggml_backend_sycl_buffer_reset,
+};
+
+// sycl buffer type
+struct ggml_backend_sycl_buffer_type_context {
+ int device;
+ std::string name;
+
+ // each buffer type has its own stream
+ queue_ptr stream = nullptr;
+};
+
+static const char * ggml_backend_sycl_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+ ggml_backend_sycl_buffer_type_context * ctx = (ggml_backend_sycl_buffer_type_context *)buft->context;
+
+ return ctx->name.c_str();
+}
+
+static ggml_backend_buffer_t
+ggml_backend_sycl_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft,
+ size_t size) try {
+ ggml_backend_sycl_buffer_type_context * buft_ctx = (ggml_backend_sycl_buffer_type_context *)buft->context;
+ ggml_sycl_set_device(buft_ctx->device);
+ const queue_ptr stream = buft_ctx->stream;
+ size = std::max(size, (size_t)1); // syclMalloc returns null for size 0
+
+ void * dev_ptr;
+ SYCL_CHECK(CHECK_TRY_ERROR(dev_ptr = (void *)sycl::malloc_device(
+ size, *stream)));
+ if (!dev_ptr) {
+ GGML_LOG_ERROR("%s: can't allocate %lu Bytes of memory on device\n", __func__, size);
+ return nullptr;
+ }
+ ggml_backend_sycl_buffer_context * ctx = new ggml_backend_sycl_buffer_context(buft_ctx->device, dev_ptr, buft_ctx->stream);
+ return ggml_backend_buffer_init(buft, ggml_backend_sycl_buffer_interface, ctx, size);
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static size_t ggml_backend_sycl_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+ return 128;
+ GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_sycl_buffer_type_get_max_size(ggml_backend_buffer_type_t buft) {
+ return dpct::get_current_device().get_max_mem_alloc_size();
+
+ GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_sycl_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ size_t size = ggml_nbytes(tensor);
+ int64_t ne0 = tensor->ne[0];
+
+ if (ggml_is_quantized(tensor->type)) {
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+ }
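+    // Example (illustrative): for a Q4_0 tensor with ne0 = 100 and MATRIX_ROW_PADDING = 512,
+    // the allocation grows by ggml_row_size(GGML_TYPE_Q4_0, 412) so that padded reads past
+    // the end of the last row stay in bounds.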
+
+ return size;
+
+ GGML_UNUSED(buft);
+}
+
+static const ggml_backend_buffer_type_i ggml_backend_sycl_buffer_type_interface = {
+ /* .get_name = */ ggml_backend_sycl_buffer_type_get_name,
+ /* .alloc_buffer = */ ggml_backend_sycl_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_sycl_buffer_type_get_alignment,
+ /* .get_max_size = */ ggml_backend_sycl_buffer_type_get_max_size,
+ /* .get_alloc_size = */ ggml_backend_sycl_buffer_type_get_alloc_size,
+ /* .is_host = */ NULL,
+};
+
+ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(int device) {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+
+
+ auto dev_count = ggml_backend_sycl_get_device_count();
+
+ if (device>=dev_count or device<0) {
+        GGML_LOG_ERROR("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d]; did you forget to call ggml_backend_sycl_set_single_device()?\n",
+            device, dev_count-1);
+ GGML_ASSERT(device<dev_count);
+ }
+ static struct ggml_backend_buffer_type ggml_backend_sycl_buffer_types[GGML_SYCL_MAX_DEVICES];
+
+ static bool ggml_backend_sycl_buffer_type_initialized = false;
+
+ if (!ggml_backend_sycl_buffer_type_initialized) {
+ for (int i = 0; i < dev_count; i++) {
+ auto & device_i = dpct::dev_mgr::instance().get_device(i);
+ queue_ptr stream = &(device_i.default_queue());
+ ggml_backend_sycl_buffer_types[i] = {
+ /* .iface = */ ggml_backend_sycl_buffer_type_interface,
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_sycl_reg(), i),
+ /* .context = */ new ggml_backend_sycl_buffer_type_context{i, GGML_SYCL_NAME + std::to_string(i), stream},
+ };
+ }
+ ggml_backend_sycl_buffer_type_initialized = true;
+ }
+ return &ggml_backend_sycl_buffer_types[device];
+}
+
+static ggml_backend_buffer_type_t ggml_backend_sycl_buffer_type(ggml_backend_sycl_context * ctx) {
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_buffer_type\n");
+
+ int device = ctx->device;
+ if (device>=ggml_sycl_info().device_count or device<0) {
+        GGML_LOG_ERROR("ggml_backend_sycl_buffer_type error: device_index:%d is out of range [0, %d]; did you forget to call ggml_backend_sycl_set_single_device()?\n",
+            device, ggml_sycl_info().device_count-1);
+ GGML_ASSERT(device<ggml_sycl_info().device_count);
+ }
+ static struct ggml_backend_buffer_type ggml_backend_sycl_buffer_types[GGML_SYCL_MAX_DEVICES];
+
+ static bool ggml_backend_sycl_buffer_type_initialized = false;
+
+ if (!ggml_backend_sycl_buffer_type_initialized) {
+ for (int i = 0; i < ggml_sycl_info().device_count; i++) {
+ ggml_backend_sycl_buffer_types[i] = {
+ /* .iface = */ ggml_backend_sycl_buffer_type_interface,
+ /* .device = */ nullptr,
+ /* .context = */ new ggml_backend_sycl_buffer_type_context{i, GGML_SYCL_NAME + std::to_string(i), ctx->stream(i, 0)},
+ };
+ }
+ ggml_backend_sycl_buffer_type_initialized = true;
+ }
+ return &ggml_backend_sycl_buffer_types[device];
+}
+
+// sycl split buffer
+
+static int64_t get_row_rounding(ggml_type type, const std::array<float, GGML_SYCL_MAX_DEVICES> & tensor_split) {
+ int64_t min_compute_capability = INT_MAX;
+ int64_t max_compute_capability = INT_MIN;
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ if (tensor_split[i] < (i + 1 < ggml_sycl_info().device_count ? tensor_split[i + 1] : 1.0f)) {
+ if (min_compute_capability > ggml_sycl_info().devices[i].cc) {
+ min_compute_capability = ggml_sycl_info().devices[i].cc;
+ }
+ if (max_compute_capability < ggml_sycl_info().devices[i].cc) {
+ max_compute_capability = ggml_sycl_info().devices[i].cc;
+ }
+ }
+ }
+
+ switch(type) {
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ return max_compute_capability >= VER_GEN9 ? 128 : 64;
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ return 64;
+ case GGML_TYPE_F16:
+ case GGML_TYPE_F32:
+ return 1;
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_IQ2_XXS:
+ case GGML_TYPE_IQ2_XS:
+ case GGML_TYPE_IQ2_S:
+ case GGML_TYPE_IQ1_S:
+ case GGML_TYPE_IQ1_M:
+ case GGML_TYPE_IQ3_XXS:
+ case GGML_TYPE_IQ4_XS:
+ case GGML_TYPE_IQ4_NL:
+ return max_compute_capability >= VER_GEN9 ? 128 : 64;
+ case GGML_TYPE_IQ3_S:
+ return max_compute_capability >= VER_GEN9 ? 128 : 64;
+ case GGML_TYPE_Q6_K:
+ return 64;
+ default:
+ GGML_ABORT("fatal error");
+ }
+}
+
+static void get_row_split(int64_t * row_low, int64_t * row_high, const ggml_tensor * tensor, const std::array<float, GGML_SYCL_MAX_DEVICES> & tensor_split, int id) {
+ const int64_t nrows = ggml_nrows(tensor);
+ const int64_t rounding = get_row_rounding(tensor->type, tensor_split);
+
+ *row_low = id == 0 ? 0 : nrows*tensor_split[id];
+ *row_low -= *row_low % rounding;
+ if (id == ggml_sycl_info().device_count - 1) {
+ *row_high = nrows;
+ } else {
+ *row_high = nrows*tensor_split[id + 1];
+ *row_high -= *row_high % rounding;
+ }
+}
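+
+// Example (illustrative): with two devices, tensor_split = {0.0f, 0.5f}, nrows = 1000 and a
+// rounding of 64, device 0 gets rows [0, 448) (500 rounded down to a multiple of 64) and
+// device 1, being last, takes the remainder [448, 1000).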
+
+static size_t ggml_nbytes_split(const struct ggml_tensor * tensor, int nrows_split) {
+ static_assert(GGML_MAX_DIMS == 4, "GGML_MAX_DIMS is not 4 - update this function");
+
+ return nrows_split*ggml_row_size(tensor->type, tensor->ne[0]);
+}
+
+struct ggml_backend_sycl_split_buffer_type_context {
+ std::array<float, GGML_SYCL_MAX_DEVICES> tensor_split;
+};
+
+struct ggml_backend_sycl_split_buffer_context {
+ ~ggml_backend_sycl_split_buffer_context() try {
+ for (ggml_tensor_extra_gpu * extra : tensor_extras) {
+ release_extra_gpu(extra, streams);
+ }
+ }
+ catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+ }
+
+ std::vector<ggml_tensor_extra_gpu *> tensor_extras;
+ std::vector<queue_ptr> streams;
+};
+
+static void ggml_backend_sycl_split_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
+ delete ctx;
+}
+
+static void * ggml_backend_sycl_split_buffer_get_base(ggml_backend_buffer_t buffer) {
+ // the pointers are stored in the tensor extras, this is just a dummy address and never dereferenced
+ return (void *)0x1000;
+
+ GGML_UNUSED(buffer);
+}
+
+static enum ggml_status
+ggml_backend_sycl_split_buffer_init_tensor(ggml_backend_buffer_t buffer,
+ ggml_tensor *tensor) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor, "\n").c_str());
+ GGML_ASSERT(tensor->view_src == nullptr); // views of split tensors are not supported
+
+ ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
+ ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context;
+
+ const int64_t ne0 = tensor->ne[0];
+
+ ggml_tensor_extra_gpu * extra = new ggml_tensor_extra_gpu{};
+
+ ctx->tensor_extras.push_back(extra);
+ ctx->streams.push_back(&(dpct::get_current_device().default_queue()));
+
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ size_t size = ggml_nbytes_split(tensor, nrows_split);
+ const size_t original_size = size;
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+
+ // FIXME: do not crash if SYCL Buffer alloc fails
+ // currently, init_tensor cannot fail, it needs to be fixed in ggml-backend first
+ ggml_sycl_set_device(i);
+ const queue_ptr stream = ctx->streams[i];
+ char * buf;
+ /*
+ DPCT1009:208: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+ SYCL_CHECK(CHECK_TRY_ERROR(buf = (char *)sycl::malloc_device(
+ size, *stream)));
+ if (!buf) {
+ char err_buf[1024];
+ snprintf(err_buf, 1023, "%s: can't allocate %lu Bytes of memory on device\n", __func__, size);
+ throw std::runtime_error(err_buf);
+ }
+ // set padding to 0 to avoid possible NaN values
+ if (size > original_size) {
+ /*
+ DPCT1009:209: SYCL uses exceptions to report errors and does not use
+ the error codes. The original code was commented out and a warning
+ string was inserted. You need to rewrite this code.
+ */
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ (*stream)
+ .memset(buf + original_size, 0, size - original_size)
+ .wait()));
+ }
+
+ extra->data_device[i] = buf;
+
+ for (int64_t is = 0; is < GGML_SYCL_MAX_STREAMS; ++is) {
+ /*
+ DPCT1009:210: SYCL uses exceptions to report errors and does not use
+ the error codes. The original code was commented out and a warning
+ string was inserted. You need to rewrite this code.
+ */
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(extra->events[i][is] = new sycl::event()));
+ }
+ }
+ tensor->extra = extra;
+ return GGML_STATUS_SUCCESS;
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void
+ggml_backend_sycl_split_buffer_set_tensor(ggml_backend_buffer_t buffer,
+ ggml_tensor *tensor, const void *data,
+ size_t offset, size_t size) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str());
+ GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset);
+ // split tensors must always be set in their entirety at once
+ GGML_ASSERT(offset == 0);
+ GGML_ASSERT(size == ggml_nbytes(tensor));
+
+ ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
+ ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context;
+
+ const int64_t ne0 = tensor->ne[0];
+ const size_t nb1 = tensor->nb[1];
+ ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra;
+
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ const size_t offset_split = row_low*nb1;
+ size_t size = ggml_nbytes_split(tensor, nrows_split);
+ const size_t original_size = size;
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+
+ const char * buf_host = (const char *)data + offset_split;
+ /*
+ DPCT1009:211: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+ ggml_sycl_set_device(i);
+ const queue_ptr stream = ctx->streams[i];
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ (*stream)
+ .memcpy(extra->data_device[i], buf_host, original_size)
+ .wait()));
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void
+ggml_backend_sycl_split_buffer_get_tensor(ggml_backend_buffer_t buffer,
+ const ggml_tensor *tensor, void *data,
+ size_t offset, size_t size) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str());
+ GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset);
+    // split tensors must always be read in their entirety at once
+ GGML_ASSERT(offset == 0);
+ GGML_ASSERT(size == ggml_nbytes(tensor));
+
+ ggml_backend_sycl_split_buffer_context * ctx = (ggml_backend_sycl_split_buffer_context *)buffer->context;
+ ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *)buffer->buft->context;
+
+ const int64_t ne0 = tensor->ne[0];
+ const size_t nb1 = tensor->nb[1];
+ ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *)tensor->extra;
+
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, buft_ctx->tensor_split, i);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ const size_t offset_split = row_low*nb1;
+ size_t size = ggml_nbytes_split(tensor, nrows_split);
+ const size_t original_size = size;
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+
+ char * buf_host = (char *)data + offset_split;
+ /*
+ DPCT1009:212: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+ ggml_sycl_set_device(i);
+ const queue_ptr stream = ctx->streams[i];
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ (*stream)
+ .memcpy(buf_host, extra->data_device[i], original_size)
+ .wait()));
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_split_buffer_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+ GGML_UNUSED(buffer);
+ GGML_UNUSED(value);
+}
+
+static struct ggml_backend_buffer_i ggml_backend_sycl_split_buffer_interface = {
+ /* .free_buffer = */ ggml_backend_sycl_split_buffer_free_buffer,
+ /* .get_base = */ ggml_backend_sycl_split_buffer_get_base,
+ /* .init_tensor = */ ggml_backend_sycl_split_buffer_init_tensor,
+ /* .memset_tensor = */ NULL,
+ /* .set_tensor = */ ggml_backend_sycl_split_buffer_set_tensor,
+ /* .get_tensor = */ ggml_backend_sycl_split_buffer_get_tensor,
+ /* .cpy_tensor = */ NULL,
+ /* .clear = */ ggml_backend_sycl_split_buffer_clear,
+ /* .reset = */ NULL,
+};
+
+// sycl split buffer type
+
+static const char * ggml_backend_sycl_split_buffer_type_get_name(ggml_backend_buffer_type_t buft) {
+ return GGML_SYCL_NAME "_Split";
+
+ GGML_UNUSED(buft);
+}
+
+static bool ggml_backend_buffer_is_sycl_split(ggml_backend_buffer_t buffer) {
+ return buffer->buft->iface.get_name == ggml_backend_sycl_split_buffer_type_get_name;
+}
+
+static ggml_backend_buffer_t ggml_backend_sycl_split_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ // since we don't know the exact split after rounding, we cannot allocate the device buffers at this point
+ // instead, we allocate them for each tensor separately in init_tensor
+ // however, the size still represents the maximum cumulative size of all the device buffers after the tensors are allocated,
+ // as returned by get_alloc_size. this limit is enforced during tensor allocation by ggml-alloc, so it must be correct.
+ ggml_backend_sycl_split_buffer_context * ctx = new ggml_backend_sycl_split_buffer_context();
+
+ return ggml_backend_buffer_init(buft, ggml_backend_sycl_split_buffer_interface, ctx, size);
+}
+
+static size_t ggml_backend_sycl_split_buffer_type_get_alignment(ggml_backend_buffer_type_t buft) {
+ return 128;
+ GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_sycl_split_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ ggml_backend_sycl_split_buffer_type_context * ctx = (ggml_backend_sycl_split_buffer_type_context *)buft->context;
+
+ size_t total_size = 0;
+
+ const int64_t ne0 = tensor->ne[0];
+
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ int64_t row_low, row_high;
+ get_row_split(&row_low, &row_high, tensor, ctx->tensor_split, i);
+
+ int64_t nrows_split = row_high - row_low;
+ if (nrows_split == 0) {
+ continue;
+ }
+
+ total_size += ggml_nbytes_split(tensor, nrows_split);
+
+ // pad last row to a multiple of 512 elements to avoid out-of-bounds memory accesses
+ if (ne0 % MATRIX_ROW_PADDING != 0) {
+ total_size += ggml_row_size(tensor->type, MATRIX_ROW_PADDING - ne0 % MATRIX_ROW_PADDING);
+ }
+ }
+
+ return total_size;
+}
+
+static bool ggml_backend_sycl_split_buffer_type_is_host(ggml_backend_buffer_type_t buft) {
+ return false;
+
+ GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_i ggml_backend_sycl_split_buffer_type_interface = {
+ /* .get_name = */ ggml_backend_sycl_split_buffer_type_get_name,
+ /* .alloc_buffer = */ ggml_backend_sycl_split_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_sycl_split_buffer_type_get_alignment,
+ /* .get_max_size = */ NULL, // defaults to SIZE_MAX
+ /* .get_alloc_size = */ ggml_backend_sycl_split_buffer_type_get_alloc_size,
+ /* .is_host = */ ggml_backend_sycl_split_buffer_type_is_host,
+};
+
+ggml_backend_buffer_type_t ggml_backend_sycl_split_buffer_type(const float * tensor_split) {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_split_buffer_type\n");
+ ggml_check_sycl();
+ // FIXME: this is not thread safe
+ static std::map<std::array<float, GGML_SYCL_MAX_DEVICES>, struct ggml_backend_buffer_type> buft_map;
+
+ std::array<float, GGML_SYCL_MAX_DEVICES> tensor_split_arr = {};
+
+ bool all_zero = tensor_split == nullptr || std::all_of(tensor_split, tensor_split + GGML_SYCL_MAX_DEVICES, [](float x) { return x == 0.0f; });
+ if (all_zero) {
+ tensor_split_arr = ggml_sycl_info().default_tensor_split;
+ } else {
+ float split_sum = 0.0f;
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ tensor_split_arr[i] = split_sum;
+ split_sum += tensor_split[i];
+ }
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ tensor_split_arr[i] /= split_sum;
+ }
+ }
+
+ auto it = buft_map.find(tensor_split_arr);
+ if (it != buft_map.end()) {
+ return &it->second;
+ }
+
+ struct ggml_backend_buffer_type buft {
+ /* .iface = */ ggml_backend_sycl_split_buffer_type_interface,
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_sycl_reg(), 0),
+ /* .context = */ new ggml_backend_sycl_split_buffer_type_context{tensor_split_arr},
+ };
+
+ auto result = buft_map.emplace(tensor_split_arr, buft);
+ return &result.first->second;
+}
+
+// host buffer type
+
+static const char * ggml_backend_sycl_host_buffer_type_name(ggml_backend_buffer_type_t buft) {
+ return GGML_SYCL_NAME "_Host";
+
+ GGML_UNUSED(buft);
+}
+
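+// Portable aligned host allocation: _aligned_malloc on Windows, C11 aligned_alloc elsewhere.
+// Note that aligned_alloc formally requires size to be a multiple of alignment.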
+inline void * aligned_malloc_host(size_t alignment, size_t size) {
+#ifdef _WIN32
+ return _aligned_malloc(size, alignment);
+#else
+ return aligned_alloc(alignment, size);
+#endif
+}
+
+inline void free_aligned_mem_host(void * memblock) {
+#ifdef _WIN32
+ _aligned_free(memblock);
+#else
+ free(memblock);
+#endif
+}
+
+static void ggml_backend_sycl_host_buffer_free_buffer(ggml_backend_buffer_t buffer) {
+ free_aligned_mem_host((void *)buffer->context);
+}
+
+static ggml_backend_buffer_t ggml_backend_sycl_host_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ void * ptr = aligned_malloc_host(TENSOR_ALIGNMENT, size);
+ if (ptr == nullptr) {
+ // fallback to cpu buffer
+ return ggml_backend_buft_alloc_buffer(ggml_backend_cpu_buffer_type(), size);
+ }
+
+ // FIXME: this is a hack to avoid having to implement a new buffer type
+ ggml_backend_buffer_t buffer = ggml_backend_cpu_buffer_from_ptr(ptr, size);
+ buffer->buft = buft;
+ buffer->iface.free_buffer = ggml_backend_sycl_host_buffer_free_buffer;
+
+ return buffer;
+}
+
+ggml_backend_buffer_type_t ggml_backend_sycl_host_buffer_type() {
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_host_buffer_type\n");
+ static struct ggml_backend_buffer_type ggml_backend_sycl_buffer_type_host = {
+ /* .iface = */ {
+ /* .get_name = */ ggml_backend_sycl_host_buffer_type_name,
+ /* .alloc_buffer = */ ggml_backend_sycl_host_buffer_type_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_cpu_buffer_type()->iface.get_alignment,
+ /* .get_max_size = */ NULL, // TODO: return device.maxBufferLength
+ /* .get_alloc_size = */ ggml_backend_cpu_buffer_type()->iface.get_alloc_size,
+ /* .is_host = */ ggml_backend_cpu_buffer_type()->iface.is_host,
+ },
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_sycl_reg(), 0),
+ /* .context = */ nullptr,
+ };
+
+ return &ggml_backend_sycl_buffer_type_host;
+}
+
+// buffer pool for sycl (legacy)
+struct ggml_sycl_pool_leg : public ggml_sycl_pool {
+ static const int MAX_SYCL_BUFFERS = 256;
+
+ int device;
+ queue_ptr qptr;
+ struct ggml_sycl_buffer {
+ void * ptr = nullptr;
+ size_t size = 0;
+ };
+
+ ggml_sycl_buffer buffer_pool[MAX_SYCL_BUFFERS] = {};
+ size_t pool_size = 0;
+
+ explicit ggml_sycl_pool_leg(queue_ptr qptr_, int device_) : device(device_), qptr(qptr_) {}
+
+ ~ggml_sycl_pool_leg() {
+ for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) {
+ ggml_sycl_buffer & b = buffer_pool[i];
+ if (b.ptr != nullptr) {
+ SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(b.ptr, *qptr)));
+ pool_size -= b.size;
+ }
+ }
+ GGML_ASSERT(pool_size == 0);
+ }
+
+ void * alloc(size_t size, size_t * actual_size) override {
+#ifdef DEBUG_SYCL_MALLOC
+ int nnz = 0;
+ size_t max_size = 0;
+#endif
+ size_t best_diff = 1ull << 36;
+ int ibest = -1;
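+        // Best-fit scan: reuse the pooled buffer whose size exceeds the request by the
+        // smallest margin; an exact match is returned immediately.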
+ for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) {
+ ggml_sycl_buffer& b = buffer_pool[i];
+ if (b.ptr != nullptr) {
+#ifdef DEBUG_SYCL_MALLOC
+ ++nnz;
+ if (b.size > max_size) max_size = b.size;
+#endif
+ if (b.size >= size) {
+ size_t diff = b.size - size;
+ if (diff < best_diff) {
+ best_diff = diff;
+ ibest = i;
+ if (!best_diff) {
+ void * ptr = b.ptr;
+ *actual_size = b.size;
+ b.ptr = nullptr;
+ b.size = 0;
+ return ptr;
+ }
+ }
+ }
+ }
+ }
+ if (ibest >= 0) {
+ ggml_sycl_buffer& b = buffer_pool[ibest];
+ void * ptr = b.ptr;
+ *actual_size = b.size;
+ b.ptr = nullptr;
+ b.size = 0;
+ return ptr;
+ }
+ void * ptr;
+ size_t look_ahead_size = (size_t) (1.05 * size);
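+        // 5% look-ahead: over-allocating slightly lets a future, marginally larger
+        // request reuse this buffer instead of forcing a fresh device allocation.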
+
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(ptr = (void *)sycl::malloc_device(
+ look_ahead_size, *qptr)));
+ if (!ptr) {
+ GGML_LOG_ERROR("%s: can't allocate %lu Bytes of memory on device/GPU\n", __func__, look_ahead_size);
+ return nullptr;
+ }
+
+ *actual_size = look_ahead_size;
+ pool_size += look_ahead_size;
+
+#ifdef DEBUG_SYCL_MALLOC
+        GGML_LOG_DEBUG("%s[%d]: %d buffers, max_size = %u MB, pool_size = %u MB, requested %u MB\n", __func__, device, nnz,
+            (uint32_t)(max_size/1024/1024), (uint32_t)(pool_size/1024/1024), (uint32_t)(size/1024/1024));
+#endif
+
+ // GGML_SYCL_DEBUG("ggml_sycl_pool_malloc_leg look_ahead_size=%lu, return %p\n", look_ahead_size, ptr);
+ return ptr;
+ }
+
+ void free(void * ptr, size_t size) override {
+ for (int i = 0; i < MAX_SYCL_BUFFERS; ++i) {
+ ggml_sycl_buffer& b = buffer_pool[i];
+ if (b.ptr == nullptr) {
+ b.ptr = ptr;
+ b.size = size;
+ return;
+ }
+ }
+        GGML_LOG_WARN("WARNING: sycl buffer pool full, increase MAX_SYCL_BUFFERS\n");
+ SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(ptr, *qptr)));
+ pool_size -= size;
+ }
+};
+
+struct ggml_sycl_pool_host : public ggml_sycl_pool {
+ queue_ptr qptr;
+ int device;
+
+ inline static int counter{ 0 };
+
+ struct ggml_sycl_buffer {
+ void * ptr = nullptr;
+ size_t size = 0;
+ };
+
+    // Set arbitrarily to 64
+ static constexpr int MAX_POOL_SIZE{ 64 };
+ std::vector<ggml_sycl_buffer> buffer_pool = std::vector<ggml_sycl_buffer>(MAX_POOL_SIZE);
+ size_t pool_size = 0;
+
+ explicit ggml_sycl_pool_host(queue_ptr qptr_, int device_) : qptr(qptr_), device(device_) {}
+
+ ~ggml_sycl_pool_host() {
+ for (int i = 0; i < MAX_POOL_SIZE; ++i) {
+ ggml_sycl_buffer & b = buffer_pool[i];
+ if (b.ptr != nullptr) {
+ SYCL_CHECK(CHECK_TRY_ERROR(sycl::free(b.ptr, *qptr)));
+ b.ptr = nullptr;
+ pool_size -= b.size;
+ b.size = 0;
+ }
+ }
+ counter = 0;
+ }
+
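+    // Slots are handed out in order; once all MAX_POOL_SIZE slots have been used, the
+    // counter wraps and slot 0 is reused, so at most 64 host buffers are kept alive.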
+ void * alloc(size_t size, size_t * actual_size) override {
+ if (counter == MAX_POOL_SIZE) {
+ ggml_sycl_buffer b = buffer_pool[0];
+ void * ptr = b.ptr;
+ *actual_size = b.size;
+ counter = 1;
+ return ptr;
+ }
+ ggml_sycl_buffer & b = buffer_pool[counter];
+
+ if (b.ptr == nullptr) {
+ void * ptr;
+
+ SYCL_CHECK(CHECK_TRY_ERROR(ptr = (void *) sycl::malloc_host(size, *qptr)));
+ if (!ptr) {
+ GGML_LOG_ERROR("%s: can't allocate %lu Bytes of memory on host\n", __func__, size);
+ return nullptr;
+ }
+ pool_size += size;
+ *actual_size = size;
+ counter = counter + 1;
+ return ptr;
+ } else {
+ ++counter;
+ b.size = size;
+ return b.ptr;
+ }
+ }
+
+ void free(void * ptr, size_t size) override {
+        // If the pool is not full, store the pointer in the first empty slot;
+        // otherwise do nothing: the pointers are freed when the pool itself is destroyed.
+ for (int i = 0; i < MAX_POOL_SIZE; ++i) {
+ ggml_sycl_buffer & b = buffer_pool[i];
+ if (b.ptr == nullptr) {
+ b.ptr = ptr;
+ b.size = size;
+ return;
+ }
+ }
+ }
+};
+
+std::unique_ptr<ggml_sycl_pool> ggml_backend_sycl_context::new_pool_for_host(queue_ptr qptr, int device) {
+ // return pool for the host to speed up memory management
+ return std::unique_ptr<ggml_sycl_pool>(new ggml_sycl_pool_host(qptr, device));
+}
+
+std::unique_ptr<ggml_sycl_pool> ggml_backend_sycl_context::new_pool_for_device(queue_ptr qptr, int device) {
+ // TBD: NO VMM support
+ // if (ggml_sycl_info().devices[device].vmm) {
+ // return std::unique_ptr<ggml_sycl_pool>(new ggml_sycl_pool_vmm(device));
+ // }
+ return std::unique_ptr<ggml_sycl_pool>(new ggml_sycl_pool_leg(qptr, device));
+}
+
+// TBD pool with virtual memory management
+// struct ggml_sycl_pool_vmm : public ggml_sycl_pool
+
+/// kernels
+typedef void (*ggml_sycl_op_mul_mat_t)(
+ ggml_backend_sycl_context & ctx,
+ const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
+ const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
+ float *dst_dd_i, const int64_t row_low, const int64_t row_high,
+ const int64_t src1_ncols, const int64_t src1_padded_row_size,
+ const queue_ptr &stream);
+
+
+
+static void mul_mat_p021_f16_f32(
+ const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int nchannels_x, const int nchannels_y,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const sycl::half *x = (const sycl::half *)vx;
+
+ const int row_x = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
+ item_ct1.get_local_id(1);
+ const int channel = item_ct1.get_local_range(0) * item_ct1.get_group(0) +
+ item_ct1.get_local_id(0);
+ const int channel_x = channel / (nchannels_y / nchannels_x);
+
+ const int nrows_y = ncols_x;
+ const int nrows_dst = nrows_x;
+ const int row_dst = row_x;
+
+ float tmp = 0.0f;
+
+ for (int col_x0 = 0; col_x0 < ncols_x;
+ col_x0 += item_ct1.get_local_range(2)) {
+ const int col_x = col_x0 + item_ct1.get_local_id(2);
+
+ if (col_x >= ncols_x) {
+ break;
+ }
+
+ // x is transposed and permuted
+ const int ix = row_x*nchannels_x*ncols_x + channel_x*ncols_x + col_x;
+ const float xi =
+ sycl::vec<sycl::half, 1>(x[ix])
+ .convert<float, sycl::rounding_mode::automatic>()[0];
+
+ const int row_y = col_x;
+
+
+ // y is not transposed but permuted
+ const int iy = channel*nrows_y + row_y;
+
+ tmp += xi * y[iy];
+ }
+
+ // dst is not transposed and not permuted
+ const int idst = channel*nrows_dst + row_dst;
+
+ // sum up partial sums and write back result
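+    // Butterfly reduction over the sub-group: with a sub-group size of WARP_SIZE (e.g. 32),
+    // lanes exchange partial sums at xor distances 16, 8, 4, 2, 1; afterwards every lane
+    // holds the total, and lane 0 writes it out.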
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[idst] = tmp;
+ }
+}
+
+static void mul_mat_vec_nc_f16_f32( // nc == non-contiguous
+ const void * __restrict__ vx, const float * __restrict__ y, float * __restrict__ dst, const int ncols_x, const int nrows_x,
+ const int row_stride_x, const int channel_stride_x,const int channel_stride_y, const int channel_x_divisor,
+ const sycl::nd_item<3> &item_ct1) {
+
+ const sycl::half *x = (const sycl::half *)vx;
+
+ const int row_x = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
+ item_ct1.get_local_id(1);
+ const int channel = item_ct1.get_local_range(0) * item_ct1.get_group(0) +
+ item_ct1.get_local_id(0);
+ const int channel_x = channel / channel_x_divisor;
+
+ const int nrows_dst = nrows_x;
+ const int row_dst = row_x;
+
+ const int idst = channel*nrows_dst + row_dst;
+
+ float tmp = 0.0f;
+
+ for (int col_x0 = 0; col_x0 < ncols_x;
+ col_x0 += item_ct1.get_local_range(2)) {
+ const int col_x = col_x0 + item_ct1.get_local_id(2);
+
+ if (col_x >= ncols_x) {
+ break;
+ }
+
+ const int row_y = col_x;
+
+ const int ix = channel_x*channel_stride_x + row_x*row_stride_x + col_x;
+ const int iy = channel * channel_stride_y + row_y;
+
+ const float xi =
+ sycl::vec<sycl::half, 1>(x[ix])
+ .convert<float, sycl::rounding_mode::automatic>()[0];
+
+ tmp += xi * y[iy];
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[idst] = tmp;
+ }
+}
+
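+// one work-group per row: each work-item accumulates a strided partial sum of
+// the columns, the partials are combined with warp_reduce_sum, and lane 0
+// writes the row total.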
+static void k_sum_rows_f32(const float * x, float * dst, const int ncols,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(1);
+ const int col = item_ct1.get_local_id(2);
+
+ float sum = 0.0f;
+ for (int i = col; i < ncols; i += item_ct1.get_local_range(2)) {
+ sum += x[row * ncols + i];
+ }
+
+ sum = warp_reduce_sum(sum, item_ct1);
+
+ if (col == 0) {
+ dst[row] = sum;
+ }
+}
+
+
+template<typename T>
+static inline void ggml_sycl_swap(T & a, T & b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
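+// bitonic argsort, one row per work-group, sorting indices held in local
+// memory. ncols is padded up to a power of two (ncols_pad); padded slots hold
+// indices >= ncols, which the comparisons below always push to the tail of
+// the order, and they are dropped when the result is copied back. Each
+// work-item handles tasks_per_thread elements so rows wider than the maximum
+// work-group size still fit.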
+template <ggml_sort_order order>
+__dpct_inline__ static void
+k_argsort_f32_i32(const float *x, int *dst, const int ncols, int ncols_pad,
+ const int tasks_per_thread, const sycl::nd_item<3> &item_ct1,
+ uint8_t *dpct_local) {
+ // bitonic sort
+ int col_index = item_ct1.get_local_id(2);
+ int row = item_ct1.get_group(1);
+
+ for (int i = 0; i < tasks_per_thread; i++) {
+ int col = col_index * tasks_per_thread + i;
+ if (col >= ncols_pad) {
+ return;
+ }
+ }
+
+ const float * x_row = x + row * ncols;
+ auto dst_row = (int *)dpct_local;
+
+ // initialize indices
+    for (int i = 0; i < tasks_per_thread; i++) {
+        int col = col_index * tasks_per_thread + i;
+        dst_row[col] = col;
+    }
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ for (int k = 2; k <= ncols_pad; k *= 2) {
+ for (int j = k / 2; j > 0; j /= 2) {
+ for (int i = 0; i < tasks_per_thread; i++) {
+ int col = col_index * tasks_per_thread + i;
+ int ixj = col ^ j;
+ if (ixj > col) {
+ if ((col & k) == 0) {
+ if (dst_row[col] >= ncols ||
+ (dst_row[ixj] < ncols &&
+ (order == GGML_SORT_ORDER_ASC
+ ? x_row[dst_row[col]] > x_row[dst_row[ixj]]
+ : x_row[dst_row[col]] <
+ x_row[dst_row[ixj]]))) {
+ ggml_sycl_swap(dst_row[col], dst_row[ixj]);
+ }
+ } else {
+ if (dst_row[ixj] >= ncols ||
+ (dst_row[col] < ncols &&
+ (order == GGML_SORT_ORDER_ASC
+ ? x_row[dst_row[col]] < x_row[dst_row[ixj]]
+ : x_row[dst_row[col]] >
+ x_row[dst_row[ixj]]))) {
+ ggml_sycl_swap(dst_row[col], dst_row[ixj]);
+ }
+ }
+ }
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+ }
+ }
+ }
+
+ // copy the result to dst without the padding
+ for (int i = 0; i < tasks_per_thread; i++) {
+ int col = col_index * tasks_per_thread + i;
+ if (col < ncols) {
+ dst[row * ncols + col] = dst_row[col];
+ }
+ }
+}
+
+static void diag_mask_inf_f32(const float * x, float * dst, const int ncols, const int rows_per_channel, const int n_past,
+ const sycl::nd_item<3> &item_ct1) {
+ const int col = item_ct1.get_local_range(1) * item_ct1.get_group(1) +
+ item_ct1.get_local_id(1);
+ const int row = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
+ item_ct1.get_local_id(2);
+
+ if (col >= ncols) {
+ return;
+ }
+
+ const int i = row*ncols + col;
+ //dst[i] = col > (n_past + row % rows_per_channel) ? -INFINITY : x[i];
+ //dst[i] = x[i] - (col > n_past + row % rows_per_channel) * INT_MAX; // equivalent within rounding error but slightly faster on GPU
+ dst[i] = x[i] - (col > n_past + row % rows_per_channel) * FLT_MAX;
+}
+
+static void scale_f32(const float * x, float * dst, const float scale, const float bias, const int k,
+ const sycl::nd_item<3> &item_ct1) {
+ const int i = item_ct1.get_local_range(2) * item_ct1.get_group(2) +
+ item_ct1.get_local_id(2);
+
+ if (i >= k) {
+ return;
+ }
+
+ dst[i] = scale * x[i] + bias;
+}
+
+
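+// 2D pooling over NCHW data, one work-item per output element. The linear
+// index is decomposed into (nc, oh, ow), the pooling window is clamped to the
+// input bounds, and GGML_OP_POOL_AVG divides by the full kernel size kh*kw
+// (i.e. padding is counted) while GGML_OP_POOL_MAX keeps the running maximum.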
+template <typename Ti, typename To>
+static void pool2d_nchw_kernel(
+ const int ih, const int iw, const int oh, const int ow,
+ const int kh, const int kw, const int sh, const int sw,
+ const int ph, const int pw, const int parallel_elements,
+ const Ti* src, To* dst, const enum ggml_op_pool op,
+ const sycl::nd_item<3> &item_ct1) {
+ int idx = item_ct1.get_local_id(2) +
+ item_ct1.get_group(2) * item_ct1.get_local_range(2);
+ if (idx >= parallel_elements) {
+ return;
+ }
+
+ const int I_HW = ih * iw;
+ const int O_HW = oh * ow;
+ const int nc = idx / O_HW;
+ const int cur_oh = idx % O_HW / ow;
+ const int cur_ow = idx % O_HW % ow;
+ const Ti* i_ptr = src + nc * I_HW;
+ To* o_ptr = dst + nc * O_HW;
+ const int start_h = cur_oh * sh - ph;
+ const int bh = sycl::max(0, start_h);
+ const int eh = sycl::min(ih, start_h + kh);
+ const int start_w = cur_ow * sw - pw;
+ const int bw = sycl::max(0, start_w);
+ const int ew = sycl::min(iw, start_w + kw);
+
+ To res = 0;
+
+ switch (op) {
+ case GGML_OP_POOL_AVG: res = 0; break;
+ case GGML_OP_POOL_MAX: res = -FLT_MAX; break;
+ default:
+ res = (To) sycl::nan(uint32_t(0));
+ break;
+ }
+
+ for (int i = bh; i < eh; i += 1) {
+ for (int j = bw; j < ew; j += 1) {
+#if DPCT_COMPATIBILITY_TEMP >= 350
+ /*
+ DPCT1098:106: The '*' expression is used instead of the __ldg
+ call. These two expressions do not provide the exact same
+ functionality. Check the generated code for potential precision
+ and/or performance issues.
+ */
+ Ti cur = *(i_ptr + i * iw + j);
+#else
+ Ti cur = i_ptr[i * iw + j];
+#endif
+ switch (op) {
+ case GGML_OP_POOL_AVG: res += (cur / (kh * kw)); break;
+ case GGML_OP_POOL_MAX: res = sycl::max(res, (To)cur); break;
+ default:
+ res = (To) sycl::nan(uint32_t(0));
+ break;
+ }
+ }
+ }
+ o_ptr[cur_oh * ow + cur_ow] = res;
+}
+
+
+static void ggml_mul_mat_p021_f16_f32_sycl(const void *vx, const float *y,
+ float *dst, const int ncols_x,
+ const int nrows_x,
+ const int nchannels_x,
+ const int nchannels_y,
+ queue_ptr stream) {
+
+ const sycl::range<3> block_nums(nchannels_y, nrows_x, 1);
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_p021_f16_f32(vx, y, dst, ncols_x, nrows_x, nchannels_x,
+ nchannels_y, item_ct1);
+ });
+ }
+}
+
+static void ggml_mul_mat_vec_nc_f16_f32_sycl(
+ const void *vx, const float *y, float *dst, const int ncols_x,
+ const int nrows_x, const int row_stride_x, const int nchannels_x,
+ const int nchannels_y, const int channel_stride_x, const int channel_stride_y, queue_ptr stream) {
+
+ const sycl::range<3> block_nums(nchannels_y, nrows_x, 1);
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_nc_f16_f32(vx, y, dst, ncols_x, nrows_x,
+ row_stride_x, channel_stride_x, channel_stride_y,
+ nchannels_y / nchannels_x, item_ct1);
+ });
+ }
+}
+
+
+
+static void scale_f32_sycl(const float *x, float *dst, const float scale, const float bias,
+ const int k, queue_ptr stream) {
+ const int num_blocks = (k + SYCL_SCALE_BLOCK_SIZE - 1) / SYCL_SCALE_BLOCK_SIZE;
+ stream->parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_blocks) *
+ sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_SCALE_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ scale_f32(x, dst, scale, bias, k, item_ct1);
+ });
+}
+
+
+static void sum_rows_f32_sycl(const float *x, float *dst, const int ncols,
+ const int nrows, queue_ptr stream) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ const sycl::range<3> block_nums(1, nrows, 1);
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ k_sum_rows_f32(x, dst, ncols, item_ct1);
+ });
+}
+
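+// round x up to the next power of two (returns x if it already is one),
+// e.g. next_power_of_2(5) == 8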
+static int next_power_of_2(int x) {
+ int n = 1;
+ while (n < x) {
+ n *= 2;
+ }
+ return n;
+}
+
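+// launcher: grows the work-group size towards the padded column count, capped
+// by the device limit, and splits the padded row across work-items
+// (tasks_per_thread elements each); the local buffer holding one row of
+// indices must fit in the device's shared local memory (smpbo).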
+static void argsort_f32_i32_sycl(const float *x, int *dst, const int ncols,
+ const int nrows, ggml_sort_order order,
+ queue_ptr stream, int device) {
+    // bitonic sort requires ncols to be a power of 2
+ const int ncols_pad = next_power_of_2(ncols);
+
+ int nth = 1;
+ int max_block_size = ggml_sycl_info().max_work_group_sizes[device];
+ while (nth < ncols_pad && nth < max_block_size)
+ nth *= 2;
+ if (nth > max_block_size)
+ nth = max_block_size;
+
+ const int tasks_per_thread = ncols_pad / nth;
+
+ const sycl::range<3> block_dims(1, 1, nth);
+ const sycl::range<3> block_nums(1, nrows, 1);
+ const size_t shared_mem = ncols_pad * sizeof(int);
+    GGML_ASSERT(shared_mem <= ggml_sycl_info().devices[device].smpbo);
+
+ if (order == GGML_SORT_ORDER_ASC) {
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
+ sycl::range<1>(shared_mem), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_argsort_f32_i32<GGML_SORT_ORDER_ASC>(
+ x, dst, ncols, ncols_pad, tasks_per_thread, item_ct1,
+ dpct_local_acc_ct1
+ .get_multi_ptr<sycl::access::decorated::no>()
+ .get());
+ });
+ });
+ } else if (order == GGML_SORT_ORDER_DESC) {
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
+ sycl::range<1>(shared_mem), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_argsort_f32_i32<GGML_SORT_ORDER_DESC>(
+ x, dst, ncols, ncols_pad, tasks_per_thread, item_ct1,
+ dpct_local_acc_ct1
+ .get_multi_ptr<sycl::access::decorated::no>()
+ .get());
+ });
+ });
+ } else {
+ GGML_ABORT("fatal error");
+ }
+}
+
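+// top-k selection per row (k <= 32, bounded by the fixed-size local arrays):
+// each work-item keeps its k best (value, index) pairs in a descending
+// insertion-sorted array while scanning the row with a stride of block_size,
+// the per-thread candidates are staged in local memory, and work-item 0
+// merges them into the final k indices for the row.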
+static void top_k_f32_sycl(
+ const float * src,
+ int32_t * dst_indices,
+ const int64_t ncols,
+ const int64_t nrows,
+ const int k,
+ dpct::queue_ptr main_stream
+) {
+ const int block_size = 128;
+
+ const sycl::range<1> block_dims(block_size);
+ const sycl::range<1> grid_dims(nrows);
+
+ main_stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<float, 1> shared_vals(sycl::range<1>(block_size * k), cgh);
+ sycl::local_accessor<int, 1> shared_idx(sycl::range<1>(block_size * k), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<1>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<1> item_ct1) {
+ const int row = item_ct1.get_group(0);
+ const int tid = item_ct1.get_local_id(0);
+
+ if (row >= nrows) return;
+
+ const float * src_row = src + row * ncols;
+ int32_t * dst_idx_row = dst_indices + row * k;
+
+ float local_vals[32];
+ int local_idx[32];
+
+ for (int i = 0; i < k; i++) {
+ local_vals[i] = -FLT_MAX;
+ local_idx[i] = -1;
+ }
+
+ for (int col = tid; col < ncols; col += block_size) {
+ float val = src_row[col];
+
+ if (val > local_vals[k-1]) {
+ int pos = k - 1;
+ while (pos > 0 && val > local_vals[pos - 1]) {
+ pos--;
+ }
+
+ for (int i = k - 1; i > pos; i--) {
+ local_vals[i] = local_vals[i - 1];
+ local_idx[i] = local_idx[i - 1];
+ }
+ local_vals[pos] = val;
+ local_idx[pos] = col;
+ }
+ }
+
+ for (int i = 0; i < k; i++) {
+ shared_vals[tid * k + i] = local_vals[i];
+ shared_idx[tid * k + i] = local_idx[i];
+ }
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ if (tid == 0) {
+ float final_vals[32];
+ int final_idx[32];
+
+ for (int i = 0; i < k; i++) {
+ final_vals[i] = -FLT_MAX;
+ final_idx[i] = -1;
+ }
+
+ for (int t = 0; t < block_size; t++) {
+ for (int i = 0; i < k; i++) {
+ float val = shared_vals[t * k + i];
+ int idx = shared_idx[t * k + i];
+
+ if (val > final_vals[k-1]) {
+ int pos = k - 1;
+ while (pos > 0 && val > final_vals[pos - 1]) {
+ pos--;
+ }
+
+ for (int j = k - 1; j > pos; j--) {
+ final_vals[j] = final_vals[j - 1];
+ final_idx[j] = final_idx[j - 1];
+ }
+ final_vals[pos] = val;
+ final_idx[pos] = idx;
+ }
+ }
+ }
+
+ for (int i = 0; i < k; i++) {
+ dst_idx_row[i] = final_idx[i];
+ }
+ }
+ });
+ });
+}
+
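+// argmax of each row: a strided scan per work-item followed by a tree
+// reduction in local memory that keeps the index of the larger value.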
+static void argmax_f32_i32_sycl(const float *x, int *dst, const int ncols,
+ const int nrows, queue_ptr stream) {
+ const sycl::range<3> block_dims(1, 1, SYCL_ARGMAX_BLOCK_SIZE);
+ const sycl::range<3> block_nums(1, nrows, 1);
+    const size_t shared_mem = SYCL_ARGMAX_BLOCK_SIZE * sizeof(float);
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<float, 1> shared_data(
+ sycl::range<1>(shared_mem/sizeof(float)), cgh);
+ sycl::local_accessor<int, 1> shared_indices(
+ sycl::range<1>(shared_mem/sizeof(float)), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ const int tid = item_ct1.get_local_id(2);
+ const int row = item_ct1.get_global_id(1);
+
+ float max_val = -INFINITY;
+ int max_idx = -1;
+
+                for (int col = tid; col < ncols; col += SYCL_ARGMAX_BLOCK_SIZE) {
+ float val = x[row * ncols + col];
+ if (val > max_val) {
+ max_val = val;
+ max_idx = col;
+ }
+ }
+
+ shared_data[tid] = max_val;
+ shared_indices[tid] = max_idx;
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+                for (int stride = SYCL_ARGMAX_BLOCK_SIZE / 2; stride > 0; stride >>= 1) {
+ if (tid < stride) {
+ float val1 = shared_data[tid];
+ float val2 = shared_data[tid + stride];
+ if (val2 > val1) {
+ shared_data[tid] = val2;
+ shared_indices[tid] = shared_indices[tid + stride];
+ }
+ }
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+ }
+
+ if (tid == 0) {
+ dst[row] = shared_indices[0];
+ }
+ });
+ });
+}
+
+static void diag_mask_inf_f32_sycl(const float *x, float *dst,
+ const int ncols_x, const int nrows_x,
+ const int rows_per_channel, const int n_past,
+ queue_ptr stream) {
+ const sycl::range<3> block_dims(1, SYCL_DIAG_MASK_INF_BLOCK_SIZE, 1);
+ const int block_num_x = (ncols_x + SYCL_DIAG_MASK_INF_BLOCK_SIZE - 1) / SYCL_DIAG_MASK_INF_BLOCK_SIZE;
+ const sycl::range<3> block_nums(1, block_num_x, nrows_x);
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ diag_mask_inf_f32(x, dst, ncols_x,
+ rows_per_channel, n_past,
+ item_ct1);
+ });
+}
+
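+// copy a 2D slice [i1_low, i1_high) of src at indices (i3, i2) into
+// contiguous dst memory, picking the memcpy direction from the source buffer
+// type. Three paths: fully contiguous rows use a single linear async copy,
+// rows with contiguous elements use one pitched 2D copy, and anything else
+// falls back to a pitched copy per row.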
+static dpct::err0 ggml_sycl_cpy_tensor_2d(void *dst,
+ const struct ggml_tensor *src,
+ int64_t i3, int64_t i2,
+ int64_t i1_low, int64_t i1_high,
+ queue_ptr stream) try {
+
+ dpct::memcpy_direction kind;
+ char * src_ptr;
+ if (ggml_backend_buffer_is_host(src->buffer)) {
+ kind = dpct::host_to_device;
+ //GGML_SYCL_DEBUG("%s: Host buffer type src tensor\n", __func__);
+ src_ptr = (char *) src->data;
+ // GGML_SYCL_DEBUG("ggml_sycl_cpy_tensor_2d GGML_BACKEND_TYPE_CPU src_ptr %p\n", src_ptr);
+ } else if (ggml_backend_buffer_is_sycl(src->buffer)) {
+ // If buffer is a SYCL buffer
+ //GGML_SYCL_DEBUG("%s: SYCL buffer type src tensor\n", __func__);
+ kind = dpct::device_to_device;
+ src_ptr = (char *) src->data;
+ } else if (ggml_backend_buffer_is_sycl_split(src->buffer)) {
+ /*
+ If buffer is a SYCL split buffer
+ */
+ //GGML_SYCL_DEBUG("%s: Split buffer type src tensor\n", __func__);
+ GGML_ASSERT(i1_low == 0 && i1_high == src->ne[1]);
+ kind = dpct::device_to_device;
+ ggml_tensor_extra_gpu * extra = (ggml_tensor_extra_gpu *) src->extra;
+ int id;
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ id = get_current_device_id()));
+ // GGML_SYCL_DEBUG("current device index %d\n", id);
+ src_ptr = (char *) extra->data_device[id];
+ } else {
+ // GGML_SYCL_DEBUG("GGML_ABORT("fatal error")\n");
+ GGML_ABORT("fatal error");
+ }
+ char * dst_ptr = (char *) dst;
+
+ GGML_TENSOR_LOCALS_1(int64_t, ne, src, ne);
+ GGML_TENSOR_LOCALS(int64_t, nb, src, nb);
+ const enum ggml_type type = src->type;
+ const int64_t ts = ggml_type_size(type);
+ const int64_t bs = ggml_blck_size(type);
+ int64_t i1_diff = i1_high - i1_low;
+
+ const char * x = src_ptr + i1_low*nb1 + i2*nb2 + i3*nb3;
+ if (nb0 == ts && nb1 == ts*ne0/bs) {
+ // GGML_SYCL_DEBUG("stream->memcpy: dst_ptr=%p, x=%p, size=%lu\n", dst_ptr, x, i1_diff * nb1);
+ // return CHECK_TRY_ERROR(stream->memcpy(dst_ptr, x, i1_diff * nb1));
+ return CHECK_TRY_ERROR(dpct::async_dpct_memcpy(dst_ptr, x, i1_diff * nb1,
+ kind, *stream));
+
+ } else if (nb0 == ts) {
+ return CHECK_TRY_ERROR(
+ dpct::async_dpct_memcpy(dst_ptr, ts * ne0 / bs, x, nb1,
+ ts * ne0 / bs, i1_diff, kind, *stream));
+ } else {
+ for (int64_t i1 = 0; i1 < i1_diff; i1++) {
+ const void * rx = (const void *) ((const char *) x + i1*nb1);
+ void * rd = (void *) (dst_ptr + i1*ts*ne0/bs);
+ // pretend the row is a matrix with cols=1
+ dpct::err0 r = CHECK_TRY_ERROR(dpct::async_dpct_memcpy(
+ rd, ts / bs, rx, nb0, ts / bs, ne0, kind, *stream));
+ /*
+ DPCT1001:85: The statement could not be removed.
+ */
+ /*
+ DPCT1000:86: Error handling if-stmt was detected but could not be
+ rewritten.
+ */
+ if (r != 0) return r;
+ }
+ return 0;
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
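+// generic gemm fallback. If fp16 math is compiled in and src0 is fp16 or
+// quantized (contiguous, full rows, default precision), both inputs are
+// converted to fp16 and multiplied with oneDNN or oneMKL gemm, converting the
+// fp16 result back to fp32; otherwise both inputs are converted to fp32 and
+// multiplied in fp32.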
+inline void ggml_sycl_op_mul_mat_sycl(
+ ggml_backend_sycl_context & ctx,
+ const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
+ const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
+ float *dst_dd_i, const int64_t row_low, const int64_t row_high,
+ const int64_t src1_ncols, const int64_t src1_padded_row_size,
+ const queue_ptr &stream) try {
+
+ GGML_ASSERT(src0_dd_i != nullptr);
+ GGML_ASSERT(src1_ddf_i != nullptr);
+ GGML_ASSERT(dst_dd_i != nullptr);
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t ne10 = src1->ne[0];
+ GGML_ASSERT(ne00 == ne10);
+
+ const int64_t row_diff = row_high - row_low;
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+
+ const int64_t ne0 = dst->ne[0]; // used by MKL only
+ // the main device has a larger memory buffer to hold the results from all GPUs
+    // ldc == nrows of the matrix that the gemm writes into
+ int ldc = id == ctx.device ? ne0 : row_diff; // used by MKL only
+
+#ifdef GGML_SYCL_F16
+ bool use_fp16 = true; // TODO(Yu) SYCL capability check
+#else
+ bool use_fp16 = false;
+#endif
+ if ((src0->type == GGML_TYPE_F16 || ggml_is_quantized(src0->type)) && use_fp16 && ggml_is_contiguous(src0) &&
+ row_diff == src0->ne[1] && dst->op_params[0] == GGML_PREC_DEFAULT) {
+ ggml_sycl_pool_alloc<sycl::half> src0_as_f16(ctx.pool());
+ if (src0->type != GGML_TYPE_F16) {
+ scope_op_debug_print scope_dbg_print(__func__, "/to_fp16_sycl", dst, /*num_src=*/2,
+ " : converting src0 to fp16");
+ const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src0->type, dst);
+ GGML_ASSERT(to_fp16_sycl != nullptr);
+ size_t ne = row_diff*ne00;
+ src0_as_f16.alloc(ne);
+ to_fp16_sycl(src0_dd_i, src0_as_f16.get(), ne, stream);
+ }
+ const sycl::half *src0_ptr = src0->type == GGML_TYPE_F16
+ ? (const sycl::half *)src0_dd_i
+ : src0_as_f16.get();
+
+ ggml_sycl_pool_alloc<sycl::half> src1_as_f16(ctx.pool());
+ if (src1->type != GGML_TYPE_F16) {
+ scope_op_debug_print scope_dbg_print(__func__, "/to_fp16_sycl", dst, /*num_src=*/2,
+ " : converting src1 to fp16");
+ const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type, dst);
+ GGML_ASSERT(to_fp16_sycl != nullptr);
+ size_t ne = src1_ncols*ne10;
+ src1_as_f16.alloc(ne);
+ to_fp16_sycl(src1_ddf_i, src1_as_f16.get(), ne, stream);
+ }
+ const sycl::half *src1_ptr = src1->type == GGML_TYPE_F16
+ ? (const sycl::half *)src1->data + src1_padded_row_size
+ : src1_as_f16.get();
+
+#if GGML_SYCL_DNNL
+ if (!g_ggml_sycl_disable_dnn) {
+ DnnlGemmWrapper::row_gemm(ctx,row_diff, src1_ncols , ne10, src0_ptr,
+ DnnlGemmWrapper::to_dt<sycl::half>(), src1_ptr, DnnlGemmWrapper::to_dt<sycl::half>(),
+ dst_dd_i, DnnlGemmWrapper::to_dt<float>(), stream);
+ }
+ else
+#endif
+ {
+ ggml_sycl_pool_alloc<sycl::half> dst_f16(ctx.pool(), row_diff * src1_ncols);
+
+ const sycl::half alpha_f16 = 1.0f;
+ const sycl::half beta_f16 = 0.0f;
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm(
+ *stream, oneapi::mkl::transpose::trans,
+ oneapi::mkl::transpose::nontrans, row_diff, src1_ncols, ne10,
+ &alpha_f16, src0_ptr, dpct::library_data_t::real_half, ne00,
+ src1_ptr, dpct::library_data_t::real_half, ne10, &beta_f16,
+ dst_f16.get(), dpct::library_data_t::real_half, ldc,
+ dpct::library_data_t::real_half)));
+ scope_op_debug_print scope_dbg_print(__func__, "/to_fp32_sycl", dst, /*num_src=*/2,
+ " : converting dst to fp32");
+ const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(GGML_TYPE_F16, dst);
+ to_fp32_sycl(dst_f16.get(), dst_dd_i, row_diff*src1_ncols, stream);
+ }
+ } else {
+ ggml_sycl_pool_alloc<float> src0_ddq_as_f32(ctx.pool());
+ ggml_sycl_pool_alloc<float> src1_ddq_as_f32(ctx.pool());
+ if (src0->type != GGML_TYPE_F32) {
+ scope_op_debug_print scope_dbg_print(__func__, "/to_fp32_sycl", dst, /*num_src=*/2,
+ " : converting src0 to fp32");
+ const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(src0->type, dst);
+ GGML_ASSERT(to_fp32_sycl != nullptr);
+ src0_ddq_as_f32.alloc(row_diff*ne00);
+ to_fp32_sycl(src0_dd_i, src0_ddq_as_f32.get(), row_diff*ne00, stream);
+ }
+ if (src1->type != GGML_TYPE_F32) {
+ scope_op_debug_print scope_dbg_print(__func__, "/to_fp32_sycl", dst, /*num_src=*/2,
+ " : converting src1 to fp32");
+ const to_fp32_sycl_t to_fp32_sycl = ggml_get_to_fp32_sycl(src1->type, dst);
+ GGML_ASSERT(to_fp32_sycl != nullptr);
+ src1_ddq_as_f32.alloc(src1_ncols*ne10);
+ to_fp32_sycl(src1_ddf_i, src1_ddq_as_f32.get(), src1_ncols*ne10, stream);
+ }
+ const float * src0_ddf_i = src0->type == GGML_TYPE_F32 ? (const float *) src0_dd_i : src0_ddq_as_f32.get();
+ const float * src1_ddf1_i = src1->type == GGML_TYPE_F32 ? (const float *) src1_ddf_i : src1_ddq_as_f32.get();
+
+#if GGML_SYCL_DNNL
+ if (!g_ggml_sycl_disable_dnn) {
+ DnnlGemmWrapper::row_gemm(ctx, row_diff, src1_ncols, ne10, src0_ddf_i,
+ DnnlGemmWrapper::to_dt<float>(), src1_ddf1_i, DnnlGemmWrapper::to_dt<float>(),
+ dst_dd_i, DnnlGemmWrapper::to_dt<float>(), stream);
+ }
+ else
+#endif
+ {
+ const float alpha = 1.0f;
+ const float beta = 0.0f;
+ SYCL_CHECK(CHECK_TRY_ERROR(oneapi::mkl::blas::column_major::gemm(
+ *stream, oneapi::mkl::transpose::trans, oneapi::mkl::transpose::nontrans, row_diff,
+ src1_ncols, ne10, dpct::get_value(&alpha, *stream), src0_ddf_i, ne00, src1_ddf1_i, ne10,
+ dpct::get_value(&beta, *stream), dst_dd_i, ldc)));
+ }
+ }
+ GGML_UNUSED(dst);
+ GGML_UNUSED(src1_ddq_i);
+ GGML_UNUSED(src1_padded_row_size);
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_sycl_op_pool2d(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ const int32_t * opts = (const int32_t *)dst->op_params;
+ enum ggml_op_pool op = static_cast<ggml_op_pool>(opts[0]);
+ const int k0 = opts[1];
+ const int k1 = opts[2];
+ const int s0 = opts[3];
+ const int s1 = opts[4];
+ const int p0 = opts[5];
+ const int p1 = opts[6];
+
+ const int64_t IH = dst->src[0]->ne[1];
+ const int64_t IW = dst->src[0]->ne[0];
+
+ const int64_t N = dst->ne[3];
+ const int64_t OC = dst->ne[2];
+ const int64_t OH = dst->ne[1];
+ const int64_t OW = dst->ne[0];
+
+ const int parallel_elements = N * OC * OH * OW;
+ const int num_blocks = (parallel_elements + SYCL_POOL2D_BLOCK_SIZE - 1) / SYCL_POOL2D_BLOCK_SIZE;
+ sycl::range<3> block_nums(1, 1, num_blocks);
+    main_stream->parallel_for(
+        sycl::nd_range<3>(block_nums *
+                              sycl::range<3>(1, 1, SYCL_POOL2D_BLOCK_SIZE),
+                          sycl::range<3>(1, 1, SYCL_POOL2D_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ pool2d_nchw_kernel(IH, IW, OH, OW, k1, k0, s1, s0, p1, p0,
+ parallel_elements, src0_dd, dst_dd, op,
+ item_ct1);
+ });
+}
+
+inline void ggml_sycl_op_sum(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ const int64_t ne = ggml_nelements(dst->src[0]);
+
+ sum_rows_f32_sycl(src0_dd, dst_dd, ne, 1, main_stream);
+}
+
+inline void ggml_sycl_op_sum_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ const int64_t ncols = dst->src[0]->ne[0];
+ const int64_t nrows = ggml_nrows(dst->src[0]);
+
+ sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream);
+}
+
+inline void ggml_sycl_op_mean(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ const int64_t ncols = dst->src[0]->ne[0];
+ const int64_t nrows = ggml_nrows(dst->src[0]);
+
+ sum_rows_f32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream);
+
+ main_stream->parallel_for(
+ sycl::range<1>(nrows),
+ [=](sycl::id<1> row) {
+ dst_dd[row] /= ncols;
+ }
+ );
+}
+
+
+inline void ggml_sycl_op_argsort(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_I32);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ int32_t * dst_dd = static_cast<int32_t *>(dst->data);
+
+ const int64_t ncols = dst->src[0]->ne[0];
+ const int64_t nrows = ggml_nrows(dst->src[0]);
+
+ enum ggml_sort_order order = (enum ggml_sort_order) dst->op_params[0];
+
+ argsort_f32_i32_sycl(src0_dd, (int *)dst_dd, ncols, nrows, order,
+ main_stream, ctx.device);
+}
+
+static void ggml_sycl_op_top_k(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+
+ GGML_ASSERT(src0);
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_I32);
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ const float * src0_dd = static_cast<const float *>(src0->data);
+ int32_t * dst_dd = static_cast<int32_t *>(dst->data);
+
+ const int k = dst->ne[0];
+ const int64_t ncols = src0->ne[0];
+ const int64_t nrows = ggml_nrows(src0);
+
+ GGML_ASSERT(k > 0 && k <= 32);
+ GGML_ASSERT(k <= ncols);
+
+ top_k_f32_sycl(src0_dd, dst_dd, ncols, nrows, k, main_stream);
+}
+
+inline void ggml_sycl_op_argmax(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_I32);
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ int32_t * dst_dd = static_cast<int32_t *>(dst->data);
+
+ const int64_t ncols = dst->src[0]->ne[0];
+ const int64_t nrows = ggml_nrows(dst->src[0]);
+
+ argmax_f32_i32_sycl(src0_dd, dst_dd, ncols, nrows, main_stream);
+}
+
+inline void ggml_sycl_op_diag_mask_inf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ const int64_t ne00 = dst->src[0]->ne[0];
+ const int64_t ne01 = dst->src[0]->ne[1];
+ const int nrows0 = ggml_nrows(dst->src[0]);
+
+ const int n_past = ((int32_t *) dst->op_params)[0];
+
+ diag_mask_inf_f32_sycl(src0_dd, dst_dd, ne00, nrows0, ne01, n_past, main_stream);
+}
+
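+// triangular masking: one work-item per element. The linear index is
+// decomposed into (i0, i1); the element is kept or zeroed depending on its
+// position relative to the diagonal for the requested ggml_tri_type.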
+static void tri_f32_sycl(
+ const float * src,
+ float * dst,
+ const int64_t ne0,
+ const int64_t ne1,
+ const int64_t ne2,
+ const int64_t ne3,
+ const ggml_tri_type ttype,
+ dpct::queue_ptr main_stream
+) {
+ const size_t total = (size_t) ne0 * (size_t) ne1 * (size_t) ne2 * (size_t) ne3;
+
+ main_stream->parallel_for(sycl::range<1>(total), [=](sycl::id<1> tid) {
+ const int64_t idx = (int64_t) tid[0];
+
+ const int64_t i0 = idx % ne0;
+ const int64_t t1 = idx / ne0;
+ const int64_t i1 = t1 % ne1;
+
+ bool keep = false;
+ switch (ttype) {
+ case GGML_TRI_TYPE_LOWER: keep = (i0 < i1); break;
+ case GGML_TRI_TYPE_LOWER_DIAG: keep = (i0 <= i1); break;
+ case GGML_TRI_TYPE_UPPER: keep = (i0 > i1); break;
+ case GGML_TRI_TYPE_UPPER_DIAG: keep = (i0 >= i1); break;
+ default: keep = false; break;
+ }
+
+ dst[idx] = keep ? src[idx] : 0.0f;
+ });
+}
+
+static void ggml_sycl_op_tri(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ GGML_ASSERT(src0);
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(ggml_is_contiguous(src0));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ const float * src0_dd = static_cast<const float *>(src0->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ const ggml_tri_type ttype = (ggml_tri_type) ggml_get_op_params_i32(dst, 0);
+
+ const int64_t ne0 = src0->ne[0];
+ const int64_t ne1 = src0->ne[1];
+ const int64_t ne2 = src0->ne[2];
+ const int64_t ne3 = src0->ne[3];
+
+ tri_f32_sycl(src0_dd, dst_dd, ne0, ne1, ne2, ne3, ttype, main_stream);
+}
+
+
+inline void ggml_sycl_op_scale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ float scale;
+ float bias;
+ memcpy(&scale, (float *) dst->op_params + 0, sizeof(float));
+ memcpy(&bias, (float *) dst->op_params + 1, sizeof(float));
+
+ scale_f32_sycl(src0_dd, dst_dd, scale, bias, ggml_nelements(dst->src[0]), main_stream);
+ /*
+ DPCT1010:87: SYCL uses exceptions to report errors and does not use the
+ error codes. The call was replaced with 0. You need to rewrite this code.
+ */
+ SYCL_CHECK(0);
+}
+
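+// currently a stub: the SYCL peer-access calls below are commented out, so
+// this only tracks the requested toggle (and only iterates the devices in
+// NDEBUG builds). Peer access would only be enabled for batches of at most
+// GGML_SYCL_PEER_MAX_BATCH_SIZE tokens.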
+static void ggml_sycl_set_peer_access(const int n_tokens, int main_device) {
+ static bool peer_access_enabled = false;
+
+ const bool enable_peer_access = n_tokens <= GGML_SYCL_PEER_MAX_BATCH_SIZE;
+
+ if (peer_access_enabled == enable_peer_access) {
+ return;
+ }
+
+#ifdef NDEBUG
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ SYCL_CHECK(ggml_sycl_set_device(i));
+ }
+
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ SYCL_CHECK(ggml_sycl_set_device(i));
+
+ for (int id_other = 0; id_other < ggml_sycl_info().device_count; ++id_other) {
+ if (i == id_other) {
+ continue;
+ }
+ if (i != main_device && id_other != main_device) {
+ continue;
+ }
+
+ // int can_access_peer;
+ // SYCL_CHECK(syclDeviceCanAccessPeer(&can_access_peer, id, id_other));
+ // if (can_access_peer) {
+ // if (enable_peer_access) {
+ // SYCL_CHECK(syclDeviceEnablePeerAccess(id_other, 0));
+ // } else {
+ // SYCL_CHECK(syclDeviceDisablePeerAccess(id_other));
+ // }
+ // }
+ }
+ }
+#endif // NDEBUG
+
+ peer_access_enabled = enable_peer_access;
+}
+
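+// multi-device mul_mat driver. For split buffers the rows of src0 are
+// partitioned across devices according to tensor_split, rounded to tile
+// sizes; each device gets staging buffers for its slice, src1 is quantized
+// to q8_1 when a quantized kernel is used, the computation runs over column
+// chunks of src1 per device, partial results are copied back into dst, and
+// the devices synchronize through submit-barrier events.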
+template <template <int> typename quantize_f>
+static void ggml_sycl_op_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
+ const ggml_tensor *src1, ggml_tensor *dst,
+ ggml_sycl_op_mul_mat_t op) try {
+
+ GGML_TENSOR_LOCALS(int64_t, ne0, src0, ne);
+
+ GGML_TENSOR_LOCALS(int64_t, ne1, src1, ne);
+ const int64_t nrows1 = ggml_nrows(src1);
+
+ GGML_ASSERT(ne03 == ne13);
+
+ const int64_t ne0 = dst->ne[0];
+ const int64_t ne1 = dst->ne[1];
+
+ const int nb2 = dst->nb[2];
+ const int nb3 = dst->nb[3];
+
+ GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(dst->buffer));
+ GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src1->buffer));
+ GGML_ASSERT(src1->type == GGML_TYPE_F32 || (src1->ne[2] == 1 && src1->ne[3] == 1));
+
+ GGML_ASSERT(ne12 >= ne02 && ne12 % ne02 == 0);
+
+ const int64_t i02_divisor = ne12 / ne02;
+
+ const size_t src0_ts = ggml_type_size(src0->type);
+ const size_t src0_bs = ggml_blck_size(src0->type);
+ const size_t q8_1_ts = sizeof(block_q8_1);
+ const size_t q8_1_bs = QK8_1;
+
+ ggml_tensor_extra_gpu * src0_extra = (ggml_tensor_extra_gpu *) src0->extra;
+ ggml_tensor_extra_gpu * src1_extra = (ggml_tensor_extra_gpu *) src1->extra;
+
+ const bool src0_is_contiguous = ggml_is_contiguous(src0);
+ const bool src1_is_contiguous = ggml_is_contiguous(src1);
+
+ int64_t src1_padded_col_size = GGML_PAD(ne10, MATRIX_ROW_PADDING);
+
+ const bool split = ggml_backend_buffer_is_sycl_split(src0->buffer);
+ GGML_ASSERT(!(split && ne02 > 1));
+ GGML_ASSERT(!(split && ne03 > 1));
+ GGML_ASSERT(!(split && ne02 < ne12));
+
+ std::array<float, GGML_SYCL_MAX_DEVICES> tensor_split;
+ if (split) {
+ // TODO: check that src0->buffer->buft is a split buffer type, replace GGML_BACKEND_TYPE_GPU_SPLIT check
+ // GGML_ASSERT(src0->buffer != nullptr && src0->buffer->buft == ...);
+ ggml_backend_sycl_split_buffer_type_context * buft_ctx = (ggml_backend_sycl_split_buffer_type_context *) src0->buffer->buft->context;
+ tensor_split = buft_ctx->tensor_split;
+ }
+
+ struct dev_data {
+ ggml_sycl_pool_alloc<char> src0_dd_alloc;
+ ggml_sycl_pool_alloc<float> src1_ddf_alloc;
+ ggml_sycl_pool_alloc<char> src1_ddq_alloc;
+ ggml_sycl_pool_alloc<float> dst_dd_alloc;
+
+ char *src0_dd = nullptr;
+ float *src1_ddf = nullptr; // float
+ char *src1_ddq = nullptr; // q8_1
+ float *dst_dd = nullptr;
+
+ int64_t row_low;
+ int64_t row_high;
+ };
+
+ dev_data dev[GGML_SYCL_MAX_DEVICES];
+
+ int used_devices = 0;
+ queue_ptr main_stream = ctx.stream();
+
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ // by default, use all rows
+ dev[i].row_low = 0;
+ dev[i].row_high = ne01;
+
+ // for multi GPU, get the row boundaries from tensor split
+ // and round to mul_mat_q tile sizes
+ if (split) {
+ const int64_t rounding = get_row_rounding(src0->type, tensor_split);
+
+ if (i != 0) {
+ dev[i].row_low = ne01*tensor_split[i];
+ if (dev[i].row_low < ne01) {
+ dev[i].row_low -= dev[i].row_low % rounding;
+ }
+ }
+
+ if (i != ggml_sycl_info().device_count - 1) {
+ dev[i].row_high = ne01*tensor_split[i + 1];
+ if (dev[i].row_high < ne01) {
+ dev[i].row_high -= dev[i].row_high % rounding;
+ }
+ }
+ }
+ }
+
+ constexpr bool quantize_enabled = !std::is_same_v<quantize_f<QK8_1 / WARP_SIZE>,
+ no_quantize_q8_1<QK8_1 / WARP_SIZE>>;
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ if ((!split && i != ctx.device) || dev[i].row_low == dev[i].row_high) {
+ continue;
+ }
+
+ used_devices++;
+
+ const bool src1_on_device = i == ctx.device;
+ const bool dst_on_device = i == ctx.device;
+
+ ggml_sycl_set_device(i);
+ queue_ptr stream = ctx.stream(i, 0);
+
+ if (src0_is_contiguous) {
+ dev[i].src0_dd = (char *) src0->data;
+ } else {
+ dev[i].src0_dd = dev[i].src0_dd_alloc.alloc(ctx.pool(i), ggml_nbytes(src0));
+ }
+
+ if (src1_on_device && src1_is_contiguous) {
+ dev[i].src1_ddf = (float *) src1->data;
+ } else {
+ dev[i].src1_ddf = dev[i].src1_ddf_alloc.alloc(ctx.pool(i), ggml_nelements(src1));
+ }
+
+ if constexpr(quantize_enabled) {
+ dev[i].src1_ddq = dev[i].src1_ddq_alloc.alloc(ctx.pool(i), nrows1*src1_padded_col_size*q8_1_ts/q8_1_bs);
+
+ if (src1_on_device && src1_is_contiguous) {
+ scope_op_debug_print scope_dbg_print(__func__, "/quantize_row_q8_1_sycl", dst,
+ /*num_src=*/2, " : converting src1 to Q8_1");
+ try {
+ quantize_row_q8_1_sycl<quantize_f>(dev[i].src1_ddf, dev[i].src1_ddq, ne10, nrows1, src1_padded_col_size, stream);
+ } catch (sycl::exception const &exc) {
+ std::cerr << "Quantize_row_q8_1_sycl error" << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+ }
+ }
+ }
+
+ if (dst_on_device) {
+ dev[i].dst_dd = (float *) dst->data;
+ } else {
+ const size_t size_dst_ddf = split ? (dev[i].row_high - dev[i].row_low)*ne1 : ggml_nelements(dst);
+ dev[i].dst_dd = dev[i].dst_dd_alloc.alloc(ctx.pool(i), size_dst_ddf);
+ }
+ }
+
+ // if multiple devices are used they need to wait for the main device
+ // here an event is recorded that signals that the main device has finished calculating the input data
+ if (split && used_devices > 1) {
+ ggml_sycl_set_device(ctx.device);
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ *src0_extra->events[ctx.device][0] =
+ ctx.stream()->ext_oneapi_submit_barrier()));
+ }
+
+ const int64_t src1_col_stride = split && used_devices > 1 ? MUL_MAT_SRC1_COL_STRIDE : ne11;
+ for (int64_t src1_col_0 = 0; src1_col_0 < ne11; src1_col_0 += src1_col_stride) {
+ const int64_t is = split ? (src1_col_0/src1_col_stride) % GGML_SYCL_MAX_STREAMS : 0;
+ const int64_t src1_ncols = src1_col_0 + src1_col_stride > ne11 ? ne11 - src1_col_0 : src1_col_stride;
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ if ((!split && i != ctx.device) || dev[i].row_low == dev[i].row_high) {
+ continue;
+ }
+
+ const bool src1_on_device = i == ctx.device;
+ const bool dst_on_device = i == ctx.device;
+ const int64_t row_diff = dev[i].row_high - dev[i].row_low;
+
+ ggml_sycl_set_device(i);
+ queue_ptr stream = ctx.stream(i, is);
+
+ // wait for main GPU data if necessary
+ if (split && (i != ctx.device || is != 0)) {
+ SYCL_CHECK(CHECK_TRY_ERROR(stream->ext_oneapi_submit_barrier(
+ {*src0_extra->events[ctx.device][0]})));
+ }
+
+ for (int64_t i0 = 0; i0 < ne13*ne12; ++i0) {
+ const int64_t i03 = i0 / ne12;
+ const int64_t i02 = i0 % ne12;
+
+ const size_t src1_ddq_i_offset = (i0*ne11 + src1_col_0) * src1_padded_col_size*q8_1_ts/q8_1_bs;
+
+ // for split tensors the data begins at i0 == i0_offset_low
+ char * src0_dd_i = dev[i].src0_dd + (i0/i02_divisor) * (ne01*ne00*src0_ts)/src0_bs;
+ float * src1_ddf_i = dev[i].src1_ddf + (i0*ne11 + src1_col_0) * ne10;
+ char * src1_ddq_i = dev[i].src1_ddq + src1_ddq_i_offset;
+ float * dst_dd_i = dev[i].dst_dd + (i0*ne1 + src1_col_0) * (dst_on_device ? ne0 : row_diff);
+
+ // the main device memory buffer can be on VRAM scratch, with space for all partial results
+ // in that case an offset on dst_ddf_i is needed
+ if (i == ctx.device) {
+ dst_dd_i += dev[i].row_low; // offset is 0 if no tensor split
+ }
+
+ // copy src0, src1 to device if necessary
+ if (src1_is_contiguous) {
+ if (i != ctx.device) {
+ if constexpr (quantize_enabled) {
+ char * src1_ddq_i_source = dev[ctx.device].src1_ddq + src1_ddq_i_offset;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(stream
+ ->memcpy(src1_ddq_i, src1_ddq_i_source,
+ src1_ncols * src1_padded_col_size * q8_1_ts / q8_1_bs)
+ .wait()));
+ } else {
+ float * src1_ddf_i_source = (float *) src1_extra->data_device[ctx.device];
+ src1_ddf_i_source += (i0 * ne11 + src1_col_0) * ne10;
+
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(dev2dev_memcpy(*stream, *main_stream, src1_ddf_i, src1_ddf_i_source,
+ src1_ncols * ne10 * sizeof(float))));
+ }
+ }
+ } else {
+ if (src1_on_device) {
+ SYCL_CHECK(ggml_sycl_cpy_tensor_2d(src1_ddf_i, src1, i03, i02, src1_col_0,
+ src1_col_0 + src1_ncols, stream));
+ } else {
+ GGML_ABORT("src1 is non-contiguous and not on device");
+ }
+
+ if constexpr (quantize_enabled) {
+ scope_op_debug_print scope_dbg_print(__func__, "/quantize_row_q8_1_sycl", dst,
+ /*num_src=*/2, " : converting src1 to Q8_1");
+ try {
+ quantize_row_q8_1_sycl<quantize_q8_1>(src1_ddf_i, src1_ddq_i, ne10, src1_ncols,
+ src1_padded_col_size, stream);
+ } catch (const sycl::exception & exc) {
+ std::cerr << "Quantize_row_q8_1_sycl error" << exc.what()
+ << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+ }
+ }
+ }
+
+ if (src1_col_0 == 0 && !src0_is_contiguous && i02 % i02_divisor == 0) {
+ SYCL_CHECK(ggml_sycl_cpy_tensor_2d(src0_dd_i, src0, i03, i02/i02_divisor, dev[i].row_low, dev[i].row_high, stream));
+ }
+ if (src1->type == GGML_TYPE_F16) {
+ src1_padded_col_size = (i0 * ne11 + src1_col_0) * ne10;
+ }
+ // do the computation
+ SYCL_CHECK(CHECK_TRY_ERROR(op(ctx, src0, src1, dst, src0_dd_i, src1_ddf_i, src1_ddq_i, dst_dd_i,
+ dev[i].row_low, dev[i].row_high, src1_ncols, src1_padded_col_size, stream)));
+
+ // copy dst to host or other device if necessary
+ if (!dst_on_device) {
+ void * dst_off_device = dst->data;
+ if (split) {
+ // src0 = weight matrix is saved as a transposed matrix for better memory layout.
+ // dst is NOT transposed.
+ // The outputs of matrix matrix multiplications can therefore NOT simply be concatenated for >1 GPU.
+ // Instead they need to be copied to the correct slice in ne0 = dst row index.
+ // If dst is a vector with ne0 == 1 then you don't have to do this but it still produces correct results.
+ float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
+ GGML_ASSERT(dst->nb[1] == ne0*sizeof(float));
+ dhf_dst_i += src1_col_0*ne0 + dev[i].row_low;
+
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::async_dpct_memcpy(
+ dhf_dst_i, ne0 * sizeof(float), dst_dd_i,
+ row_diff * sizeof(float), row_diff * sizeof(float),
+ src1_ncols, dpct::device_to_device, *stream)));
+ } else {
+ float * dhf_dst_i = (float *) ((char *) dst_off_device + i02*nb2 + i03*nb3);
+ GGML_ASSERT(dst->nb[1] == ne0*sizeof(float));
+ dhf_dst_i += src1_col_0*ne0;
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ stream->memcpy(dhf_dst_i, dst_dd_i,
+ src1_ncols * ne0 * sizeof(float)).wait()));
+ }
+ }
+
+ // add event for the main device to wait on until other device is done
+ if (split && (i != ctx.device || is != 0)) {
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ *src0_extra->events[i][is] =
+ stream->ext_oneapi_submit_barrier()));
+ }
+ }
+ }
+ }
+
+ // main device waits for all other devices to be finished
+ if (split && ggml_sycl_info().device_count > 1) {
+ int64_t is_max = (ne11 + MUL_MAT_SRC1_COL_STRIDE - 1) / MUL_MAT_SRC1_COL_STRIDE;
+ is_max = is_max <= GGML_SYCL_MAX_STREAMS ? is_max : GGML_SYCL_MAX_STREAMS;
+
+ ggml_sycl_set_device(ctx.device);
+ for (int i = 0; i < ggml_sycl_info().device_count; ++i) {
+ if (dev[i].row_low == dev[i].row_high) {
+ continue;
+ }
+ for (int64_t is = 0; is < is_max; ++is) {
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ ctx.stream()->ext_oneapi_submit_barrier(
+ {*src0_extra->events[i][is]})));
+ }
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_sycl_repeat_back(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_repeat_back(ctx, dst);
+}
+
+static void ggml_sycl_get_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_get_rows(ctx, dst);
+}
+
+static void ggml_sycl_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_norm(ctx, dst);
+}
+
+static void ggml_sycl_rms_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_rms_norm(ctx, dst);
+}
+
+static void ggml_sycl_rms_norm_back(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_rms_norm_back(ctx, dst);
+}
+
+static void ggml_sycl_l2_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_l2_norm(ctx, dst);
+}
+
+static void ggml_sycl_group_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_group_norm(ctx, dst);
+}
+
+static void ggml_sycl_mul_mat_vec_p021(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
+ const ggml_tensor *src1,
+ ggml_tensor *dst) try {
+ GGML_ASSERT(ggml_is_permuted(src0) && ggml_is_permuted(src1));
+ GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer));
+ GGML_ASSERT(src0->nb[0] <= src0->nb[1] && src0->nb[2] <= src0->nb[3]); // 0213 permutation
+ GGML_ASSERT(src1->nb[0] <= src1->nb[1] && src1->nb[2] <= src1->nb[3]); // 0213 permutation
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t ne01 = src0->ne[1];
+ const int64_t ne02 = src0->ne[2];
+
+ const int64_t ne12 = src1->ne[2];
+
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ queue_ptr main_stream = ctx.stream();
+
+ void * src0_ddq = src0->data;
+ float * src1_ddf = (float *) src1->data;
+ float * dst_ddf = (float *) dst->data;
+
+ ggml_mul_mat_p021_f16_f32_sycl(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, ne02, ne12, main_stream);
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_sycl_mul_mat_vec_nc(ggml_backend_sycl_context & ctx, const ggml_tensor *src0,
+ const ggml_tensor *src1,
+ ggml_tensor *dst) try {
+ GGML_ASSERT(!ggml_is_transposed(src0));
+ GGML_ASSERT(!ggml_is_transposed(src1));
+ GGML_ASSERT(!ggml_is_permuted(src0));
+ GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer));
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->ne[1] == 1);
+ GGML_ASSERT(src1->ne[3] == 1);
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t ne01 = src0->ne[1];
+ const int64_t ne02 = src0->ne[2];
+
+ const int64_t nb01 = src0->nb[1];
+ const int64_t nb02 = src0->nb[2];
+
+ const int64_t ne12 = src1->ne[2];
+ const int64_t nb11 = src1->nb[1];
+
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ queue_ptr main_stream = ctx.stream();
+
+ void * src0_ddq = src0->data;
+ float * src1_ddf = (float *) src1->data;
+ float * dst_ddf = (float *) dst->data;
+
+ const int64_t row_stride_x = nb01 / sizeof(sycl::half);
+ const int64_t channel_stride_x = nb02 / sizeof(sycl::half);
+ const int64_t channel_stride_y = nb11 / sizeof(float);
+
+ ggml_mul_mat_vec_nc_f16_f32_sycl(src0_ddq, src1_ddf, dst_ddf, ne00, ne01, row_stride_x, ne02, ne12, channel_stride_x,channel_stride_y, main_stream);
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
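+// builds the per-batch pointer tables consumed by the pointer-array variant
+// of gemm_batch: for each (i12, i13) batch index the src0 pointer is selected
+// using the broadcast factors r2/r3, while src1 and dst advance by their own
+// strides.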
+static void k_compute_batched_ptrs(const sycl::half * src0_as_f16, const sycl::half * src1_as_f16, void * dst,
+ const void ** ptrs_src, void ** ptrs_dst, int64_t ne12, int64_t ne13, int64_t ne23,
+ size_t nb02, size_t nb03, size_t nb12, size_t nb13, size_t nbd2, size_t nbd3,
+ int64_t r2, int64_t r3, const sycl::nd_item<3> & item_ct1) {
+ const int64_t i13 = item_ct1.get_group(2) * item_ct1.get_local_range(2) + item_ct1.get_local_id(2);
+ const int64_t i12 = item_ct1.get_group(1) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1);
+
+ if (i13 >= ne13 || i12 >= ne12) {
+ return;
+ }
+
+ const int64_t i03 = i13 / r3;
+ const int64_t i02 = i12 / r2;
+
+ const uint8_t * src0_bytes = reinterpret_cast<const uint8_t *>(src0_as_f16);
+ const uint8_t * src1_bytes = reinterpret_cast<const uint8_t *>(src1_as_f16);
+ uint8_t * dst_bytes = static_cast<uint8_t *>(dst);
+
+ ptrs_src[0 * ne23 + i12 + i13 * ne12] = src0_bytes + i02 * nb02 + i03 * nb03;
+ ptrs_src[1 * ne23 + i12 + i13 * ne12] = src1_bytes + i12 * nb12 + i13 * nb13;
+ ptrs_dst[0 * ne23 + i12 + i13 * ne12] = dst_bytes + i12 * nbd2 + i13 * nbd3;
+}
+
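+// batched f16 mul_mat. src1 is converted to fp16 if needed, then either
+// oneDNN gemm handles the batches directly, or oneMKL gemm_batch is used:
+// the strided variant when there is no broadcast and dims 2/3 are
+// contiguous, and the pointer-array variant (via k_compute_batched_ptrs)
+// otherwise.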
+static void ggml_sycl_mul_mat_batched_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor * src0,
+ const ggml_tensor * src1, ggml_tensor * dst) try {
+ GGML_ASSERT(!ggml_is_transposed(src0));
+ GGML_ASSERT(!ggml_is_transposed(src1));
+ GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer));
+ GGML_ASSERT(src0->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ // TODO: see https://github.com/ggml-org/llama.cpp/pull/13155
+ // Batched mul_mat requires a rewrite to support both oneDNN and non-contiguous dst
+ GGML_ASSERT(ggml_is_contiguous(dst));
+
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ queue_ptr queue = ctx.stream();
+
+ dpct::has_capability_or_fail(queue->get_device(), { sycl::aspect::fp16 });
+
+ const sycl::half * src0_f16 = static_cast<const sycl::half *>(src0->data);
+ float * dst_ddf = static_cast<float *>(dst->data);
+
+ const sycl::half * src1_f16 = static_cast<const sycl::half *>(src1->data);
+ const size_t type_size_src0 = ggml_type_size(src0->type);
+ const size_t type_size_src1 = ggml_type_size(src1->type);
+
+ bool is_src0_cont_2 = ggml_is_contiguous_2(src0);
+ bool is_src1_cont_2 = ggml_is_contiguous_2(src1);
+
+ // SRC1 strides
+ int64_t s11 = nb11 / type_size_src1;
+ int64_t s12 = nb12 / type_size_src1;
+ int64_t s13 = nb13 / type_size_src1;
+ ggml_sycl_pool_alloc<sycl::half> src1_f16_alloc(ctx.pool());
+
+ // convert src1 to fp16
+ if (src1->type != GGML_TYPE_F16) {
+ scope_op_debug_print scope_dbg_print(__func__, "/to_fp16_nc_sycl", dst, /*num_src=*/2,
+ " : converting src1 to fp16");
+
+        // iterate tensor dims and find the slowest moving dim and stride
+        int last_dim = 0;
+        int last_str = 0;
+        size_t largest_str = 0;
+        for (int i = 0; i < 4; i++) {
+            // last stride is always the largest
+            if (src1->nb[i] == largest_str) {
+                if (src1->ne[last_dim] == 1) {
+                    last_str = i;
+                    last_dim = i;
+                }
+            }
+            if (src1->nb[i] > largest_str) {
+                largest_str = src1->nb[i];
+                last_str = i;
+                last_dim = i;
+            }
+        }
+#if GGML_SYCL_DNNL
+ // oneDNN handles strided data and does not need overhead of get_to_fp16_nc_sycl
+ const int64_t ne_src1 = src1->nb[last_str] * src1->ne[last_dim] / type_size_src1;
+ src1_f16_alloc.alloc(ne_src1);
+ const to_fp16_sycl_t to_fp16_sycl = ggml_get_to_fp16_sycl(src1->type, dst);
+ GGML_ASSERT(to_fp16_sycl != nullptr);
+ to_fp16_sycl(src1_f16, src1_f16_alloc.get(), ne_src1, queue);
+#else
+ const int64_t ne_src1 = ggml_nelements(src1);
+ src1_f16_alloc.alloc(ne_src1);
+ const to_fp16_nc_sycl_t to_fp16_nc_sycl = get_to_fp16_nc_sycl(src1->type);
+ GGML_ASSERT(to_fp16_nc_sycl != nullptr);
+ to_fp16_nc_sycl(src1_f16, src1_f16_alloc.get(), ne10, ne11, ne12, ne13, s11, s12, s13, queue);
+#endif
+
+ src1_f16 = src1_f16_alloc.get();
+ s11 = ne10;
+ s12 = ne11 * s11;
+ s13 = ne12 * s12;
+
+ is_src1_cont_2 = true;
+ }
+
+ ggml_sycl_pool_alloc<sycl::half> dst_f16(ctx.pool());
+
+ dpct::library_data_t mkl_compute_type = dpct::library_data_t::real_float;
+ dpct::library_data_t mkl_data_type = dpct::library_data_t::real_float;
+
+ // dst strides
+ size_t nbd2 = dst->nb[2];
+ size_t nbd3 = dst->nb[3];
+
+ const float alpha_f32 = 1.0f;
+ const float beta_f32 = 0.0f;
+
+ const void * alpha = &alpha_f32;
+ const void * beta = &beta_f32;
+
+ GGML_ASSERT(ne12 % ne02 == 0);
+ GGML_ASSERT(ne13 % ne03 == 0);
+ GGML_ASSERT(ne01 == static_cast<int64_t>(nb1/nb0));
+ GGML_ASSERT(ne10 == ne00);
+
+ // broadcast factors
+ const int64_t r2 = ne12 / ne02;
+ const int64_t r3 = ne13 / ne03;
+
+#if GGML_SYCL_DNNL
+ if (!g_ggml_sycl_disable_dnn) {
+ int64_t str_a0 = nb00 / type_size_src0;
+ int64_t str_a1 = nb01 / type_size_src0;
+ int64_t str_a2 = nb02 / type_size_src0;
+
+ int64_t str_b0 = nb10 / type_size_src1;
+ int64_t str_b1 = nb11 / type_size_src1;
+ int64_t str_b2 = nb12 / type_size_src1;
+
+ auto launch_gemm_for_batches = [&ctx, queue](const sycl::half *src0,
+ const sycl::half *src1, float *dst,
+ int64_t a0, int64_t a1, int64_t batcha,
+ int64_t /*b0*/, int64_t b1, int64_t batchb,
+ int64_t sa0, int64_t sa1, int64_t sa2,
+ int64_t sb0, int64_t sb1, int64_t sb2,
+ int64_t sd2) {
+            bool supported_broadcast = batchb == batcha || batchb == 1 || batcha == 1;
+ if (supported_broadcast) {
+ DnnlGemmWrapper::gemm(ctx, a1, b1, a0, src0,
+ DnnlGemmWrapper::to_dt<sycl::half>(), sa0, sa1, sa2, src1,
+ DnnlGemmWrapper::to_dt<sycl::half>(), sb0, sb1, sb2, dst,
+ DnnlGemmWrapper::to_dt<float>(), queue, batcha, batchb);
+ } else {
+ // iterate over batches from smaller set of matrices (matrix 0)
+ int64_t batches0 = batcha;
+ int64_t batches1 = batchb;
+
+ if (batches0 > batches1) {
+ int64_t num_mul_mats = batches1;
+ int64_t sub_batch = batches0 / num_mul_mats;
+ // src0 is batched and bigger, shift and multiply with src1
+ for (int64_t i0 = 0; i0 < num_mul_mats; i0++) {
+ const sycl::half *src0_shifted = src0 + (sa2 * i0 * sub_batch);
+ const sycl::half *src1_shifted = src1 + (sb2 * i0);
+ float *dst_shifted = dst + (sd2 * i0 * sub_batch);
+ DnnlGemmWrapper::gemm(ctx, a1, b1, a0, src0_shifted,
+ DnnlGemmWrapper::to_dt<sycl::half>(), sa0, sa1, sa2,
+ src1_shifted, DnnlGemmWrapper::to_dt<sycl::half>(), sb0,
+ sb1, sb2, dst_shifted, DnnlGemmWrapper::to_dt<float>(),
+ queue, sub_batch, 1);
+ }
+ } else {
+ int64_t num_mul_mats = batches0;
+ int64_t sub_batch = batches1 / num_mul_mats;
+ // src1 is batched and bigger, shift and multiply with src0
+ for (int64_t i1 = 0; i1 < num_mul_mats; i1++) {
+ const sycl::half *src0_shifted = src0 + (sa2 * i1);
+ const sycl::half *src1_shifted = src1 + (sb2 * i1 * sub_batch);
+ float *dst_shifted = dst + (sd2 * i1 * sub_batch);
+ DnnlGemmWrapper::gemm(ctx, a1, b1, a0, src0_shifted,
+ DnnlGemmWrapper::to_dt<sycl::half>(), sa0, sa1, sa2,
+ src1_shifted, DnnlGemmWrapper::to_dt<sycl::half>(), sb0,
+ sb1, sb2, dst_shifted, DnnlGemmWrapper::to_dt<float>(),
+ queue, 1, sub_batch);
+ }
+ }
+ }
+ };
+
+ const bool cont_batches_dim2_a = nb02 * ne02 == nb03;
+ const bool cont_batches_dim2_b = nb12 * ne12 == nb13;
+ const bool cont_batches_dim3_a = ne02 == 1 && nb02 * ne01 == nb03;
+ const bool cont_batches_dim3_b = ne12 == 1 && nb12 * ne11 == nb13;
+ if (cont_batches_dim2_a && cont_batches_dim2_b) {
+ // A batch is considered contiguous if the dimension 2 is not strided
+ int64_t batches0 = ne02 * ne03;
+ int64_t batches1 = ne12 * ne13;
+ launch_gemm_for_batches(src0_f16, src1_f16, dst_ddf, ne00, ne01, batches0,
+ ne10, ne11, batches1, str_a0, str_a1, str_a2, str_b0, str_b1,
+ str_b2, nb2 / sizeof(float));
+ } else if (cont_batches_dim3_a && cont_batches_dim3_b) {
+ // This case is similar to the one above with the difference that only the batch in dimension 3 is used and the dimension 2 is of size 1.
+ int64_t batches0 = ne02 * ne03;
+ int64_t batches1 = ne12 * ne13;
+ int64_t str_a3 = nb03 / type_size_src0;
+ int64_t str_b3 = nb13 / type_size_src1;
+ launch_gemm_for_batches(src0_f16, src1_f16, dst_ddf, ne00, ne01, batches0,
+ ne10, ne11, batches1, str_a0, str_a1, str_a3, str_b0, str_b1,
+ str_b3, nb2 / sizeof(float));
+ } else {
+ for (int64_t b_a = 0; b_a < ne03; b_a++) {
+ const sycl::half *src0_f16_shifted
+ = src0_f16 + (nb03 * b_a / type_size_src0);
+ const sycl::half *src1_f16_shifted
+ = src1_f16 + (nb13 * b_a / type_size_src1);
+ float *dst_shifted = dst_ddf + (nb3 * b_a / sizeof(float));
+ int64_t batches0 = ne02;
+ int64_t batches1 = ne12;
+ launch_gemm_for_batches(src0_f16_shifted, src1_f16_shifted, dst_shifted,
+ ne00, ne01, batches0, ne10, ne11, batches1, str_a0, str_a1,
+ str_a2, str_b0, str_b1, str_b2, nb2 / sizeof(float));
+ }
+ }
+
+ }
+ else
+#endif
+ {
+ if (r2 == 1 && r3 == 1 && is_src0_cont_2 && is_src1_cont_2) {
+ // with a [0, 2, 1, 3] perm. and ne02==1 the matrix strides need to be determined from dim 3:
+ const int64_t sma = ne02 == 1 ? nb03/nb00 : nb02/nb00;
+ const int64_t smb = ne12 == 1 ? s13 : s12;
+
+ // there is no broadcast and src0, src1 are contiguous across dims 2, 3
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch(*queue, oneapi::mkl::transpose::trans,
+ oneapi::mkl::transpose::nontrans, ne01, ne11, ne10, alpha,
+ src0_f16, dpct::library_data_t::real_half, nb01 / nb00, sma,
+ src1_f16, dpct::library_data_t::real_half, s11, smb, beta, dst_ddf,
+ mkl_data_type, ne0, ne1 * ne0, ne12 * ne13, mkl_compute_type)));
+ } else {
+ const int ne23 = ne12 * ne13;
+
+ ggml_sycl_pool_alloc<const void *> ptrs_src(ctx.pool(), 2 * ne23);
+ ggml_sycl_pool_alloc<void *> ptrs_dst(ctx.pool(), 1 * ne23);
+ ggml_sycl_pool_alloc<matrix_info_t<float>> matrix_info(ctx.host_pool(), 1);
+
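+ // Build the per-matrix pointer arrays on the device so that a single
+ // pointer-based batched GEMM call can handle the r2/r3 broadcast across
+ // dims 2 and 3.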
+ sycl::range<3> block_dims(1, ne12, ne13);
+ queue->submit([&](sycl::handler & cgh) {
+ const void ** ptrs_src_get = ptrs_src.get();
+ void ** ptrs_dst_get = ptrs_dst.get();
+ size_t nb12_scaled = src1->type == GGML_TYPE_F16 ? nb12 : s12 * sizeof(sycl::half);
+ size_t nb13_scaled = src1->type == GGML_TYPE_F16 ? nb13 : s13 * sizeof(sycl::half);
+ cgh.parallel_for(sycl::nd_range<3>(block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) {
+ k_compute_batched_ptrs(src0_f16, src1_f16, dst_ddf, ptrs_src_get, ptrs_dst_get, ne12, ne13, ne23, nb02,
+ nb03, nb12_scaled, nb13_scaled, nbd2, nbd3, r2, r3, item_ct1);
+ });
+ });
+
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::gemm_batch(
+ *queue, oneapi::mkl::transpose::trans, oneapi::mkl::transpose::nontrans, ne01, ne11, ne10, alpha,
+ (const void **) (ptrs_src.get() + 0 * ne23), dpct::library_data_t::real_half, nb01 / nb00,
+ (const void **) (ptrs_src.get() + 1 * ne23), dpct::library_data_t::real_half, s11, beta,
+ (void **) (ptrs_dst.get() + 0 * ne23), mkl_data_type, ne0, ne23, mkl_compute_type, matrix_info.get())));
+ }
+ }
+} catch (const sycl::exception & exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+enum class mul_mat_algo {
+ DMMV = 0,
+ MMVQ = 1,
+ MUL_MAT_SYCL = 2,
+};
+
+inline bool ggml_sycl_supports_mmq(enum ggml_type type) {
+ // TODO: accuracy issues in MMQ
+ GGML_UNUSED(type);
+ return false;
+}
+
+inline bool ggml_sycl_supports_reorder_mul_mat_sycl(enum ggml_type type) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ return true;
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q6_K:
+ return !g_ggml_sycl_prioritize_dmmv;
+ default:
+ return false;
+ }
+}
+
+inline bool ggml_sycl_supports_reorder_dmmv(enum ggml_type type) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ return true;
+ default:
+ return false;
+ }
+}
+
+inline bool ggml_sycl_supports_reorder_mmvq(enum ggml_type type) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q6_K:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool ggml_sycl_supports_dmmv(enum ggml_type type) {
+ switch (type) {
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q2_K:
+ case GGML_TYPE_Q3_K:
+ case GGML_TYPE_Q4_K:
+ case GGML_TYPE_Q5_K:
+ case GGML_TYPE_Q6_K:
+ case GGML_TYPE_F16:
+ return true;
+ default:
+ return false;
+ }
+}
+
+// Helper functions to unify device memory allocation for both async and sync paths
+static inline void * sycl_ext_malloc_device(dpct::queue_ptr stream, size_t size) {
+ bool use_async = g_ggml_sycl_use_async_mem_op;
+#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC
+ if (use_async) {
+ return syclex::async_malloc(*stream, sycl::usm::alloc::device, size);
+ }
+#else
+ // If async allocation extension is not available, use_async should always be false.
+ GGML_ASSERT(!use_async);
+#endif
+ return sycl::malloc(size, *stream, sycl::usm::alloc::device);
+}
+
+static inline void sycl_ext_free(dpct::queue_ptr stream, void * ptr) {
+ bool use_async = g_ggml_sycl_use_async_mem_op;
+#if defined(GGML_SYCL_GRAPH) && SYCL_EXT_ONEAPI_ASYNC_MEMORY_ALLOC
+ if (use_async) {
+ syclex::async_free(*stream, ptr);
+ return;
+ }
+#else
+ // If async allocation extension is not available, use_async should always be false.
+ GGML_ASSERT(!use_async);
+#endif
+ sycl::free(ptr, *stream);
+}
+
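+ // Reorder a Q4_0 tensor in place: copy it to a temporary buffer, then
+ // rewrite it as struct-of-arrays (all qs nibbles first, followed by all d
+ // scales) so that the reorder-aware kernels can read quants and scales
+ // contiguously.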
+static void reorder_qw_q4_0(uint8_t * data_device, const int ncols, const int nrows, size_t size, size_t offset,
+ dpct::queue_ptr stream) {
+ uint8_t * tmp_buf = static_cast<uint8_t *>(sycl_ext_malloc_device(stream, size));
+
+ sycl::event copy_event;
+ SYCL_CHECK(CHECK_TRY_ERROR(copy_event = stream->memcpy(tmp_buf, data_device, size)));
+ if (!g_ggml_sycl_use_async_mem_op) {
+ copy_event.wait();
+ }
+
+ GGML_ASSERT((size % sizeof(block_q4_0) == 0));
+ GGML_ASSERT((offset % sizeof(block_q4_0) == 0));
+ int offset_blks = offset / sizeof(block_q4_0);
+ auto qs_ptr = data_device + offset_blks * QK4_0 / 2;
+ auto d_ptr = (sycl::half*)(qs_ptr + ncols * nrows / 2) + offset_blks;
+
+ auto reorder_event = stream->parallel_for(
+ size / sizeof(block_q4_0),
+ [=](auto i) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ const block_q4_0* x = (const block_q4_0*)tmp_buf;
+ const int ib = i;
+
+ for (int j = 0; j < QK4_0 / 2; ++j) {
+ *(qs_ptr + ib * QK4_0 / 2 + j) = x[ib].qs[j];
+ }
+ *(d_ptr + ib) = x[ib].d;
+ });
+ if (!g_ggml_sycl_use_async_mem_op) {
+ reorder_event.wait_and_throw();
+ }
+ sycl_ext_free(stream, tmp_buf);
+}
+
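+ // Same idea for Q4_K: rewrite the blocks as struct-of-arrays with all qs
+ // first, then all scales, then all dm half2 values.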
+static void reorder_qw_q4_k(uint8_t * data_device, size_t size, size_t offset, dpct::queue_ptr stream) {
+ GGML_ASSERT(size % sizeof(block_q4_K) == 0);
+ GGML_ASSERT(offset % sizeof(block_q4_K) == 0);
+
+ const int nblocks = size / sizeof(block_q4_K);
+
+ uint8_t * tmp_buf = static_cast<uint8_t *>(sycl_ext_malloc_device(stream, size));
+
+ sycl::event copy_event;
+ SYCL_CHECK(CHECK_TRY_ERROR(copy_event = stream->memcpy(tmp_buf, data_device, size)));
+ if (!g_ggml_sycl_use_async_mem_op) {
+ copy_event.wait();
+ }
+
+ auto * qs_ptr = data_device;
+ auto * scales_ptr = qs_ptr + QK_K / 2 * nblocks;
+ auto * dm_ptr = (sycl::half2 *) (scales_ptr + K_SCALE_SIZE * nblocks);
+
+ auto reorder_event = stream->parallel_for(nblocks, [=](auto i) {
+ const block_q4_K * x = (const block_q4_K *) tmp_buf;
+ const int ib = i;
+
+ for (int j = 0; j < QK_K / 2; ++j) {
+ qs_ptr[ib * (QK_K / 2) + j] = x[ib].qs[j];
+ }
+
+ for (int j = 0; j < K_SCALE_SIZE; ++j) {
+ scales_ptr[ib * K_SCALE_SIZE + j] = x[ib].scales[j];
+ }
+
+ dm_ptr[ib] = x[ib].dm;
+ });
+ if (!g_ggml_sycl_use_async_mem_op) {
+ reorder_event.wait_and_throw();
+ }
+ sycl_ext_free(stream, tmp_buf);
+}
+
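+ // Same idea for Q6_K: struct-of-arrays with all ql, then all qh, then all
+ // scales, then the d values.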
+static void reorder_qw_q6_k(uint8_t * data_device, size_t size, size_t offset, dpct::queue_ptr stream) {
+ GGML_ASSERT(size % sizeof(block_q6_K) == 0);
+ GGML_ASSERT(offset % sizeof(block_q6_K) == 0);
+
+ const int nblocks = size / sizeof(block_q6_K);
+
+ uint8_t * tmp_buf = static_cast<uint8_t *>(sycl_ext_malloc_device(stream, size));
+
+ sycl::event copy_event;
+ SYCL_CHECK(CHECK_TRY_ERROR(copy_event = stream->memcpy(tmp_buf, data_device, size)));
+ if (!g_ggml_sycl_use_async_mem_op) {
+ copy_event.wait();
+ }
+
+ auto * ql_ptr = data_device;
+ auto * qh_ptr = ql_ptr + (QK_K / 2) * nblocks;
+ auto * scales_ptr = qh_ptr + (QK_K / 4) * nblocks;
+ sycl::half * dm_ptr = (sycl::half *) (scales_ptr + (QK_K / 16) * nblocks);
+
+ auto reorder_event = stream->parallel_for(nblocks, [=](auto i) {
+ const block_q6_K * x = (const block_q6_K *) tmp_buf;
+ const int ib = i;
+
+ const uint8_t * ql = x[ib].ql;
+ const uint8_t * qh = x[ib].qh;
+ uint8_t * base_ql_ptr = ql_ptr + (QK_K / 2) * ib;
+ uint8_t * base_qh_ptr = qh_ptr + (QK_K / 4) * ib;
+ uint8_t * base_scales_ptr = scales_ptr + (QK_K / 16) * ib;
+
+ for (int j = 0; j < QK_K / 2; ++j) {
+ base_ql_ptr[j] = ql[j];
+ }
+ for (int j = 0; j < QK_K / 4; ++j) {
+ base_qh_ptr[j] = qh[j];
+ }
+
+ for (int j = 0; j < QK_K / 16; ++j) {
+ base_scales_ptr[j] = x[ib].scales[j];
+ }
+
+ dm_ptr[ib] = x[ib].d;
+ });
+ if (!g_ggml_sycl_use_async_mem_op) {
+ reorder_event.wait_and_throw();
+ }
+ sycl_ext_free(stream, tmp_buf);
+}
+
+static void reorder_qw(const ggml_tensor * src0, dpct::queue_ptr stream) {
+ uint8_t * data_device = (uint8_t *) src0->data;
+ size_t ncols = src0->ne[0];
+ size_t nrows = src0->ne[1];
+ size_t size = ggml_nbytes(src0);
+
+ switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ reorder_qw_q4_0(data_device, ncols, nrows, size, 0, stream);
+ break;
+ case GGML_TYPE_Q4_K:
+ reorder_qw_q4_k(data_device, size, 0, stream);
+ break;
+ case GGML_TYPE_Q6_K:
+ reorder_qw_q6_k(data_device, size, 0, stream);
+ break;
+ default:
+ GGML_ABORT("reorder_qw() called with unsupported type");
+ break;
+ }
+}
+
+static bool should_reorder_tensor(ggml_backend_sycl_context& ctx, const ggml_tensor * dst) {
+ return !g_ggml_sycl_disable_optimize && // allow optimization, controlled by $GGML_SYCL_DISABLE_OPT
+ ctx.opt_feature.reorder && // allow only devices where the reorder performs well, skip those where it does not
+ dst->op == GGML_OP_MUL_MAT && // limit to the currently supported cases (Q4_0 etc.); extending to more cases is TODO
+ dst->src[1]->ne[1] == 1 && dst->src[1]->ne[2] == 1 && dst->src[1]->ne[3] == 1;
+}
+
+static void opt_for_reorder(ggml_backend_sycl_context * ctx, const ggml_tensor * src0, const ggml_tensor * /* src1 */,
+ ggml_tensor * dst, mul_mat_algo mm_algorithm) {
+ if (!should_reorder_tensor(*ctx, dst)) {
+ return;
+ }
+
+ ggml_tensor_extra_gpu * extra = static_cast<ggml_tensor_extra_gpu *>(src0->extra);
+ if (!extra || extra->optimized_feature.reorder) {
+ return; // Skip permutations and already reordered tensors
+ }
+
+ switch (mm_algorithm) {
+ case mul_mat_algo::DMMV:
+ if (!ggml_sycl_supports_reorder_dmmv(src0->type)) {
+ return;
+ }
+ break;
+ case mul_mat_algo::MMVQ:
+ if (!ggml_sycl_supports_reorder_mmvq(src0->type)) {
+ return;
+ }
+ break;
+ case mul_mat_algo::MUL_MAT_SYCL:
+ if (!ggml_sycl_supports_reorder_mul_mat_sycl(src0->type)) {
+ return;
+ }
+ break;
+ }
+
+ reorder_qw(src0, ctx->stream());
+ extra->optimized_feature.reorder = true; // mark as reordered so later decode/dequantize steps avoid re-reordering
+}
+
+
+static bool can_use_dequantize_mul_mat_vec(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ return ggml_sycl_supports_dmmv(src0->type) && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 &&
+ src0->ne[0] % GGML_SYCL_DMMV_X == 0 && src1->ne[1] == 1;
+}
+
+static bool can_use_mul_mat_vec_q(const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ return ggml_is_quantized(src0->type) && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32 &&
+ src1->ne[1] <= MMVQ_MAX_BATCH_SIZE;
+}
+
+static void ggml_sycl_mul_mat(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ const bool split = ggml_backend_buffer_is_sycl_split(src0->buffer);
+ int64_t min_compute_capability = INT_MAX;
+
+ if (split) {
+ ggml_backend_sycl_split_buffer_type_context * buft_ctx =
+ (ggml_backend_sycl_split_buffer_type_context *) src0->buffer->buft->context;
+ auto & tensor_split = buft_ctx->tensor_split;
+ for (int id = 0; id < ggml_sycl_info().device_count; ++id) {
+ // skip devices that are not going to do any work:
+ if (tensor_split[id] >= (id + 1 < ggml_sycl_info().device_count ? tensor_split[id + 1] : 1.0f)) {
+ continue;
+ }
+
+ if (min_compute_capability > ggml_sycl_info().devices[id].cc) {
+ min_compute_capability = ggml_sycl_info().devices[id].cc;
+ }
+ }
+ } else {
+ min_compute_capability = ggml_sycl_info().devices[ctx.device].cc;
+ }
+
+ // check data types and tensor shapes for custom matrix multiplication kernels:
+ bool use_dequantize_mul_mat_vec = can_use_dequantize_mul_mat_vec(src0, src1, dst);
+
+ bool use_mul_mat_vec_q = can_use_mul_mat_vec_q(src0, src1, dst);
+
+ bool use_mul_mat_q = ggml_sycl_supports_mmq(src0->type)
+ && src1->type == GGML_TYPE_F32 && dst->type == GGML_TYPE_F32;
+
+
+ // mmvq and mmq need the __dp4a instruction which is available for gen12+
+ // Workaround in https://github.com/ggml-org/llama.cpp/commit/95f84d5ce8b449a9b16009434aca800df504a02e
+ use_mul_mat_q = use_mul_mat_q && (src0->type != GGML_TYPE_IQ2_XXS);
+#ifdef SYCL_USE_XMX
+ use_mul_mat_q = use_mul_mat_q && (src1->ne[1] <= MMQ_MAX_BATCH_SIZE);
+#endif // SYCL_USE_XMX
+
+ // The reorder optimization makes the dispatch less obvious: when it is
+ // enabled, MMVQ takes precedence over DMMV, so the current if-else chain
+ // requires disabling DMMV whenever both conditions are met.
+ if (!g_ggml_sycl_prioritize_dmmv && should_reorder_tensor(ctx, dst) &&
+ ggml_sycl_supports_reorder_mmvq(src0->type)) {
+ use_dequantize_mul_mat_vec = use_dequantize_mul_mat_vec && !use_mul_mat_vec_q;
+ }
+
+ if (!split && src0->type == GGML_TYPE_F16 && ggml_is_permuted(src0) && ggml_is_permuted(src1) && src1->ne[1] == 1) {
+ // TODO: Refactor and clean up the mul mat dispatching.
+ if (src0->ne[3] == 1 && src1->ne[3] == 1) {
+ // KQ single-batch
+ // the p021 mul-mat-vec kernel is specific to these dimensions
+ ggml_sycl_mul_mat_vec_p021(ctx, src0, src1, dst);
+ } else {
+ // The kernel in the branch above is faster for that specific case but does not support all mul_mat shapes.
+ ggml_sycl_mul_mat_batched_sycl(ctx, src0, src1, dst);
+ }
+ } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_contiguous(src0) && !ggml_is_transposed(src1) && src1->ne[1] == 1 && src1->ne[3] == 1) {
+ // KQV single-batch
+ ggml_sycl_mul_mat_vec_nc(ctx, src0, src1, dst);
+ } else if (!split && src0->type == GGML_TYPE_F16 && !ggml_is_transposed(src0) && !ggml_is_transposed(src1) && src1->ne[2] * src1->ne[3] > 1) {
+ // KQ + KQV multi-batch
+ ggml_sycl_mul_mat_batched_sycl(ctx, src0, src1, dst);
+ } else if (use_dequantize_mul_mat_vec) {
+ opt_for_reorder(&ctx, src0, src1, dst, mul_mat_algo::DMMV);
+ ggml_sycl_op_mul_mat<no_quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_dequantize_mul_mat_vec);
+ } else if (use_mul_mat_vec_q) {
+ opt_for_reorder(&ctx, src0, src1, dst, mul_mat_algo::MMVQ);
+ ggml_tensor_extra_gpu * extra = static_cast<ggml_tensor_extra_gpu *>(src0->extra);
+ if (extra && extra->optimized_feature.reorder) {
+ ggml_sycl_op_mul_mat<quantize_and_reorder_q8_1_soa>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_vec_q);
+ } else {
+ ggml_sycl_op_mul_mat<quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_vec_q);
+ }
+ } else if (use_mul_mat_q) {
+ ggml_sycl_op_mul_mat<quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_q);
+ } else {
+ ggml_sycl_op_mul_mat<no_quantize_q8_1>(ctx, src0, src1, dst, ggml_sycl_op_mul_mat_sycl);
+ }
+}
+
+
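+ // Maps a row in the compacted (contiguous) src1 buffer back to its
+ // original (i1, i2) position, so results can be scattered back after the
+ // mul_mat.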
+struct mmid_row_mapping {
+ int32_t i1;
+ int32_t i2;
+};
+
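+ // Gather kernel: each (iid1, id) work-group checks whether its expert id
+ // matches i02 and, if so, atomically claims the next row of the contiguous
+ // buffer, copies its src1 row there, and records the mapping.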
+__dpct_inline__ static void k_copy_src1_to_contiguous(
+ const char *__restrict__ src1_original, char *__restrict__ src1_contiguous,
+ int *__restrict__ cur_src1_row, mmid_row_mapping *__restrict__ row_mapping,
+ const char *__restrict__ ids, int64_t i02, size_t ids_nb1, size_t ids_nb0,
+ int64_t ne11, int64_t ne10, size_t nb11, size_t nb12,
+ const sycl::nd_item<3> &item_ct1, int &src1_row) {
+ int32_t iid1 = item_ct1.get_group(2);
+ int32_t id = item_ct1.get_group(1);
+
+ const int32_t row_id_i = *(const int32_t *) (ids + iid1*ids_nb1 + id*ids_nb0);
+
+ if (row_id_i != i02) {
+ return;
+ }
+
+ const int64_t i11 = id % ne11;
+ const int64_t i12 = iid1;
+
+ if (item_ct1.get_local_id(2) == 0) {
+ src1_row =
+ dpct::atomic_fetch_add<sycl::access::address_space::generic_space>(
+ cur_src1_row, 1);
+ row_mapping[src1_row] = {id, iid1};
+ }
+ /*
+ DPCT1065:194: Consider replacing sycl::nd_item::barrier() with
+ sycl::nd_item::barrier(sycl::access::fence_space::local_space) for better
+ performance if there is no access to global memory.
+ */
+ item_ct1.barrier();
+
+ const float * src1_row_original = (const float *)(src1_original + i11*nb11 + i12*nb12);
+ float * src1_row_contiguous = (float *)(src1_contiguous + src1_row*nb11);
+
+#pragma unroll
+ for (int i = item_ct1.get_local_id(2); i < ne10;
+ i += item_ct1.get_local_range(2)) {
+ src1_row_contiguous[i] = src1_row_original[i];
+ }
+}
+
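+ // Scatter kernel: copy each row of the contiguous dst buffer back to its
+ // original position using the recorded row mapping.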
+__dpct_inline__ static void k_copy_dst_from_contiguous(
+ char *__restrict__ dst_original, const char *__restrict__ dst_contiguous,
+ const mmid_row_mapping *__restrict__ row_mapping, int64_t ne0, size_t nb1,
+ size_t nb2, const sycl::nd_item<3> &item_ct1) {
+ int32_t i = item_ct1.get_group(2);
+
+ const int32_t i1 = row_mapping[i].i1;
+ const int32_t i2 = row_mapping[i].i2;
+
+ const float * dst_row_contiguous = (const float *)(dst_contiguous + i*nb1);
+ float * dst_row_original = (float *)(dst_original + i1*nb1 + i2*nb2);
+
+#pragma unroll
+ for (int j = item_ct1.get_local_id(2); j < ne0;
+ j += item_ct1.get_local_range(2)) {
+ dst_row_original[j] = dst_row_contiguous[j];
+ }
+}
+
+static void ggml_sycl_mul_mat_id(ggml_backend_sycl_context & ctx,
+ ggml_tensor *dst) try {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/3);
+ const ggml_tensor *src0 = dst->src[0];
+ const ggml_tensor *src1 = dst->src[1];
+ GGML_ASSERT(!ggml_backend_buffer_is_sycl_split(src0->buffer) && "mul_mat_id does not support split buffers");
+
+ const ggml_tensor *ids = dst->src[2];
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ const queue_ptr stream = ctx.stream();
+
+ const int64_t n_as = ne02;
+ const int64_t n_ids = ids->ne[0];
+
+ std::vector<char> ids_host(ggml_nbytes(ids));
+ const char * ids_dev = (const char *) ids->data;
+
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ stream->memcpy(ids_host.data(), ids_dev, ggml_nbytes(ids))));
+ SYCL_CHECK(CHECK_TRY_ERROR(stream->wait()));
+
+ ggml_tensor src0_row = *src0;
+ ggml_tensor src1_row = *src1;
+ ggml_tensor dst_row = *dst;
+
+ char *src0_original = (char *)src0->data;
+ char *src1_original = (char *)src1->data;
+ char *dst_original = (char *)dst->data;
+
+ src0_row.ne[2] = 1;
+ src0_row.ne[3] = 1;
+ src0_row.nb[3] = nb02;
+
+ src1_row.ne[1] = 1;
+ src1_row.ne[2] = 1;
+ src1_row.ne[3] = 1;
+ src1_row.nb[2] = nb11;
+ src1_row.nb[3] = nb11;
+
+ dst_row.ne[1] = 1;
+ dst_row.ne[2] = 1;
+ dst_row.ne[3] = 1;
+ dst_row.nb[2] = nb1;
+ dst_row.nb[3] = nb1;
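+ // Fast path: with ne12 == 1 each selected expert row can be multiplied
+ // directly through a strided view, no gather/scatter needed.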
+ if (ne12 == 1) {
+ for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) {
+ for (int64_t id = 0; id < n_ids; id++) {
+ const int32_t i02 = *(const int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]);
+ GGML_ASSERT(i02 >= 0 && i02 < n_as);
+
+ const int64_t i11 = id % ne11;
+ const int64_t i12 = iid1;
+
+ const int64_t i1 = id;
+ const int64_t i2 = i12;
+
+ src0_row.data = src0_original + i02*nb02;
+ src1_row.data = src1_original + i11*nb11 + i12*nb12;
+ dst_row.data = dst_original + i1*nb1 + i2*nb2;
+
+ ggml_sycl_mul_mat(ctx, &src0_row, &src1_row, &dst_row);
+ }
+ }
+ } else {
+ ggml_sycl_pool_alloc<char> src1_contiguous(ctx.pool(), sizeof(float)*ggml_nelements(src1));
+ ggml_sycl_pool_alloc<char> dst_contiguous(ctx.pool(), sizeof(float)*ggml_nelements(dst));
+
+ src1_row.data = src1_contiguous.get();
+ dst_row.data = dst_contiguous.get();
+
+ for (int64_t i02 = 0; i02 < n_as; i02++) {
+ int64_t num_src1_rows = 0;
+ for (int64_t iid1 = 0; iid1 < ids->ne[1]; iid1++) {
+ for (int64_t id = 0; id < n_ids; id++) {
+ const int32_t row_id_i = *(const int32_t *) (ids_host.data() + iid1*ids->nb[1] + id*ids->nb[0]);
+
+ GGML_ASSERT(row_id_i >= 0 && row_id_i < n_as);
+
+ if (row_id_i != i02) {
+ continue;
+ }
+
+ num_src1_rows++;
+ }
+ }
+
+ if (num_src1_rows == 0) {
+ continue;
+ }
+
+
+ ggml_sycl_pool_alloc<int> dev_cur_src1_row(ctx.pool(), 1);
+ ggml_sycl_pool_alloc<mmid_row_mapping> dev_row_mapping(ctx.pool(), num_src1_rows);
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ stream->memset(dev_cur_src1_row.get(), 0, sizeof(int))));
+
+ const unsigned int max_work_group_size = ggml_sycl_info().max_work_group_sizes[ctx.device];
+ assert(max_work_group_size % (WARP_SIZE * WARP_SIZE) == 0);
+
+ {
+ sycl::range<3> block_dims(1, 1, std::min((unsigned int)ne10, max_work_group_size));
+ sycl::range<3> grid_dims(1, n_ids, ids->ne[1]);
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 0> src1_row_acc(cgh);
+
+ char *__restrict src1_contiguous_get =
+ src1_contiguous.get();
+ int *__restrict dev_cur_src1_row_get =
+ dev_cur_src1_row.get();
+ mmid_row_mapping *__restrict dev_row_mapping_get =
+ dev_row_mapping.get();
+ size_t ids_nb_ct6 = ids->nb[1];
+ size_t ids_nb_ct7 = ids->nb[0];
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_copy_src1_to_contiguous(
+ src1_original, src1_contiguous_get,
+ dev_cur_src1_row_get,
+ dev_row_mapping_get, ids_dev, i02,
+ ids_nb_ct6, ids_nb_ct7, ne11, ne10, nb11, nb12,
+ item_ct1, src1_row_acc);
+ });
+ });
+ }
+
+ src0_row.data = src0_original + i02*nb02;
+
+ GGML_ASSERT(nb11 == sizeof(float)*ne10);
+ GGML_ASSERT(nb1 == sizeof(float)*ne0);
+ src1_row.ne[1] = num_src1_rows;
+
+ src1_row.nb[1] = nb11;
+ src1_row.nb[2] = num_src1_rows*nb11;
+ src1_row.nb[3] = num_src1_rows*nb11;
+
+ dst_row.ne[1] = num_src1_rows;
+ dst_row.nb[1] = nb1;
+ dst_row.nb[2] = num_src1_rows*nb1;
+ dst_row.nb[3] = num_src1_rows*nb1;
+
+ ggml_sycl_mul_mat(ctx, &src0_row, &src1_row, &dst_row);
+
+ {
+ sycl::range<3> block_dims(1, 1, std::min((unsigned int)ne0, max_work_group_size));
+ sycl::range<3> grid_dims(1, 1, num_src1_rows);
+ stream->submit([&](sycl::handler &cgh) {
+ const char *__restrict dst_contiguous_get =
+ dst_contiguous.get();
+ const mmid_row_mapping *__restrict dev_row_mapping_get =
+ dev_row_mapping.get();
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ k_copy_dst_from_contiguous(dst_original,
+ dst_contiguous_get,
+ dev_row_mapping_get,
+ ne0, nb1, nb2, item_ct1);
+ });
+ });
+ }
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_sycl_scale(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_scale(ctx, dst);
+}
+
+static void ggml_sycl_diag_mask_inf(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_diag_mask_inf(ctx, dst);
+}
+
+static void ggml_sycl_pool2d(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_pool2d(ctx, dst);
+}
+
+static void ggml_sycl_im2col(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ ggml_sycl_op_im2col(ctx, dst);
+}
+
+static void ggml_sycl_sum(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ GGML_ASSERT(ggml_is_contiguous(dst->src[0]));
+ ggml_sycl_op_sum(ctx, dst);
+}
+
+static void ggml_sycl_sum_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ GGML_ASSERT(ggml_is_contiguous(dst->src[0]));
+ ggml_sycl_op_sum_rows(ctx, dst);
+}
+
+static void ggml_sycl_mean(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ GGML_ASSERT(ggml_is_contiguous(dst->src[0]));
+ ggml_sycl_op_mean(ctx, dst);
+}
+
+static void ggml_sycl_argsort(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ GGML_ASSERT(ggml_is_contiguous(dst->src[0]));
+ ggml_sycl_op_argsort(ctx, dst);
+}
+
+static void ggml_sycl_argmax(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ GGML_ASSERT(ggml_is_contiguous(dst->src[0]));
+ ggml_sycl_op_argmax(ctx, dst);
+}
+
+
+static void ggml_sycl_set_main_device(const int main_device) try {
+ if (dpct::get_current_device_id() == static_cast<unsigned int> (main_device)) {
+ return;
+ }
+ check_allow_gpu_index(main_device);
+ dpct::select_device(main_device);
+
+ if (g_ggml_sycl_debug) {
+ dpct::device_info prop;
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info(
+ prop, dpct::dev_mgr::instance().get_device(main_device))));
+ GGML_LOG_INFO("Using device %d (%s) as main device\n",
+ main_device, prop.get_name());
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static bool ggml_sycl_compute_forward(ggml_backend_sycl_context & ctx, struct ggml_tensor * dst) try {
+ if (!g_sycl_loaded) return false;
+
+ if (dst->src[0] != nullptr && ggml_backend_buffer_is_sycl_split(dst->src[0]->buffer)) {
+ ggml_sycl_set_peer_access(dst->src[1]->ne[1], ctx.device);
+ }
+
+ switch (dst->op) {
+ case GGML_OP_ARGMAX:
+ ggml_sycl_argmax(ctx, dst);
+ break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ ggml_sycl_op_conv_transpose_1d(ctx, dst);
+ break;
+ case GGML_OP_REPEAT:
+ ggml_sycl_repeat(ctx, dst);
+ break;
+ case GGML_OP_REPEAT_BACK:
+ ggml_sycl_repeat_back(ctx, dst);
+ break;
+ case GGML_OP_GET_ROWS:
+ ggml_sycl_get_rows(ctx, dst);
+ break;
+ case GGML_OP_SET:
+ ggml_sycl_op_set(ctx, dst);
+ break;
+ case GGML_OP_SET_ROWS:
+ ggml_sycl_op_set_rows(ctx, dst);
+ break;
+ case GGML_OP_DUP:
+ ggml_sycl_dup(ctx, dst);
+ break;
+ case GGML_OP_ADD:
+ case GGML_OP_ADD1: // TODO: more efficient implementation
+ ggml_sycl_add(ctx, dst);
+ break;
+ case GGML_OP_ADD_ID:
+ ggml_sycl_add_id(ctx, dst);
+ break;
+ case GGML_OP_SUB:
+ ggml_sycl_sub(ctx, dst);
+ break;
+ case GGML_OP_COUNT_EQUAL:
+ ggml_sycl_count_equal(ctx, dst);
+ break;
+ case GGML_OP_ACC:
+ ggml_sycl_acc(ctx, dst);
+ break;
+ case GGML_OP_MUL:
+ ggml_sycl_mul(ctx, dst);
+ break;
+ case GGML_OP_LOG:
+ ggml_sycl_log(ctx, dst);
+ break;
+ case GGML_OP_DIV:
+ ggml_sycl_div(ctx, dst);
+ break;
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(dst)) {
+ case GGML_UNARY_OP_NEG:
+ ggml_sycl_neg(ctx, dst);
+ break;
+ case GGML_UNARY_OP_STEP:
+ ggml_sycl_step(ctx, dst);
+ break;
+ case GGML_UNARY_OP_GELU:
+ ggml_sycl_gelu(ctx, dst);
+ break;
+ case GGML_UNARY_OP_SILU:
+ ggml_sycl_silu(ctx, dst);
+ break;
+ case GGML_UNARY_OP_GELU_QUICK:
+ ggml_sycl_gelu_quick(ctx, dst);
+ break;
+ case GGML_UNARY_OP_GELU_ERF:
+ ggml_sycl_gelu_erf(ctx, dst);
+ break;
+ case GGML_UNARY_OP_TANH:
+ ggml_sycl_tanh(ctx, dst);
+ break;
+ case GGML_UNARY_OP_RELU:
+ ggml_sycl_relu(ctx, dst);
+ break;
+ case GGML_UNARY_OP_SIGMOID:
+ ggml_sycl_sigmoid(ctx, dst);
+ break;
+ case GGML_UNARY_OP_HARDSIGMOID:
+ ggml_sycl_hardsigmoid(ctx, dst);
+ break;
+ case GGML_UNARY_OP_HARDSWISH:
+ ggml_sycl_hardswish(ctx, dst);
+ break;
+ case GGML_UNARY_OP_EXP:
+ ggml_sycl_exp(ctx, dst);
+ break;
+ case GGML_UNARY_OP_SOFTPLUS:
+ ggml_sycl_softplus(ctx, dst);
+ break;
+ case GGML_UNARY_OP_SGN:
+ ggml_sycl_sgn(ctx, dst);
+ break;
+ case GGML_UNARY_OP_ABS:
+ ggml_sycl_abs(ctx, dst);
+ break;
+ case GGML_UNARY_OP_ELU:
+ ggml_sycl_elu(ctx, dst);
+ break;
+ case GGML_UNARY_OP_FLOOR:
+ ggml_sycl_floor(ctx, dst);
+ break;
+ case GGML_UNARY_OP_CEIL:
+ ggml_sycl_ceil(ctx, dst);
+ break;
+ case GGML_UNARY_OP_ROUND:
+ ggml_sycl_round(ctx, dst);
+ break;
+ case GGML_UNARY_OP_TRUNC:
+ ggml_sycl_trunc(ctx, dst);
+ break;
+ default:
+ return false;
+ }
+ break;
+ case GGML_OP_GLU:
+ switch (ggml_get_glu_op(dst)) {
+ case GGML_GLU_OP_REGLU:
+ ggml_sycl_reglu(ctx, dst);
+ break;
+ case GGML_GLU_OP_GEGLU:
+ ggml_sycl_geglu(ctx, dst);
+ break;
+ case GGML_GLU_OP_SWIGLU:
+ ggml_sycl_swiglu(ctx, dst);
+ break;
+ case GGML_GLU_OP_SWIGLU_OAI:
+ ggml_sycl_swiglu_oai(ctx, dst);
+ break;
+ case GGML_GLU_OP_GEGLU_ERF:
+ ggml_sycl_geglu_erf(ctx, dst);
+ break;
+ case GGML_GLU_OP_GEGLU_QUICK:
+ ggml_sycl_geglu_quick(ctx, dst);
+ break;
+ default:
+ return false;
+ }
+ break;
+ case GGML_OP_NORM:
+ ggml_sycl_norm(ctx, dst);
+ break;
+ case GGML_OP_GROUP_NORM:
+ ggml_sycl_group_norm(ctx, dst);
+ break;
+ case GGML_OP_CONCAT:
+ ggml_sycl_op_concat(ctx, dst);
+ break;
+ case GGML_OP_PAD_REFLECT_1D:
+ ggml_sycl_op_pad_reflect_1d(ctx, dst);
+ break;
+ case GGML_OP_UPSCALE:
+ ggml_sycl_upscale(ctx, dst);
+ break;
+ case GGML_OP_PAD:
+ ggml_sycl_pad(ctx, dst);
+ break;
+ case GGML_OP_LEAKY_RELU:
+ ggml_sycl_leaky_relu(ctx, dst);
+ break;
+ case GGML_OP_RMS_NORM_BACK:
+ ggml_sycl_rms_norm_back(ctx, dst);
+ break;
+ case GGML_OP_RMS_NORM:
+ ggml_sycl_rms_norm(ctx, dst);
+ break;
+ case GGML_OP_L2_NORM:
+ ggml_sycl_l2_norm(ctx, dst);
+ break;
+ case GGML_OP_MUL_MAT:
+ if (dst->src[0]->ne[3] != dst->src[1]->ne[3]) {
+ return false;
+ }
+ /* ggml_sycl_mul_mat_id is dependent on ggml_sycl_mul_mat */
+ ggml_sycl_mul_mat(ctx, dst->src[0], dst->src[1], dst);
+ break;
+ case GGML_OP_MUL_MAT_ID:
+ if (dst->src[0]->ne[3] != dst->src[1]->ne[3]) {
+ return false;
+ }
+ ggml_sycl_mul_mat_id(ctx, dst);
+ break;
+ case GGML_OP_OUT_PROD:
+ ggml_sycl_op_out_prod(ctx, dst);
+ break;
+ case GGML_OP_SCALE:
+ ggml_sycl_scale(ctx, dst);
+ break;
+ case GGML_OP_SQR:
+ ggml_sycl_sqr(ctx, dst);
+ break;
+ case GGML_OP_SQRT:
+ ggml_sycl_sqrt(ctx, dst);
+ break;
+ case GGML_OP_SIN:
+ ggml_sycl_sin(ctx, dst);
+ break;
+ case GGML_OP_COS:
+ ggml_sycl_cos(ctx, dst);
+ break;
+ case GGML_OP_CLAMP:
+ ggml_sycl_clamp(ctx, dst);
+ break;
+ case GGML_OP_CPY:
+ ggml_sycl_cpy(ctx, dst->src[0], dst->src[1]);
+ break;
+ case GGML_OP_CONT:
+ ggml_sycl_dup(ctx, dst);
+ break;
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ GGML_SYCL_DEBUG("%s: Tensor NO-OP\n", __func__);
+ break;
+ case GGML_OP_TRI:
+ ggml_sycl_op_tri(ctx, dst);
+ break;
+ case GGML_OP_DIAG_MASK_INF:
+ ggml_sycl_diag_mask_inf(ctx, dst);
+ break;
+ case GGML_OP_SOFT_MAX:
+ ggml_sycl_op_soft_max(ctx, dst);
+ break;
+ case GGML_OP_SOFT_MAX_BACK:
+ ggml_sycl_op_soft_max_back(ctx, dst);
+ break;
+ case GGML_OP_ROPE:
+ ggml_sycl_rope(ctx, dst);
+ break;
+ case GGML_OP_IM2COL:
+ ggml_sycl_im2col(ctx, dst);
+ break;
+ case GGML_OP_POOL_2D:
+ ggml_sycl_pool2d(ctx, dst);
+ break;
+ case GGML_OP_SUM:
+ ggml_sycl_sum(ctx, dst);
+ break;
+ case GGML_OP_SUM_ROWS:
+ ggml_sycl_sum_rows(ctx, dst);
+ break;
+ case GGML_OP_MEAN:
+ ggml_sycl_mean(ctx, dst);
+ break;
+ case GGML_OP_ARGSORT:
+ ggml_sycl_argsort(ctx, dst);
+ break;
+ case GGML_OP_TOP_K:
+ ggml_sycl_op_top_k(ctx, dst);
+ break;
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ ggml_sycl_op_timestep_embedding(ctx, dst);
+ break;
+ case GGML_OP_RWKV_WKV6:
+ ggml_sycl_op_rwkv_wkv6(ctx, dst);
+ break;
+ case GGML_OP_RWKV_WKV7:
+ ggml_sycl_op_rwkv_wkv7(ctx, dst);
+ break;
+ case GGML_OP_GATED_LINEAR_ATTN:
+ ggml_sycl_op_gated_linear_attn(ctx, dst);
+ break;
+ case GGML_OP_SSM_CONV:
+ ggml_sycl_ssm_conv(ctx, dst);
+ break;
+ case GGML_OP_ROLL:
+ ggml_sycl_roll(ctx, dst);
+ break;
+ case GGML_OP_ARANGE:
+ ggml_sycl_arange(ctx, dst);
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+} catch (sycl::exception & e) {
+ std::cerr << e.what() << "Exception caught at file:" << __FILE__ << ", line:" << __LINE__ << std::endl;
+ std::cerr << "Error OP "<<ggml_op_name(dst->op)<< std::endl;
+ std::exit(1);
+}
+
+GGML_API void ggml_backend_sycl_get_device_description(int device, char *description,
+ size_t description_size) try {
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_device_description\n");
+ dpct::device_info prop;
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info(
+ prop, dpct::dev_mgr::instance().get_device(device))));
+ snprintf(description, description_size, "%s", prop.get_name());
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+void ggml_backend_sycl_get_device_memory(int device, size_t *free,
+ size_t *total) try {
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_get_device_memory\n");
+ ggml_sycl_set_device(device);
+
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ dpct::dev_mgr::instance().get_device(device).get_memory_info(*free, *total)));
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+
+// backend
+
+static const char * ggml_backend_sycl_get_name(ggml_backend_t backend) {
+ ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
+
+ return sycl_ctx->name.c_str();
+}
+
+static void ggml_backend_sycl_free(ggml_backend_t backend) {
+ ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
+
+ delete sycl_ctx;
+ delete backend;
+}
+
+static void ggml_backend_sycl_set_tensor_async(ggml_backend_t backend,
+ ggml_tensor *tensor,
+ const void *data, size_t offset,
+ size_t size) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str());
+ GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset);
+ ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
+ ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
+
+ GGML_ASSERT(buf->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && "unsupported buffer type");
+ const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ (stream)->memcpy((char *)tensor->data + offset, data, size)));
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_get_tensor_async(ggml_backend_t backend,
+ const ggml_tensor *tensor,
+ void *data, size_t offset,
+ size_t size) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": tensor", tensor).c_str());
+ GGML_SYCL_DEBUG(" size=%zu offset=%zu\n", size, offset);
+ ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
+ ggml_backend_buffer_t buf = tensor->view_src ? tensor->view_src->buffer : tensor->buffer;
+
+ GGML_ASSERT(buf->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) && "unsupported buffer type");
+ const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
+ SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
+ data, (const char *)tensor->data + offset, size)));
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static bool ggml_backend_sycl_cpy_tensor_async(ggml_backend_t backend,
+ const ggml_tensor *src,
+ ggml_tensor *dst) try {
+ ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
+ bool is_cpy_supported = dst->buffer->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device) &&
+ ggml_backend_buffer_is_sycl(src->buffer);
+ GGML_SYCL_DEBUG("[SYCL] call %s", __func__);
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(": dst", dst).c_str());
+ GGML_SYCL_DEBUG("%s", debug_get_tensor_str(" src", src).c_str());
+ GGML_SYCL_DEBUG(" is_cpy_supported=%d\n", is_cpy_supported);
+ if (is_cpy_supported) {
+ /*
+ DPCT1009:215: SYCL uses exceptions to report errors and does not use the
+ error codes. The original code was commented out and a warning string
+ was inserted. You need to rewrite this code.
+ */
+ const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
+ SYCL_CHECK(CHECK_TRY_ERROR((stream)->memcpy(
+ dst->data, src->data, ggml_nbytes(dst))));
+ return true;
+ }
+
+ return false;
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_synchronize(ggml_backend_t backend) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s\n", __func__);
+ ggml_backend_sycl_context * sycl_ctx = (ggml_backend_sycl_context *)backend->context;
+ const queue_ptr stream = sycl_ctx->stream(sycl_ctx->device, 0);
+ SYCL_CHECK(CHECK_TRY_ERROR((stream)->wait()));
+
+ GGML_UNUSED(backend);
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_graph_compute_impl(ggml_backend_sycl_context * sycl_ctx, ggml_cgraph * cgraph) {
+ ggml_sycl_set_main_device(sycl_ctx->device);
+
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ ggml_tensor * node = cgraph->nodes[i];
+ if (ggml_is_empty(node) || node->op == GGML_OP_RESHAPE || node->op == GGML_OP_TRANSPOSE || node->op == GGML_OP_VIEW || node->op == GGML_OP_PERMUTE || node->op == GGML_OP_NONE) {
+ continue;
+ }
+ if ((node->flags & GGML_TENSOR_FLAG_COMPUTE) == 0) {
+ continue;
+ }
+#ifndef NDEBUG
+ assert(node->buffer->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device));
+ for (int j = 0; j < GGML_MAX_SRC; j++) {
+ if (node->src[j] != nullptr) {
+ assert(node->src[j]->buffer->buft == ggml_backend_sycl_buffer_type(sycl_ctx->device));
+ }
+ }
+#endif
+ bool ok = ggml_sycl_compute_forward(*sycl_ctx, node);
+ if (!ok) {
+ GGML_LOG_ERROR("%s: error: op not supported %s (%s)\n", __func__, node->name, ggml_op_name(node->op));
+ }
+ GGML_ASSERT(ok);
+ }
+}
+
+#ifdef GGML_SYCL_GRAPH
+static bool check_graph_compatibility(ggml_cgraph * cgraph) {
+ if (ggml_sycl_info().device_count > 1) {
+ // A sycl_ex::command_graph object can only be created for a single device
+ GGML_LOG_INFO("%s: disabling SYCL graphs due to multiple devices\n", __func__);
+ return false;
+ }
+
+ for (int i = 0; i < cgraph->n_nodes; i++) {
+ const ggml_op node_op = cgraph->nodes[i]->op;
+ switch (node_op) {
+ default:
+ break;
+ case GGML_OP_CONCAT:
+ // ggml_sycl_op_concat() does a blocking host wait after memcpy operations,
+ // but wait() can't be called on the events returned by a queue recording
+ // to a graph.
+ [[fallthrough]];
+ case GGML_OP_MUL_MAT_ID:
+ // ggml_sycl_mul_mat_id() does a blocking host wait on the sycl queue after
+ // submitting a memcpy operation, but wait() can't be called on a queue that
+ // is recording to a graph.
+ GGML_LOG_INFO("%s: disabling SYCL graphs due to unsupported node type %s\n", __func__,
+ ggml_op_name(node_op));
+ return false;
+ case GGML_OP_MUL_MAT:
+ // We cannot use graphs with ggml_sycl_mul_mat() when the SYCL async memory
+ // allocation extension is unavailable: the reordering path performs SYCL
+ // malloc/free and host waits, none of which are supported while recording
+ // to a graph.
+ if (!g_ggml_sycl_use_async_mem_op) {
+ GGML_LOG_INFO(
+ "%s: disabling SYCL graphs due to unsupported node type when using a compiler without the "
+ "oneAPI async memory allocation extension "
+ "%s\n",
+ __func__, ggml_op_name(node_op));
+ return false;
+ }
+ }
+ }
+ return true;
+}
+#endif
+
+static ggml_status ggml_backend_sycl_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
+ auto * sycl_ctx = static_cast<ggml_backend_sycl_context *>(backend->context);
+
+#ifdef GGML_SYCL_GRAPH
+ bool use_sycl_graph = !g_ggml_sycl_disable_graph && check_graph_compatibility(cgraph);
+ if (use_sycl_graph) {
+ const bool graph_support = dpct::get_device(sycl_ctx->device).has(sycl::aspect::ext_oneapi_limited_graph);
+ if (!graph_support) {
+ GGML_SYCL_DEBUG("[SYCL-GRAPH] can not use graphs on device:%d\n", sycl_ctx->device);
+ ggml_backend_sycl_graph_compute_impl(sycl_ctx, cgraph);
+ return GGML_STATUS_SUCCESS;
+ }
+
+ sycl_ex::command_graph model_sycl_graph(*(sycl_ctx->stream()), {sycl_ex::property::graph::assume_buffer_outlives_graph{}});
+
+ model_sycl_graph.begin_recording(*(sycl_ctx->stream()));
+ ggml_backend_sycl_graph_compute_impl(sycl_ctx, cgraph);
+ model_sycl_graph.end_recording();
+
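+ // Reuse and update the previously finalized executable graph when the
+ // device supports whole-graph update; otherwise finalize a new executable
+ // graph each time.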
+ const bool graph_update_support = dpct::get_device(sycl_ctx->device).has(sycl::aspect::ext_oneapi_graph);
+ if (!sycl_ctx->exec_graph || !graph_update_support) {
+ auto exec_graph = graph_update_support ? model_sycl_graph.finalize(sycl_ex::property::graph::updatable{}) :
+ model_sycl_graph.finalize();
+ sycl_ctx->exec_graph = std::make_unique<
+ sycl_ex::command_graph<sycl_ex::graph_state::executable>>(exec_graph);
+ } else {
+ try {
+ sycl_ctx->exec_graph->update(model_sycl_graph);
+ GGML_SYCL_DEBUG("[SYCL-GRAPH] update success\n");
+ } catch (sycl::exception const & e) {
+ GGML_SYCL_DEBUG("[SYCL-GRAPH] Exception when updating graph, %s\n", e.what());
+ auto exec_graph = model_sycl_graph.finalize({sycl_ex::property::graph::updatable{}});
+ sycl_ctx->exec_graph = std::make_unique<
+ sycl_ex::command_graph<sycl_ex::graph_state::executable>>(exec_graph);
+ }
+ }
+
+ sycl_ctx->stream()->ext_oneapi_graph(*(sycl_ctx->exec_graph));
+ } else
+#endif
+ {
+ ggml_backend_sycl_graph_compute_impl(sycl_ctx, cgraph);
+ }
+ return GGML_STATUS_SUCCESS;
+}
+
+static void ggml_backend_sycl_event_record(ggml_backend_t backend, ggml_backend_event_t event)
+try
+{
+ ggml_backend_sycl_context *sycl_ctx =
+ (ggml_backend_sycl_context *)backend->context;
+
+ sycl::event *sycl_event = static_cast<sycl::event *>(event->context);
+
+ const queue_ptr &stream = sycl_ctx->stream(sycl_ctx->device, 0);
+ // Record the current state of the queue
+ SYCL_CHECK(CHECK_TRY_ERROR(*sycl_event = stream->ext_oneapi_submit_barrier()));
+}
+catch (sycl::exception const &exc)
+{
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_backend_sycl_event_wait(ggml_backend_t backend, ggml_backend_event_t event) try {
+ GGML_SYCL_DEBUG("[SYCL] call %s\n", __func__);
+ sycl::event* sycl_event = static_cast<sycl::event*>(event->context);
+
+ if (ggml_backend_is_sycl(backend)) {
+ SYCL_CHECK(CHECK_TRY_ERROR(sycl_event->wait()));
+ } else {
+ GGML_ABORT("fatal error");
+ }
+} catch (sycl::exception const& exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static ggml_backend_i ggml_backend_sycl_interface = {
+ /* .get_name = */ ggml_backend_sycl_get_name,
+ /* .free = */ ggml_backend_sycl_free,
+ /* .set_tensor_async = */ ggml_backend_sycl_set_tensor_async,
+ /* .get_tensor_async = */ ggml_backend_sycl_get_tensor_async,
+ /* .cpy_tensor_async = */ NULL, // ggml_backend_sycl_cpy_tensor_async,
+ // TODO: update for the new interface
+ /* .synchronize = */ ggml_backend_sycl_synchronize,
+ /* .graph_plan_create = */ NULL,
+ /* .graph_plan_free = */ NULL,
+ /* .graph_plan_update = */ NULL,
+ /* .graph_plan_compute = */ NULL,
+ /* .graph_compute = */ ggml_backend_sycl_graph_compute,
+ /* .event_record = */ ggml_backend_sycl_event_record,
+ /* .event_wait = */ ggml_backend_sycl_event_wait,
+ /* .graph_optimize = */ NULL,
+};
+
+static ggml_guid_t ggml_backend_sycl_guid() {
+ static ggml_guid guid = { 0x58, 0x05, 0x13, 0x8f, 0xcd, 0x3a, 0x61, 0x9d, 0xe7, 0xcd, 0x98, 0xa9, 0x03, 0xfd, 0x7c, 0x53 };
+ return &guid;
+}
+
+bool ggml_backend_is_sycl(ggml_backend_t backend) {
+ return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_sycl_guid());
+}
+
+int ggml_backend_sycl_get_device_count() {
+ return ggml_sycl_info().device_count;
+}
+
+
+// backend device
+
+struct ggml_backend_sycl_device_context {
+ int device;
+ std::string name;
+ std::string description;
+ int op_offload_min_batch_size;
+};
+
+static const char * ggml_backend_sycl_device_get_name(ggml_backend_dev_t dev) {
+ ggml_backend_sycl_device_context * ctx = (ggml_backend_sycl_device_context *)dev->context;
+ return ctx->name.c_str();
+}
+
+static const char * ggml_backend_sycl_device_get_description(ggml_backend_dev_t dev) {
+ ggml_backend_sycl_device_context * ctx = (ggml_backend_sycl_device_context *)dev->context;
+ return ctx->description.c_str();
+}
+
+static void ggml_backend_sycl_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
+ ggml_backend_sycl_device_context * ctx = (ggml_backend_sycl_device_context *)dev->context;
+ ggml_sycl_set_device(ctx->device);
+ SYCL_CHECK(CHECK_TRY_ERROR(
+ dpct::dev_mgr::instance().get_device(ctx->device).get_memory_info(*free, *total)));
+}
+
+static enum ggml_backend_dev_type ggml_backend_sycl_device_get_type(ggml_backend_dev_t dev) {
+ GGML_UNUSED(dev);
+ return GGML_BACKEND_DEVICE_TYPE_GPU;
+}
+
+static void ggml_backend_sycl_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
+ props->name = ggml_backend_sycl_device_get_name(dev);
+ props->description = ggml_backend_sycl_device_get_description(dev);
+ props->type = ggml_backend_sycl_device_get_type(dev);
+ ggml_backend_sycl_device_get_memory(dev, &props->memory_free, &props->memory_total);
+
+ bool host_buffer = getenv("GGML_SYCL_NO_PINNED") == nullptr;
+#ifdef GGML_SYCL_NO_PEER_COPY
+ bool events = false;
+#else
+ bool events = true;
+#endif
+
+ props->caps = {
+ /* .async = */ true,
+ /* .host_buffer = */ host_buffer,
+ /* .buffer_from_host_ptr = */ false,
+ /* .events = */ events,
+ };
+}
+
+static ggml_backend_t ggml_backend_sycl_device_init(ggml_backend_dev_t dev, const char * params) {
+ GGML_UNUSED(params);
+ ggml_backend_sycl_device_context * ctx = (ggml_backend_sycl_device_context *)dev->context;
+ return ggml_backend_sycl_init(ctx->device);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_sycl_device_get_buffer_type(ggml_backend_dev_t dev) {
+ ggml_backend_sycl_device_context * ctx = (ggml_backend_sycl_device_context *)dev->context;
+ return ggml_backend_sycl_buffer_type(ctx->device);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_sycl_device_get_host_buffer_type(ggml_backend_dev_t dev) {
+ GGML_UNUSED(dev);
+ return ggml_backend_sycl_host_buffer_type();
+}
+
+static ggml_backend_buffer_t ggml_backend_sycl_device_buffer_from_host_ptr(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
+ GGML_UNUSED(dev);
+ GGML_UNUSED(ptr);
+ GGML_UNUSED(size);
+ GGML_UNUSED(max_tensor_size);
+ return nullptr;
+}
+
+static bool ggml_backend_sycl_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+ ggml_backend_sycl_device_context *sycl_ctx =
+ (ggml_backend_sycl_device_context *)dev->context;
+ int device = sycl_ctx->device;
+ switch (op->op) {
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ ggml_type src0_type = op->src[0]->type;
+ ggml_type src1_type = op->src[1]->type;
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ return false;
+ }
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(op)) {
+ case GGML_UNARY_OP_SGN:
+ case GGML_UNARY_OP_ABS:
+ case GGML_UNARY_OP_NEG:
+ case GGML_UNARY_OP_STEP:
+ case GGML_UNARY_OP_RELU:
+ case GGML_UNARY_OP_HARDSIGMOID:
+ case GGML_UNARY_OP_TANH:
+ case GGML_UNARY_OP_GELU:
+ case GGML_UNARY_OP_SILU:
+ case GGML_UNARY_OP_SIGMOID:
+ case GGML_UNARY_OP_HARDSWISH:
+ case GGML_UNARY_OP_GELU_QUICK:
+ case GGML_UNARY_OP_GELU_ERF:
+ case GGML_UNARY_OP_EXP:
+ case GGML_UNARY_OP_SOFTPLUS:
+ case GGML_UNARY_OP_ELU:
+ case GGML_UNARY_OP_CEIL:
+ return true;
+ case GGML_UNARY_OP_FLOOR:
+ case GGML_UNARY_OP_ROUND:
+ case GGML_UNARY_OP_TRUNC:
+#if defined (GGML_SYCL_F16)
+ return ggml_is_contiguous(op->src[0]) && (op->type == op->src[0]->type);
+#else
+ return ggml_is_contiguous(op->src[0]) && (op->src[0]->type == GGML_TYPE_F32 && op->type == GGML_TYPE_F32) && (op->type == op->src[0]->type);
+#endif
+ default:
+ return false;
+ }
+ case GGML_OP_GLU:
+ switch (ggml_get_glu_op(op)) {
+ case GGML_GLU_OP_REGLU:
+ case GGML_GLU_OP_GEGLU:
+ case GGML_GLU_OP_SWIGLU:
+ case GGML_GLU_OP_SWIGLU_OAI:
+ case GGML_GLU_OP_GEGLU_ERF:
+ case GGML_GLU_OP_GEGLU_QUICK:
+ return ggml_is_contiguous_1(op->src[0]);
+ default:
+ return false;
+ }
+ break;
+ case GGML_OP_MUL_MAT:
+ case GGML_OP_MUL_MAT_ID:
+ {
+ struct ggml_tensor * a = op->src[0];
+ struct ggml_tensor * b = op->src[1];
+
+ if (a->ne[3] != b->ne[3]) {
+ return false;
+ }
+ ggml_type a_type = a->type;
+ if (a_type == GGML_TYPE_IQ4_NL || a_type == GGML_TYPE_IQ4_XS ||
+ a_type == GGML_TYPE_IQ3_XXS || a_type == GGML_TYPE_IQ3_S ||
+ a_type == GGML_TYPE_IQ2_XXS || a_type == GGML_TYPE_IQ2_XS || a_type == GGML_TYPE_IQ2_S ||
+ a_type == GGML_TYPE_IQ1_S || a_type == GGML_TYPE_IQ1_M
+ ) {
+ if (b->ne[1] == 1 && ggml_nrows(b) > 1) {
+ return false;
+ }
+ }
+ ggml_type src0_type = op->src[0]->type;
+ if (src0_type == GGML_TYPE_BF16 ) {
+ // TODO: support GGML_TYPE_BF16
+ // FIXME: keep a list of supported types to avoid breaking the backend when a new type is added
+ return false;
+ }
+
+ // TODO: The configuration below needs more work to be supported with oneDNN
+ if (ggml_is_permuted(a) && !ggml_is_contiguous(a) &&
+ a->ne[2] > 1 && a->ne[3] > 1 && src0_type == GGML_TYPE_F16) {
+ return false;
+ }
+
+ // TODO: This specific configuration can fail with oneDNN and needs more debugging
+ if (!ggml_is_permuted(a) && ggml_is_permuted(b) && b->ne[2] > 1 && b->ne[3] > 1 &&
+ a->ne[0] > 128 && a->ne[2] == 1 && src0_type == GGML_TYPE_F16) {
+ return false;
+ }
+ return true;
+ }
+ case GGML_OP_OUT_PROD:
+ return op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32 && op->src[1]->type == GGML_TYPE_F32 && op->ne[2] == 1 && op->ne[3] == 1;
+ case GGML_OP_GET_ROWS:
+ {
+ switch (op->src[0]->type) {
+ case GGML_TYPE_F16:
+ case GGML_TYPE_F32:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ return true;
+ default:
+ return false;
+ }
+ }
+ case GGML_OP_SET:
+ return (op->type == GGML_TYPE_F32) &&
+ (op->src[0] && op->src[1]) &&
+ (op->src[0]->type == GGML_TYPE_F32) &&
+ (op->src[1]->type == GGML_TYPE_F32);
+
+ case GGML_OP_SET_ROWS:
+ {
+ return ((op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_BF16 ||
+ op->type == GGML_TYPE_Q8_0 || op->type == GGML_TYPE_Q5_1 || op->type == GGML_TYPE_Q5_0 ||
+ op->type == GGML_TYPE_Q4_1 || op->type == GGML_TYPE_Q4_0 || op->type == GGML_TYPE_IQ4_NL) &&
+ (op->src[1]->type == GGML_TYPE_I64 || op->src[1]->type == GGML_TYPE_I32));
+ }
+ break;
+ case GGML_OP_CPY:
+ {
+ ggml_type src0_type = op->src[0]->type;
+ ggml_type src1_type = op->src[1]->type;
+ if (src0_type == src1_type && (ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1])) && src0_type != GGML_TYPE_BF16) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_F16) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q8_0) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_0) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q4_1) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F16) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F16 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q8_0 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q4_0 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q4_1 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q5_0) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q5_0 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_Q5_1) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q5_1 && src1_type == GGML_TYPE_F32) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_F32 && src1_type == GGML_TYPE_IQ4_NL) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q8_0 && src1_type == GGML_TYPE_Q8_0) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q5_0 && src1_type == GGML_TYPE_Q5_0) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q5_1 && src1_type == GGML_TYPE_Q5_1) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q4_0 && src1_type == GGML_TYPE_Q4_0) {
+ return true;
+ }
+ if (src0_type == GGML_TYPE_Q4_1 && src1_type == GGML_TYPE_Q4_1) {
+ return true;
+ }
+ return false;
+ }
+ case GGML_OP_REPEAT_BACK:
+ {
+ ggml_type src0_type = op->src[0]->type;
+ return src0_type == GGML_TYPE_F32;
+ }
+ case GGML_OP_CONCAT:
+ case GGML_OP_DUP:
+ case GGML_OP_ARGMAX:
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_ADD:
+ case GGML_OP_ADD1:
+ case GGML_OP_ADD_ID:
+ case GGML_OP_SUB:
+ case GGML_OP_COUNT_EQUAL:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ case GGML_OP_REPEAT:
+ return true;
+ case GGML_OP_PAD_REFLECT_1D:
+ return ggml_is_contiguous(op->src[0]) && op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32;
+ case GGML_OP_SQR:
+ case GGML_OP_SQRT:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_CLAMP:
+ case GGML_OP_LOG:
+#if defined (GGML_SYCL_F16)
+ return ((op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_F16) && (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16) && (op->type == op->src[0]->type));
+#else
+ return (op->type == GGML_TYPE_F32 && op->src[0]->type == GGML_TYPE_F32) && (op->type == op->src[0]->type);
+#endif
+ case GGML_OP_NORM:
+ case GGML_OP_L2_NORM:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_RMS_NORM:
+ return true;
+ case GGML_OP_RMS_NORM_BACK:
+ return ggml_is_contiguous(op->src[0]);
+ case GGML_OP_SCALE:
+ return true;
+ case GGML_OP_CONT:
+ return op->src[0]->type != GGML_TYPE_BF16;
+ case GGML_OP_TRI:
+ {
+ const ggml_tensor * src0 = op->src[0];
+ return src0 &&
+ op->type == GGML_TYPE_F32 &&
+ ggml_is_contiguous(src0);
+ }
+ case GGML_OP_DIAG_MASK_INF:
+ return true;
+ case GGML_OP_SOFT_MAX:
+ return true;
+ case GGML_OP_SOFT_MAX_BACK: {
+ float max_bias = 0.0f;
+ memcpy(&max_bias, (const float *) op->op_params + 1, sizeof(float));
+ return max_bias == 0.0f;
+ }
+ case GGML_OP_ROPE:
+ case GGML_OP_IM2COL:
+ return true;
+ case GGML_OP_UPSCALE:
+ return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST && !(op->op_params[0] & GGML_SCALE_FLAG_ANTIALIAS);
+ case GGML_OP_SUM:
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_MEAN:
+ return ggml_is_contiguous(op->src[0]);
+ case GGML_OP_ARGSORT:
+ return op->src[0]->ne[0] * sizeof(int) <=
+ ggml_sycl_info().devices[device].smpbo;
+ case GGML_OP_TOP_K: {
+ const ggml_tensor * src0 = op->src[0];
+ const int k = op->ne[0];
+ return src0 &&
+ op->type == GGML_TYPE_I32 &&
+ src0->type == GGML_TYPE_F32 &&
+ ggml_is_contiguous(src0) &&
+ k > 0 && k <= 32;
+ }
+ case GGML_OP_POOL_2D:
+ case GGML_OP_ACC:
+ return true;
+ case GGML_OP_PAD:
+ // TODO: add circular padding support for SYCL, see https://github.com/ggml-org/llama.cpp/pull/16985
+ if (ggml_get_op_params_i32(op, 8) != 0) {
+ return false;
+ }
+ return ggml_is_contiguous(op->src[0]);
+ case GGML_OP_LEAKY_RELU:
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ case GGML_OP_RWKV_WKV6:
+ case GGML_OP_RWKV_WKV7:
+ case GGML_OP_GATED_LINEAR_ATTN:
+ return true;
+ case GGML_OP_SSM_CONV:
+ return op->type == GGML_TYPE_F32 &&
+ op->src[0]->type == GGML_TYPE_F32 &&
+ op->src[1]->type == GGML_TYPE_F32;
+ case GGML_OP_ROLL:
+ return op->type == GGML_TYPE_F32;
+ case GGML_OP_ARANGE:
+ return op->type == GGML_TYPE_F32;
+ default:
+ return false;
+ }
+
+ GGML_UNUSED(dev);
+}
+
+static bool ggml_backend_sycl_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
+ if (buft->iface.get_name != ggml_backend_sycl_buffer_type_get_name) {
+ return false;
+ }
+ ggml_backend_sycl_buffer_type_context * buft_ctx = (ggml_backend_sycl_buffer_type_context *)buft->context;
+ ggml_backend_sycl_device_context * sycl_ctx = (ggml_backend_sycl_device_context *)dev->context;
+ return buft_ctx->device == sycl_ctx->device;
+}
+
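+ // Effective batch size of an op, used below to decide whether offloading
+ // to the device is worthwhile.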
+static int64_t get_op_batch_size(const ggml_tensor * op) {
+ switch (op->op) {
+ case GGML_OP_GET_ROWS:
+ return 0;
+ case GGML_OP_MUL_MAT:
+ return op->ne[1];
+ case GGML_OP_MUL_MAT_ID:
+ case GGML_OP_ROPE:
+ return op->ne[2];
+ default:
+ return ggml_nrows(op);
+ }
+}
+
+static bool ggml_backend_sycl_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+ ggml_backend_sycl_device_context * sycl_ctx = (ggml_backend_sycl_device_context *)dev->context;
+ return get_op_batch_size(op) >= sycl_ctx->op_offload_min_batch_size;
+}
+
+static ggml_backend_event_t
+ggml_backend_sycl_device_event_new(ggml_backend_dev_t dev) {
+
+#ifdef GGML_SYCL_NO_PEER_COPY
+ return nullptr;
+#else
+ sycl::event *event_ptr = new sycl::event();
+
+ return new ggml_backend_event{
+ /* .device = */ dev,
+ /* .context = */ event_ptr,
+ };
+#endif
+}
+
+static void ggml_backend_sycl_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) try {
+ GGML_UNUSED(dev);
+ if (event == nullptr) {
+ return;
+ }
+
+ if (event->context != nullptr) {
+ sycl::event *sycl_event = static_cast<sycl::event *>(event->context);
+ delete sycl_event;
+ event->context = nullptr;
+ }
+
+ delete event;
+} catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+
+static void ggml_backend_sycl_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) try {
+ GGML_UNUSED(dev);
+ GGML_SYCL_DEBUG("[SYCL] call %s\n", __func__);
+
+ sycl::event *sycl_event = static_cast<sycl::event *>(event->context);
+ SYCL_CHECK(CHECK_TRY_ERROR(sycl_event->wait()));
+} catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static const ggml_backend_device_i ggml_backend_sycl_device_interface = {
+ /* .get_name = */ ggml_backend_sycl_device_get_name,
+ /* .get_description = */ ggml_backend_sycl_device_get_description,
+ /* .get_memory = */ ggml_backend_sycl_device_get_memory,
+ /* .get_type = */ ggml_backend_sycl_device_get_type,
+ /* .get_props = */ ggml_backend_sycl_device_get_props,
+ /* .init_backend = */ ggml_backend_sycl_device_init,
+ /* .get_buffer_type = */ ggml_backend_sycl_device_get_buffer_type,
+ /* .get_host_buffer_type = */ ggml_backend_sycl_device_get_host_buffer_type,
+ /* .buffer_from_host_ptr = */ ggml_backend_sycl_device_buffer_from_host_ptr,
+ /* .supports_op = */ ggml_backend_sycl_device_supports_op,
+ /* .supports_buft = */ ggml_backend_sycl_device_supports_buft,
+ /* .offload_op = */ ggml_backend_sycl_device_offload_op,
+ /* .event_new = */ ggml_backend_sycl_device_event_new,
+ /* .event_free = */ ggml_backend_sycl_device_event_free,
+ /* .event_synchronize = */ ggml_backend_sycl_device_event_synchronize,
+};
+
+// backend reg
+
+struct ggml_backend_sycl_reg_context {
+ std::vector<ggml_backend_dev_t> devices;
+};
+
+static const char * ggml_backend_sycl_reg_get_name(ggml_backend_reg_t reg) {
+ GGML_UNUSED(reg);
+ return GGML_SYCL_NAME;
+}
+
+static size_t ggml_backend_sycl_reg_get_device_count(ggml_backend_reg_t reg) {
+ ggml_backend_sycl_reg_context * ctx = (ggml_backend_sycl_reg_context *)reg->context;
+ return ctx->devices.size();
+}
+
+static ggml_backend_dev_t ggml_backend_sycl_reg_get_device(ggml_backend_reg_t reg, size_t index) {
+ ggml_backend_sycl_reg_context * ctx = (ggml_backend_sycl_reg_context *)reg->context;
+ GGML_ASSERT(index < ctx->devices.size());
+ return ctx->devices[index];
+}
+
+static void *ggml_backend_sycl_reg_get_proc_address(ggml_backend_reg_t reg, const char *name) {
+ GGML_UNUSED(reg);
+
+ if (strcmp(name, "ggml_backend_split_buffer_type") == 0) {
+ return (void *)ggml_backend_sycl_split_buffer_type;
+ }
+
+ // SYCL doesn't support registering host memory, left here for reference
+ // "ggml_backend_register_host_buffer"
+ // "ggml_backend_unregister_host_buffer"
+ GGML_UNUSED(name);
+ return nullptr;
+}
+
+static const ggml_backend_reg_i ggml_backend_sycl_reg_interface = {
+ /* .get_name = */ ggml_backend_sycl_reg_get_name,
+ /* .get_device_count = */ ggml_backend_sycl_reg_get_device_count,
+ /* .get_device = */ ggml_backend_sycl_reg_get_device,
+ /* .get_proc_address = */ ggml_backend_sycl_reg_get_proc_address,
+};
+
+
+// backend registry
+
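+// The registry is created once under a mutex, with one ggml_backend_device per SYCL device reported by dpct.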
+ggml_backend_reg_t ggml_backend_sycl_reg() {
+ static ggml_backend_reg reg;
+ static bool initialized = false;
+
+ {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+ if (!initialized) {
+ ggml_backend_sycl_reg_context * ctx = new ggml_backend_sycl_reg_context;
+ const int min_batch_size = getenv("GGML_OP_OFFLOAD_MIN_BATCH") ? atoi(getenv("GGML_OP_OFFLOAD_MIN_BATCH")) : 32;
+
+ for (int i = 0; i < ggml_sycl_info().device_count; i++) {
+ ggml_backend_sycl_device_context * dev_ctx = new ggml_backend_sycl_device_context;
+ dev_ctx->device = i;
+ dev_ctx->name = GGML_SYCL_NAME + std::to_string(i);
+
+ ggml_sycl_set_device(i);
+
+ dpct::device_info prop;
+ SYCL_CHECK(CHECK_TRY_ERROR(dpct::get_device_info(
+ prop, dpct::dev_mgr::instance().get_device(i))));
+
+ dev_ctx->description = prop.get_name();
+ dev_ctx->op_offload_min_batch_size = min_batch_size;
+
+ ggml_backend_dev_t dev = new ggml_backend_device {
+ /* .iface = */ ggml_backend_sycl_device_interface,
+ /* .reg = */ &reg,
+ /* .context = */ dev_ctx
+ };
+ ctx->devices.push_back(dev);
+ }
+
+ reg = ggml_backend_reg {
+ /* .api_version = */ GGML_BACKEND_API_VERSION,
+ /* .iface = */ ggml_backend_sycl_reg_interface,
+ /* .context = */ ctx
+ };
+ }
+
+ initialized = true;
+ }
+
+ return &reg;
+}
+
+ggml_backend_t ggml_backend_sycl_init(int device) {
+ GGML_SYCL_DEBUG("[SYCL] call ggml_backend_sycl_init\n");
+ ggml_check_sycl();
+
+ check_allow_gpu_index(device);
+
+ ggml_backend_sycl_context * ctx = new ggml_backend_sycl_context(device);
+ if (ctx == nullptr) {
+ GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
+ return nullptr;
+ };
+
+ ggml_backend_t sycl_backend = new ggml_backend {
+ /* .guid = */ ggml_backend_sycl_guid(),
+ /* .iface = */ ggml_backend_sycl_interface,
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_sycl_reg(), device),
+ /* .context = */ ctx
+ };
+
+ return sycl_backend;
+}
+
+GGML_BACKEND_DL_IMPL(ggml_backend_sycl_reg)
diff --git a/llama.cpp/ggml/src/ggml-sycl/gla.cpp b/llama.cpp/ggml/src/ggml-sycl/gla.cpp
new file mode 100644
index 0000000..879184f
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/gla.cpp
@@ -0,0 +1,106 @@
+#include <sycl/sycl.hpp>
+
+#include "common.hpp"
+
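+// Gated linear attention: one work-group per (batch, head) pair, one work-item
+// per channel. For each token the per-head state follows the recurrence
+//   state = state * td + k * v,   y = scale * dot(r, state)
+// with k/r/td staged in local memory; the final state is written back after
+// the T*C outputs.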
+template <u_int HEAD_SIZE>
+static void gated_linear_attn_f32_kernel(const dpct::queue_ptr stream, u_int B, u_int T, u_int C, u_int H, float scale,
+ const float * k, const float * v, const float * r, const float * td,
+ const float * s, float * dst) {
+ const u_int head_size = HEAD_SIZE;
+ const u_int state_size = C * head_size;
+ const u_int n_seq_tokens = T / B;
+ sycl::range<1> block_dims((C / H));
+ sycl::range<1> grid_dims((B * H));
+ stream->submit([&](sycl::handler & cgh) {
+ /* local memory accessors */
+ auto _k = sycl::local_accessor<float, 1>(sycl::range<1>(head_size), cgh);
+ auto _r = sycl::local_accessor<float, 1>(sycl::range<1>(head_size), cgh);
+ auto _td = sycl::local_accessor<float, 1>(sycl::range<1>(head_size), cgh);
+
+ cgh.parallel_for(sycl::nd_range<1>(grid_dims * block_dims, block_dims), [=](sycl::nd_item<1> item) {
+ u_int tid = item.get_local_id(0);
+ u_int bid = item.get_group(0);
+
+ u_int batch_i = bid / H;
+ u_int head_i = bid % H;
+
+ float state[head_size];
+
+#pragma unroll
+ for (u_int i = 0; i < head_size; i++) {
+ state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid];
+ }
+
+ for (u_int t = batch_i * n_seq_tokens * C + head_i * head_size + tid;
+ t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid; t += C) {
+
+ item.barrier(sycl::access::fence_space::local_space); //sync threads
+ _k[tid] = k[t];
+ _r[tid] = r[t];
+ _td[tid] = td[t];
+ item.barrier(sycl::access::fence_space::local_space); //sync threads
+
+ const float _v = v[t];
+ float y = 0;
+
+ for (u_int j = 0; j < head_size; j += 4) {
+ const sycl::float4 & k = (sycl::float4 &) (_k[j]);
+ const sycl::float4 & r = (sycl::float4 &) (_r[j]);
+ const sycl::float4 & td = (sycl::float4 &) (_td[j]);
+ sycl::float4 & s = (sycl::float4 &) (state[j]);
+ sycl::float4 kv;
+
+ kv.x() = k.x() * _v;
+ kv.y() = k.y() * _v;
+ kv.z() = k.z() * _v;
+ kv.w() = k.w() * _v;
+
+ s.x() = s.x() * td.x() + kv.x();
+ s.y() = s.y() * td.y() + kv.y();
+ s.z() = s.z() * td.z() + kv.z();
+ s.w() = s.w() * td.w() + kv.w();
+
+ y += r.x() * s.x();
+ y += r.y() * s.y();
+ y += r.z() * s.z();
+ y += r.w() * s.w();
+ }
+ dst[t] = y * scale;
+ }
+#pragma unroll
+ for (u_int i = 0; i < head_size; i++) {
+ dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i];
+ }
+ });
+ });
+}
+
+void ggml_sycl_op_gated_linear_attn(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/5);
+ const float * k_d = static_cast<const float *>(dst->src[0]->data);
+ const float * v_d = static_cast<const float *>(dst->src[1]->data);
+ const float * r_d = static_cast<const float *>(dst->src[2]->data);
+ const float * td_d = static_cast<const float *>(dst->src[3]->data);
+ const float * s_d = static_cast<const float *>(dst->src[4]->data);
+
+ const int64_t B = dst->src[4]->ne[1];
+ const int64_t T = dst->src[0]->ne[2];
+ const int64_t C = dst->ne[0];
+ const int64_t H = dst->src[0]->ne[1];
+
+ dpct::queue_ptr stream = ctx.stream();
+ GGML_ASSERT(dst->src[4]->type == GGML_TYPE_F32);
+ GGML_ASSERT(C % H == 0);
+ GGML_ASSERT(C / H == 64 || C / H == 128);
+
+ float scale;
+ memcpy(&scale, dst->op_params, sizeof(float));
+
+ float * dst_d = (float *) dst->data;
+
+ if (C / H == 64) {
+ gated_linear_attn_f32_kernel<64>(stream, B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d);
+ } else {
+ gated_linear_attn_f32_kernel<128>(stream, B, T, C, H, scale, k_d, v_d, r_d, td_d, s_d, dst_d);
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/gla.hpp b/llama.cpp/ggml/src/ggml-sycl/gla.hpp
new file mode 100644
index 0000000..607cf3a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/gla.hpp
@@ -0,0 +1,8 @@
+#ifndef GGML_SYCL_GLA_HPP
+#define GGML_SYCL_GLA_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_gated_linear_attn(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_GLA_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/im2col.cpp b/llama.cpp/ggml/src/ggml-sycl/im2col.cpp
new file mode 100644
index 0000000..6d75d34
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/im2col.cpp
@@ -0,0 +1,136 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "im2col.hpp"
+
+#include <sycl/sycl.hpp>
+#include <type_traits> // For std::is_same_v
+
+#include "ggml.h"
+
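+// Each work-item decomposes one flattened index of the OW*KW*KH range into
+// (kx, ky, ix) and scatters the corresponding input value into the
+// [batch, OH, OW, IC*KH*KW] destination layout; out-of-bounds input
+// positions contribute 0.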
+template <typename T>
+static void im2col_kernel(const float * x, T * dst, int64_t batch_offset, int64_t offset_delta, int64_t IC, int64_t IW,
+ int64_t IH, int64_t OH, int64_t OW, int64_t KW, int64_t KH, int64_t pelements, int64_t CHW,
+ int s0, int s1, int p0, int p1, int d0, int d1, const sycl::nd_item<3> & item_ct1) {
+ const int64_t work_group_size = item_ct1.get_local_range(2);
+ const int64_t global_id = item_ct1.get_local_id(2) + (work_group_size * item_ct1.get_group(2));
+
+ // make each work-item handle multiple elements, since the SYCL global range cannot exceed INT_MAX
+ for (int64_t i = global_id; i < pelements; i += (work_group_size * item_ct1.get_group_range(2))) {
+ const int64_t ksize = OW * KH;
+ const int64_t kx = i / ksize;
+ const int64_t kd = kx * ksize;
+ const int64_t ky = (i - kd) / OW;
+ const int64_t ix = i % OW;
+
+ const int64_t oh = item_ct1.get_group(1);
+ const int64_t batch = item_ct1.get_group(0) / IC;
+ const int64_t ic = item_ct1.get_group(0) % IC;
+
+ const int64_t iiw = (ix * s0) + (kx * d0) - p0;
+ const int64_t iih = (oh * s1) + (ky * d1) - p1;
+
+ const int64_t offset_dst = (((batch * OH + oh) * OW + ix) * CHW) + (ic * (KW * KH) + ky * KW + kx);
+
+ const int64_t offset_src_base = (ic * offset_delta) + (batch * batch_offset);
+ const int64_t offset_src = offset_src_base + (iih * IW) + iiw;
+
+ const bool out_of_bounds = (iih < 0 || iih >= IH || iiw < 0 || iiw >= IW);
+ const float src_val = out_of_bounds ? 0.0f : x[offset_src];
+
+ if constexpr (std::is_same_v<T, sycl::half>) {
+ dst[offset_dst] = sycl::half(src_val);
+ } else if constexpr (std::is_same_v<T, float>) {
+ dst[offset_dst] = src_val;
+ }
+ }
+}
+
+template <typename T>
+static void im2col_sycl_internal(const float * x, T * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW,
+ int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta,
+ int s0, int s1, int p0, int p1, int d0, int d1, queue_ptr stream) {
+ const int64_t parallel_elements = OW * KW * KH;
+ const int64_t num_blocks = (parallel_elements + SYCL_IM2COL_BLOCK_SIZE - 1) / SYCL_IM2COL_BLOCK_SIZE;
+
+ // shrink the work-group size until the global range fits in a 32-bit int
+ int64_t local_size = downsample_sycl_global_range(batch * IC * OH * num_blocks, SYCL_IM2COL_BLOCK_SIZE);
+
+ sycl::range<3> block_nums(batch * IC, OH, num_blocks);
+ sycl::range<3> local_range(1, 1, local_size);
+
+ const int64_t CHW = IC * KH * KW;
+
+ stream->parallel_for(sycl::nd_range<3>(block_nums * local_range, local_range), [=](sycl::nd_item<3> item_ct1) {
+ im2col_kernel<T>(x, dst, batch_offset, offset_delta, IC, IW, IH, OH, OW, KW, KH, parallel_elements, CHW, s0, s1,
+ p0, p1, d0, d1, item_ct1);
+ });
+}
+
+static void im2col_sycl_f16(const float * x, sycl::half * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH,
+ int64_t KW, int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset,
+ int64_t offset_delta, int s0, int s1, int p0, int p1, int d0, int d1, queue_ptr stream) {
+ if (!stream->get_device().has(sycl::aspect::fp16)) {
+ throw sycl::exception(sycl::make_error_code(sycl::errc::kernel_not_supported),
+ "Device does not support half precision (fp16) operations!");
+ }
+ im2col_sycl_internal<sycl::half>(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0,
+ p1, d0, d1, stream);
+}
+
+static void im2col_sycl_f32(const float * x, float * dst, int64_t IW, int64_t IH, int64_t OW, int64_t OH, int64_t KW,
+ int64_t KH, int64_t IC, int64_t batch, int64_t batch_offset, int64_t offset_delta, int s0,
+ int s1, int p0, int p1, int d0, int d1, queue_ptr stream) {
+ im2col_sycl_internal<float>(x, dst, IW, IH, OW, OH, KW, KH, IC, batch, batch_offset, offset_delta, s0, s1, p0, p1,
+ d0, d1, stream);
+}
+
+void ggml_sycl_op_im2col(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F16 || dst->type == GGML_TYPE_F32);
+
+ const int32_t s0 = ((const int32_t *) (dst->op_params))[0];
+ const int32_t s1 = ((const int32_t *) (dst->op_params))[1];
+ const int32_t p0 = ((const int32_t *) (dst->op_params))[2];
+ const int32_t p1 = ((const int32_t *) (dst->op_params))[3];
+ const int32_t d0 = ((const int32_t *) (dst->op_params))[4];
+ const int32_t d1 = ((const int32_t *) (dst->op_params))[5];
+
+ const bool is_2D = ((const int32_t *) (dst->op_params))[6] == 1;
+
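+ // in the 1D case (is_2D == false) the height dimensions IH/KH/OH collapse to 1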
+ const int64_t IC = src1->ne[is_2D ? 2 : 1];
+ const int64_t IH = is_2D ? src1->ne[1] : 1;
+ const int64_t IW = src1->ne[0];
+
+ const int64_t KH = is_2D ? src0->ne[1] : 1;
+ const int64_t KW = src0->ne[0];
+
+ const int64_t OH = is_2D ? dst->ne[2] : 1;
+ const int64_t OW = dst->ne[1];
+
+ const size_t delta_offset = src1->nb[is_2D ? 2 : 1] / sizeof(float);
+ const int64_t batch = src1->ne[is_2D ? 3 : 2];
+ const size_t batch_offset = src1->nb[is_2D ? 3 : 2] / sizeof(float);
+
+ queue_ptr stream = ctx.stream();
+
+ if (dst->type == GGML_TYPE_F16) {
+ im2col_sycl_f16((const float *) src1->data, (sycl::half *) dst->data, IW, IH, OW, OH, KW, KH, IC, batch,
+ batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream);
+ } else {
+ im2col_sycl_f32((const float *) src1->data, (float *) dst->data, IW, IH, OW, OH, KW, KH, IC, batch,
+ batch_offset, delta_offset, s0, s1, p0, p1, d0, d1, stream);
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/im2col.hpp b/llama.cpp/ggml/src/ggml-sycl/im2col.hpp
new file mode 100644
index 0000000..dbbb248
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/im2col.hpp
@@ -0,0 +1,21 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_IM2COL_HPP
+#define GGML_SYCL_IM2COL_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_im2col(
+ ggml_backend_sycl_context & ctx, ggml_tensor *dst);
+
+#endif // GGML_SYCL_IM2COL_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/mmq.cpp b/llama.cpp/ggml/src/ggml-sycl/mmq.cpp
new file mode 100644
index 0000000..ffb272a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/mmq.cpp
@@ -0,0 +1,3030 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "mmq.hpp"
+#include "vecdotq.hpp"
+
+typedef void (*allocate_tiles_sycl_t)(
+ int** x_ql,
+ sycl::half2** x_dm,
+ int** x_qh,
+ int** x_sc);
+typedef void (*load_tiles_sycl_t)(
+ const void* __restrict__ vx,
+ int* __restrict__ x_ql,
+ sycl::half2* __restrict__ x_dm,
+ int* __restrict__ x_qh,
+ int* __restrict__ x_sc,
+ const int& i_offset,
+ const int& i_max,
+ const int& k,
+ const int& blocks_per_row);
+typedef float (*vec_dot_q_mul_mat_sycl_t)(
+ const int* __restrict__ x_ql,
+ const sycl::half2* __restrict__ x_dm,
+ const int* __restrict__ x_qh,
+ const int* __restrict__ x_sc,
+ const int* __restrict__ y_qs,
+ const sycl::half2* __restrict__ y_ms,
+ const int& i,
+ const int& j,
+ const int& k);
+
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q4_0(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_qs_q4_0, float *tile_x_d_q4_0) {
+ (void)x_qh; (void)x_sc;
+
+ *x_ql = tile_x_qs_q4_0;
+ *x_dm = (sycl::half2 *)tile_x_d_q4_0;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q4_0(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh; (void)x_sc;
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI4_0;
+ const int kqsx = k % QI4_0;
+
+ const block_q4_0 * bx0 = (const block_q4_0 *) vx;
+
+ float * x_dmf = (float *) x_dm;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbx;
+
+ x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
+ // x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbx] = bxi->d;
+ }
+
+ const int blocks_per_tile_x_row = WARP_SIZE / QI4_0;
+ const int kbxd = k % blocks_per_tile_x_row;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_0) {
+ int i = i0 + i_offset * QI4_0 + k / blocks_per_tile_x_row;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q4_0 * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dmf[i * (WARP_SIZE/QI4_0) + i / QI4_0 + kbxd] = bxi->d;
+ }
+}
+
+static __dpct_inline__ float vec_dot_q4_0_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh; (void)x_sc;
+
+ const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
+ const float * x_dmf = (const float *) x_dm;
+
+ int u[2*VDR_Q4_0_Q8_1_MMQ];
+
+#pragma unroll
+ for (int l = 0; l < VDR_Q4_0_Q8_1_MMQ; ++l) {
+ u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
+ u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_0) % WARP_SIZE];
+ }
+
+ return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMQ>
+ (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dmf[i * (WARP_SIZE/QI4_0) + i/QI4_0 + k/QI4_0],
+ y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q4_1(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_qs_q4_1, sycl::half2 *tile_x_dm_q4_1) {
+ (void)x_qh; (void)x_sc;
+
+ *x_ql = tile_x_qs_q4_1;
+ *x_dm = tile_x_dm_q4_1;
+}
+
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q4_1(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh; (void)x_sc;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI4_1;
+ const int kqsx = k % QI4_1;
+
+ const block_q4_1 * bx0 = (const block_q4_1 *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbx;
+
+ x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+ }
+
+ const int blocks_per_tile_x_row = WARP_SIZE / QI4_1;
+ const int kbxd = k % blocks_per_tile_x_row;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_1) {
+ int i = i0 + i_offset * QI4_1 + k / blocks_per_tile_x_row;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q4_1 * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dm[i * (WARP_SIZE/QI4_1) + i / QI4_1 + kbxd] = bxi->dm;
+ }
+}
+
+static __dpct_inline__ float vec_dot_q4_1_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh; (void)x_sc;
+
+ const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
+
+ int u[2*VDR_Q4_1_Q8_1_MMQ];
+
+#pragma unroll
+ for (int l = 0; l < VDR_Q4_1_Q8_1_MMQ; ++l) {
+ u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
+ u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI4_1) % WARP_SIZE];
+ }
+
+ return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMQ>
+ (&x_ql[i * (WARP_SIZE + 1) + k], u, x_dm[i * (WARP_SIZE/QI4_1) + i/QI4_1 + k/QI4_1],
+ y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q5_0(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_ql_q5_0, float *tile_x_d_q5_0) {
+ (void)x_qh; (void)x_sc;
+
+ *x_ql = tile_x_ql_q5_0;
+ *x_dm = (sycl::half2 *)tile_x_d_q5_0;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q5_0(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh; (void)x_sc;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI5_0;
+ const int kqsx = k % QI5_0;
+
+ const block_q5_0 * bx0 = (const block_q5_0 *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbx;
+
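+ // q5_0: low 4 bits in qs, 5th bit in qh; scatter the high bits into the
+ // packed nibbles, then subtract the 16 offset to get signed values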
+ const int ql = get_int_from_uint8(bxi->qs, kqsx);
+ const int qh = get_int_from_uint8(bxi->qh, 0) >> (4 * (k % QI5_0));
+
+ int qs0 = (ql >> 0) & 0x0F0F0F0F;
+ qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
+ qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
+ qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
+ qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
+ qs0 = dpct::vectorized_binary<sycl::char4>(
+ qs0, 0x10101010, dpct::sub_sat()); // subtract 16
+
+ x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
+
+ int qs1 = (ql >> 4) & 0x0F0F0F0F;
+ qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
+ qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
+ qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
+ qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
+ qs1 = dpct::vectorized_binary<sycl::char4>(
+ qs1, 0x10101010, dpct::sub_sat()); // subtract 16
+
+ x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
+ }
+
+ const int blocks_per_tile_x_row = WARP_SIZE / QI5_0;
+ const int kbxd = k % blocks_per_tile_x_row;
+ float * x_dmf = (float *) x_dm;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_0) {
+ int i = i0 + i_offset * QI5_0 + k / blocks_per_tile_x_row;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q5_0 * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dmf[i * (WARP_SIZE/QI5_0) + i / QI5_0 + kbxd] = bxi->d;
+ }
+}
+
+static __dpct_inline__ float vec_dot_q5_0_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh; (void)x_sc;
+
+ const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
+ const int index_bx = i * (WARP_SIZE/QI5_0) + i/QI5_0 + k/QI5_0;
+ const float * x_dmf = (const float *) x_dm;
+ const float * y_df = (const float *) y_ds;
+
+ int u[2*VDR_Q5_0_Q8_1_MMQ];
+
+#pragma unroll
+ for (int l = 0; l < VDR_Q5_0_Q8_1_MMQ; ++l) {
+ u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
+ u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_0) % WARP_SIZE];
+ }
+
+ return vec_dot_q8_0_q8_1_impl<QR5_0*VDR_Q5_0_Q8_1_MMQ>
+ (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dmf[index_bx], y_df[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q5_1(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_ql_q5_1, sycl::half2 *tile_x_dm_q5_1) {
+ (void)x_qh; (void)x_sc;
+
+ *x_ql = tile_x_ql_q5_1;
+ *x_dm = tile_x_dm_q5_1;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q5_1(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh; (void)x_sc;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI5_1;
+ const int kqsx = k % QI5_1;
+
+ const block_q5_1 * bx0 = (const block_q5_1 *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbx;
+
+ const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
+ const int qh = get_int_from_uint8_aligned(bxi->qh, 0) >> (4 * (k % QI5_1));
+
+ int qs0 = (ql >> 0) & 0x0F0F0F0F;
+ qs0 |= (qh << 4) & 0x00000010; // 0 -> 4
+ qs0 |= (qh << 11) & 0x00001000; // 1 -> 12
+ qs0 |= (qh << 18) & 0x00100000; // 2 -> 20
+ qs0 |= (qh << 25) & 0x10000000; // 3 -> 28
+
+ x_ql[i * (2*WARP_SIZE + 1) + 2*k+0] = qs0;
+
+ int qs1 = (ql >> 4) & 0x0F0F0F0F;
+ qs1 |= (qh >> 12) & 0x00000010; // 16 -> 4
+ qs1 |= (qh >> 5) & 0x00001000; // 17 -> 12
+ qs1 |= (qh << 2) & 0x00100000; // 18 -> 20
+ qs1 |= (qh << 9) & 0x10000000; // 19 -> 28
+
+ x_ql[i * (2*WARP_SIZE + 1) + 2*k+1] = qs1;
+ }
+
+ const int blocks_per_tile_x_row = WARP_SIZE / QI5_1;
+ const int kbxd = k % blocks_per_tile_x_row;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_1) {
+ int i = i0 + i_offset * QI5_1 + k / blocks_per_tile_x_row;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q5_1 * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dm[i * (WARP_SIZE/QI5_1) + i / QI5_1 + kbxd] = bxi->dm;
+ }
+}
+
+static __dpct_inline__ float vec_dot_q5_1_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh; (void)x_sc;
+
+ const int kyqs = k % (QI8_1/2) + QI8_1 * (k / (QI8_1/2));
+ const int index_bx = i * (WARP_SIZE/QI5_1) + i/QI5_1 + k/QI5_1;
+
+ int u[2*VDR_Q5_1_Q8_1_MMQ];
+
+#pragma unroll
+ for (int l = 0; l < VDR_Q5_1_Q8_1_MMQ; ++l) {
+ u[2*l+0] = y_qs[j * WARP_SIZE + (kyqs + l) % WARP_SIZE];
+ u[2*l+1] = y_qs[j * WARP_SIZE + (kyqs + l + QI5_1) % WARP_SIZE];
+ }
+
+ return vec_dot_q8_1_q8_1_impl<QR5_1*VDR_Q5_1_Q8_1_MMQ>
+ (&x_ql[i * (2*WARP_SIZE + 1) + 2 * k], u, x_dm[index_bx], y_ds[j * (WARP_SIZE/QI8_1) + (2*k/QI8_1) % (WARP_SIZE/QI8_1)]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q8_0(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_qs_q8_0, float *tile_x_d_q8_0) {
+ (void)x_qh; (void)x_sc;
+
+ *x_ql = tile_x_qs_q8_0;
+ *x_dm = (sycl::half2 *)tile_x_d_q8_0;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q8_0(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh; (void)x_sc;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI8_0;
+ const int kqsx = k % QI8_0;
+ float * x_dmf = (float *) x_dm;
+
+ const block_q8_0 * bx0 = (const block_q8_0 *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbx;
+
+ x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_int8(bxi->qs, kqsx);
+ }
+
+ const int blocks_per_tile_x_row = WARP_SIZE / QI8_0;
+ const int kbxd = k % blocks_per_tile_x_row;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI8_0) {
+ int i = i0 + i_offset * QI8_0 + k / blocks_per_tile_x_row;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q8_0 * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dmf[i * (WARP_SIZE/QI8_0) + i / QI8_0 + kbxd] = bxi->d;
+ }
+}
+
+static __dpct_inline__ float vec_dot_q8_0_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh; (void)x_sc;
+
+ const float * x_dmf = (const float *) x_dm;
+ const float * y_df = (const float *) y_ds;
+
+ return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMQ>
+ (&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[j * WARP_SIZE + k], x_dmf[i * (WARP_SIZE/QI8_0) + i/QI8_0 + k/QI8_0],
+ y_df[j * (WARP_SIZE/QI8_1) + k/QI8_1]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q2_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_ql_q2_K, sycl::half2 *tile_x_dm_q2_K,
+ int *tile_x_sc_q2_K) {
+ (void)x_qh;
+
+ *x_ql = tile_x_ql_q2_K;
+ *x_dm = tile_x_dm_q2_K;
+ *x_sc = tile_x_sc_q2_K;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q2_K(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI2_K;
+ const int kqsx = k % QI2_K;
+
+ const block_q2_K * bx0 = (const block_q2_K *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q2_K * bxi = bx0 + i*blocks_per_row + kbx;
+
+ x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+ }
+
+ const int blocks_per_tile_x_row = WARP_SIZE / QI2_K;
+ const int kbxd = k % blocks_per_tile_x_row;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI2_K) {
+ int i = (i0 + i_offset * QI2_K + k / blocks_per_tile_x_row) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q2_K * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dm[i * (WARP_SIZE/QI2_K) + i / QI2_K + kbxd] = bxi->dm;
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
+ int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q2_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI2_K/4);
+
+ x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = get_int_from_uint8_aligned(bxi->scales, k % (QI2_K/4));
+ }
+}
+
+#define VDR_Q2_K_Q8_1_MMQ 2
+// contiguous u/y values
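+// dm2 packs the q2_K (d, dmin) pair; the result is d8 * (d * sumi_d - dmin * sumi_m)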
+static __dpct_inline__ float
+vec_dot_q2_K_q8_1_impl_mmq(const int *__restrict__ v, const int *__restrict__ u,
+ const uint8_t *__restrict__ scales,
+ const sycl::half2 &dm2, const float &d8) {
+
+ int sumi_d = 0;
+ int sumi_m = 0;
+
+#pragma unroll
+ for (int i0 = 0; i0 < QI8_1; i0 += QI8_1/2) {
+ int sumi_d_sc = 0;
+
+ const int sc = scales[i0 / (QI8_1/2)];
+
+ // fill int with 4x m
+ int m = sc >> 4;
+ m |= m << 8;
+ m |= m << 16;
+
+#pragma unroll
+ for (int i = i0; i < i0 + QI8_1/2; ++i) {
+ sumi_d_sc = dpct::dp4a(v[i], u[i], sumi_d_sc); // SIMD dot product
+ sumi_m = dpct::dp4a(m, u[i],
+ sumi_m); // multiply sum of q8_1 values with m
+ }
+
+ sumi_d += sumi_d_sc * (sc & 0xF);
+ }
+
+ const sycl::float2 dm2f =
+ dm2.convert<float, sycl::rounding_mode::automatic>();
+
+ return d8 * (dm2f.x() * sumi_d - dm2f.y() * sumi_m);
+}
+
+static __dpct_inline__ float vec_dot_q2_K_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh;
+
+ const int kbx = k / QI2_K;
+ const int ky = (k % QI2_K) * QR2_K;
+ const float * y_df = (const float *) y_ds;
+
+ int v[QR2_K*VDR_Q2_K_Q8_1_MMQ];
+
+ const int kqsx = i * (WARP_SIZE + 1) + kbx*QI2_K + (QI2_K/2) * (ky/(2*QI2_K)) + ky % (QI2_K/2);
+ const int shift = 2 * ((ky % (2*QI2_K)) / (QI2_K/2));
+
+#pragma unroll
+ for (int l = 0; l < QR2_K*VDR_Q2_K_Q8_1_MMQ; ++l) {
+ v[l] = (x_ql[kqsx + l] >> shift) & 0x03030303;
+ }
+
+ const uint8_t * scales = ((const uint8_t *) &x_sc[i * (WARP_SIZE/4) + i/4 + kbx*4]) + ky/4;
+
+ const int index_y = j * WARP_SIZE + (QR2_K*k) % WARP_SIZE;
+ return vec_dot_q2_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dm[i * (WARP_SIZE/QI2_K) + i/QI2_K + kbx], y_df[index_y/QI8_1]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q3_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_ql_q3_K, sycl::half2 *tile_x_dm_q3_K,
+ int *tile_x_qh_q3_K, int *tile_x_sc_q3_K) {
+
+ *x_ql = tile_x_ql_q3_K;
+ *x_dm = tile_x_dm_q3_K;
+ *x_qh = tile_x_qh_q3_K;
+ *x_sc = tile_x_sc_q3_K;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q3_K(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI3_K;
+ const int kqsx = k % QI3_K;
+
+ const block_q3_K * bx0 = (const block_q3_K *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q3_K * bxi = bx0 + i*blocks_per_row + kbx;
+
+ x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8(bxi->qs, kqsx);
+ }
+
+ const int blocks_per_tile_x_row = WARP_SIZE / QI3_K;
+ const int kbxd = k % blocks_per_tile_x_row;
+ float * x_dmf = (float *) x_dm;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI3_K) {
+ int i = (i0 + i_offset * QI3_K + k / blocks_per_tile_x_row) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q3_K * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dmf[i * (WARP_SIZE/QI3_K) + i / QI3_K + kbxd] = bxi->d;
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 2) {
+ int i = i0 + i_offset * 2 + k / (WARP_SIZE/2);
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/2)) / (QI3_K/2);
+
+ // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
+ x_qh[i * (WARP_SIZE/2) + i / 2 + k % (WARP_SIZE/2)] = ~get_int_from_uint8(bxi->hmask, k % (QI3_K/2));
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 4) {
+ int i = i0 + i_offset * 4 + k / (WARP_SIZE/4);
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q3_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/4)) / (QI3_K/4);
+
+ const int ksc = k % (QI3_K/4);
+
+ const int ksc_low = ksc % (QI3_K/8);
+ const int shift_low = 4 * (ksc / (QI3_K/8));
+ const int sc_low = (get_int_from_uint8(bxi->scales, ksc_low) >> shift_low) & 0x0F0F0F0F;
+
+ const int ksc_high = QI3_K/8;
+ const int shift_high = 2 * ksc;
+ const int sc_high = ((get_int_from_uint8(bxi->scales, ksc_high) >> shift_high) << 4) & 0x30303030;
+
+ const int sc = dpct::vectorized_binary<sycl::char4>(
+ sc_low | sc_high, 0x20202020, dpct::sub_sat());
+
+ x_sc[i * (WARP_SIZE/4) + i / 4 + k % (WARP_SIZE/4)] = sc;
+ }
+}
+
+#define VDR_Q3_K_Q8_1_MMQ 2
+// contiguous u/y values
+static __dpct_inline__ float
+vec_dot_q3_K_q8_1_impl_mmq(const int *__restrict__ v, const int *__restrict__ u,
+ const int8_t *__restrict__ scales, const float &d3,
+ const float &d8) {
+
+ int sumi = 0;
+
+#pragma unroll
+ for (int i0 = 0; i0 < QR3_K*VDR_Q3_K_Q8_1_MMQ; i0 += QI8_1/2) {
+ int sumi_sc = 0;
+
+ for (int i = i0; i < i0 + QI8_1/2; ++i) {
+ sumi_sc = dpct::dp4a(v[i], u[i], sumi_sc); // SIMD dot product
+ }
+
+ sumi += sumi_sc * scales[i0 / (QI8_1/2)];
+ }
+
+ return d3*d8 * sumi;
+}
+
+static __dpct_inline__ float vec_dot_q3_K_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+
+ const int kbx = k / QI3_K;
+ const int ky = (k % QI3_K) * QR3_K;
+ const float * x_dmf = (const float *) x_dm;
+ const float * y_df = (const float *) y_ds;
+
+ const int8_t * scales = ((const int8_t *) (x_sc + i * (WARP_SIZE/4) + i/4 + kbx*4)) + ky/4;
+
+ int v[QR3_K*VDR_Q3_K_Q8_1_MMQ];
+
+#pragma unroll
+ for (int l = 0; l < QR3_K*VDR_Q3_K_Q8_1_MMQ; ++l) {
+ const int kqsx = i * (WARP_SIZE + 1) + kbx*QI3_K + (QI3_K/2) * (ky/(2*QI3_K)) + ky % (QI3_K/2);
+ const int shift = 2 * ((ky % 32) / 8);
+ const int vll = (x_ql[kqsx + l] >> shift) & 0x03030303;
+
+ const int vh = x_qh[i * (WARP_SIZE/2) + i/2 + kbx * (QI3_K/2) + (ky+l)%8] >> ((ky+l) / 8);
+ const int vlh = (vh << 2) & 0x04040404;
+
+ v[l] = dpct::vectorized_binary<sycl::char4>(vll, vlh, dpct::sub_sat());
+ }
+
+ const int index_y = j * WARP_SIZE + (k*QR3_K) % WARP_SIZE;
+ return vec_dot_q3_K_q8_1_impl_mmq(v, &y_qs[index_y], scales, x_dmf[i * (WARP_SIZE/QI3_K) + i/QI3_K + kbx], y_df[index_y/QI8_1]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q4_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_ql_q4_K, sycl::half2 *tile_x_dm_q4_K,
+ int *tile_x_sc_q4_K) {
+ (void)x_qh;
+
+ *x_ql = tile_x_ql_q4_K;
+ *x_dm = tile_x_dm_q4_K;
+ *x_sc = tile_x_sc_q4_K;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q4_K(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI4_K; // == 0 if QK_K == 256
+ const int kqsx = k % QI4_K; // == k if QK_K == 256
+
+ const block_q4_K * bx0 = (const block_q4_K *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q4_K * bxi = bx0 + i*blocks_per_row + kbx;
+
+ x_ql[i * (WARP_SIZE + 1) + k] = get_int_from_uint8_aligned(bxi->qs, kqsx);
+ }
+
+ constexpr int blocks_per_tile_x_row = QI4_K > WARP_SIZE ? 1 : WARP_SIZE / QI4_K; // == 1 if QK_K == 256
+ const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI4_K) {
+ int i = (i0 + i_offset * QI4_K + k / blocks_per_tile_x_row) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q4_K * bxi = bx0 + i*blocks_per_row + kbxd;
+
+#if QK_K == 256
+ x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = bxi->dm;
+#else
+ x_dm[i * (WARP_SIZE/QI4_K) + i / QI4_K + kbxd] = {bxi->dm[0], bxi->dm[1]};
+#endif
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
+ int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q4_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI4_K/8);
+
+ const int * scales = (const int *) bxi->scales;
+
+ const int ksc = k % (WARP_SIZE/8);
+
+ // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
+ int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
+ scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
+
+ x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
+ }
+}
+
+
+#define VDR_Q4_K_Q8_1_MMQ 8
+
+// contiguous u/y values
+static __dpct_inline__ float vec_dot_q4_K_q8_1_impl_mmq(
+ const int *__restrict__ v, const int *__restrict__ u,
+ const uint8_t *__restrict__ sc, const uint8_t *__restrict__ m,
+ const sycl::half2 &dm4, const sycl::half2 *__restrict__ ds8) {
+
+ float sumf_d = 0.0f;
+ float sumf_m = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR4_K*VDR_Q4_K_Q8_1_MMQ/QI8_1; ++i) {
+ int sumi_d = 0;
+
+#pragma unroll
+ for (int j = 0; j < QI8_1; ++j) {
+ sumi_d = dpct::dp4a((v[j] >> (4 * i)) & 0x0F0F0F0F,
+ u[i * QI8_1 + j], sumi_d); // SIMD dot product
+ }
+
+ const sycl::float2 ds8f =
+ ds8[i].convert<float, sycl::rounding_mode::automatic>();
+
+ sumf_d += ds8f.x() * (sc[i] * sumi_d);
+ sumf_m += ds8f.y() * m[i]; // sum of q8_1 block * q4_K min val
+ }
+
+ const sycl::float2 dm4f =
+ dm4.convert<float, sycl::rounding_mode::automatic>();
+
+ return dm4f.x() * sumf_d - dm4f.y() * sumf_m;
+}
+
+
+static __dpct_inline__ float vec_dot_q4_K_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh;
+
+ const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2*((k % 16) / 8);
+
+ const int index_y = j * WARP_SIZE + (QR4_K*k) % WARP_SIZE;
+ return vec_dot_q4_K_q8_1_impl_mmq(&x_ql[i * (WARP_SIZE + 1) + k], &y_qs[index_y], sc, sc+8,
+ x_dm[i * (WARP_SIZE/QI4_K) + i/QI4_K], &y_ds[index_y/QI8_1]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q5_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_ql_q5_K, sycl::half2 *tile_x_dm_q5_K,
+ int *tile_x_sc_q5_K) {
+ (void)x_qh;
+
+ *x_ql = tile_x_ql_q5_K;
+ *x_dm = tile_x_dm_q5_K;
+ *x_sc = tile_x_sc_q5_K;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q5_K(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI5_K; // == 0 if QK_K == 256
+ const int kqsx = k % QI5_K; // == k if QK_K == 256
+
+ const block_q5_K * bx0 = (const block_q5_K *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q5_K * bxi = bx0 + i*blocks_per_row + kbx;
+ const int ky = QR5_K*kqsx;
+
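+ // q5_K: low 4 bits in qs, 5th bit in qh; OR the high bit into each packed nibble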
+ const int ql = get_int_from_uint8_aligned(bxi->qs, kqsx);
+ const int ql0 = (ql >> 0) & 0x0F0F0F0F;
+ const int ql1 = (ql >> 4) & 0x0F0F0F0F;
+
+ const int qh = get_int_from_uint8_aligned(bxi->qh, kqsx % (QI5_K/4));
+ const int qh0 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 0)) << 4) & 0x10101010;
+ const int qh1 = ((qh >> (2 * (kqsx / (QI5_K/4)) + 1)) << 4) & 0x10101010;
+
+ const int kq0 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + 0;
+ const int kq1 = ky - ky % (QI5_K/2) + k % (QI5_K/4) + (QI5_K/4);
+
+ x_ql[i * (2*WARP_SIZE + 1) + kq0] = ql0 | qh0;
+ x_ql[i * (2*WARP_SIZE + 1) + kq1] = ql1 | qh1;
+ }
+
+ constexpr int blocks_per_tile_x_row = QI5_K > WARP_SIZE ? 1 : WARP_SIZE / QI5_K; // == 1 if QK_K == 256
+ const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI5_K) {
+ int i = (i0 + i_offset * QI5_K + k / blocks_per_tile_x_row) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q5_K * bxi = bx0 + i*blocks_per_row + kbxd;
+
+#if QK_K == 256
+ x_dm[i * (WARP_SIZE/QI5_K) + i / QI5_K + kbxd] = bxi->dm;
+#endif
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
+ int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q5_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / (QI5_K/8);
+
+ const int * scales = (const int *) bxi->scales;
+
+ const int ksc = k % (WARP_SIZE/8);
+
+ // scale arrangement after the following two lines: sc0,...,sc3, sc4,...,sc7, m0,...,m3, m4,...,m7
+ int scales8 = (scales[(ksc%2) + (ksc!=0)] >> (4 * (ksc & (ksc/2)))) & 0x0F0F0F0F; // lower 4 bits
+ scales8 |= (scales[ksc/2] >> (2 * (ksc % 2))) & 0x30303030; // upper 2 bits
+
+ x_sc[i * (WARP_SIZE/8) + i / 8 + ksc] = scales8;
+ }
+}
+
+#define VDR_Q5_K_Q8_1_MMQ 8
+
+// contiguous u/y values
+static __dpct_inline__ float vec_dot_q5_K_q8_1_impl_mmq(
+ const int *__restrict__ v, const int *__restrict__ u,
+ const uint8_t *__restrict__ sc, const uint8_t *__restrict__ m,
+ const sycl::half2 &dm4, const sycl::half2 *__restrict__ ds8) {
+
+ float sumf_d = 0.0f;
+ float sumf_m = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR5_K*VDR_Q5_K_Q8_1_MMQ/QI8_1; ++i) {
+ int sumi_d = 0;
+
+#pragma unroll
+ for (int j = 0; j < QI8_1; ++j) {
+ sumi_d = dpct::dp4a(v[i * QI8_1 + j], u[i * QI8_1 + j],
+ sumi_d); // SIMD dot product
+ }
+
+ const sycl::float2 ds8f =
+ ds8[i].convert<float, sycl::rounding_mode::automatic>();
+
+ sumf_d += ds8f.x() * (sc[i] * sumi_d);
+ sumf_m += ds8f.y() * m[i]; // sum of q8_1 block * q5_K min val
+ }
+
+ const sycl::float2 dm4f =
+ dm4.convert<float, sycl::rounding_mode::automatic>();
+
+ return dm4f.x() * sumf_d - dm4f.y() * sumf_m;
+}
+
+static __dpct_inline__ float vec_dot_q5_K_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh;
+
+ const uint8_t * sc = ((const uint8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/16]) + 2 * ((k % 16) / 8);
+
+ const int index_x = i * (QR5_K*WARP_SIZE + 1) + QR5_K*k;
+ const int index_y = j * WARP_SIZE + (QR5_K*k) % WARP_SIZE;
+ return vec_dot_q5_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, sc+8,
+ x_dm[i * (WARP_SIZE/QI5_K) + i/QI5_K], &y_ds[index_y/QI8_1]);
+}
+
+template <int mmq_y>
+static __dpct_inline__ void
+allocate_tiles_q6_K(int **x_ql, sycl::half2 **x_dm, int **x_qh, int **x_sc,
+ int *tile_x_ql, sycl::half2 *tile_x_dm, int *tile_x_sc) {
+ (void)x_qh;
+
+ *x_ql = tile_x_ql;
+ *x_dm = tile_x_dm;
+ *x_sc = tile_x_sc;
+}
+
+template <int mmq_y, int nwarps, bool need_check>
+static __dpct_inline__ void
+load_tiles_q6_K(const void *__restrict__ vx, int *__restrict__ x_ql,
+ sycl::half2 *__restrict__ x_dm, int *__restrict__ x_qh,
+ int *__restrict__ x_sc, const int &i_offset, const int &i_max,
+ const int &k, const int &blocks_per_row) {
+ (void)x_qh;
+
+ GGML_SYCL_ASSUME(i_offset >= 0);
+ GGML_SYCL_ASSUME(i_offset < nwarps);
+ GGML_SYCL_ASSUME(k >= 0);
+ GGML_SYCL_ASSUME(k < WARP_SIZE);
+
+ const int kbx = k / QI6_K; // == 0 if QK_K == 256
+ const int kqsx = k % QI6_K; // == k if QK_K == 256
+
+ const block_q6_K * bx0 = (const block_q6_K *) vx;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps) {
+ int i = i0 + i_offset;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q6_K * bxi = bx0 + i*blocks_per_row + kbx;
+ const int ky = QR6_K*kqsx;
+
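+ // q6_K: low 4 bits in ql, top 2 bits in qh; recombine and subtract the 32 offset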
+ const int ql = get_int_from_uint8(bxi->ql, kqsx);
+ const int ql0 = (ql >> 0) & 0x0F0F0F0F;
+ const int ql1 = (ql >> 4) & 0x0F0F0F0F;
+
+ const int qh = get_int_from_uint8(bxi->qh, (QI6_K/4) * (kqsx / (QI6_K/2)) + kqsx % (QI6_K/4));
+ const int qh0 = ((qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) << 4) & 0x30303030;
+ const int qh1 = (qh >> (2 * ((kqsx % (QI6_K/2)) / (QI6_K/4)))) & 0x30303030;
+
+ const int kq0 = ky - ky % QI6_K + k % (QI6_K/2) + 0;
+ const int kq1 = ky - ky % QI6_K + k % (QI6_K/2) + (QI6_K/2);
+
+ x_ql[i * (2 * WARP_SIZE + 1) + kq0] =
+ dpct::vectorized_binary<sycl::char4>(ql0 | qh0, 0x20202020,
+ dpct::sub_sat());
+ x_ql[i * (2 * WARP_SIZE + 1) + kq1] =
+ dpct::vectorized_binary<sycl::char4>(ql1 | qh1, 0x20202020,
+ dpct::sub_sat());
+ }
+
+ constexpr int blocks_per_tile_x_row = QI6_K > WARP_SIZE ? 1 : WARP_SIZE / QI6_K; // == 1 if QK_K == 256
+ const int kbxd = k % blocks_per_tile_x_row; // == 0 if QK_K == 256
+ float * x_dmf = (float *) x_dm;
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * QI6_K) {
+ int i = (i0 + i_offset * QI6_K + k / blocks_per_tile_x_row) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q6_K * bxi = bx0 + i*blocks_per_row + kbxd;
+
+ x_dmf[i * (WARP_SIZE/QI6_K) + i / QI6_K + kbxd] = bxi->d;
+ }
+
+#pragma unroll
+ for (int i0 = 0; i0 < mmq_y; i0 += nwarps * 8) {
+ int i = (i0 + i_offset * 8 + k / (WARP_SIZE/8)) % mmq_y;
+
+ if (need_check) {
+ i = sycl::min(i, i_max);
+ }
+
+ const block_q6_K * bxi = bx0 + i*blocks_per_row + (k % (WARP_SIZE/8)) / 4;
+
+ x_sc[i * (WARP_SIZE/8) + i / 8 + k % (WARP_SIZE/8)] = get_int_from_int8(bxi->scales, k % (QI6_K/8));
+ }
+}
+
+#define VDR_Q6_K_Q8_1_MMQ 8
+
+// contiguous u/y values
+static __dpct_inline__ float
+vec_dot_q6_K_q8_1_impl_mmq(const int *__restrict__ v, const int *__restrict__ u,
+ const int8_t *__restrict__ sc, const float &d6,
+ const float *__restrict__ d8) {
+
+ float sumf_d = 0.0f;
+
+#pragma unroll
+ for (int i0 = 0; i0 < VDR_Q6_K_Q8_1_MMQ; i0 += 4) {
+ sycl::int2 sumi_d = {0, 0}; // 2 q6_K scales per q8_1 scale
+
+#pragma unroll
+ for (int i = i0; i < i0 + 2; ++i) {
+ sumi_d.x() = dpct::dp4a(v[2 * i + 0], u[2 * i + 0],
+ sumi_d.x()); // SIMD dot product
+ sumi_d.x() = dpct::dp4a(v[2 * i + 1], u[2 * i + 1],
+ sumi_d.x()); // SIMD dot product
+
+ sumi_d.y() = dpct::dp4a(v[2 * i + 4], u[2 * i + 4],
+ sumi_d.y()); // SIMD dot product
+ sumi_d.y() = dpct::dp4a(v[2 * i + 5], u[2 * i + 5],
+ sumi_d.y()); // SIMD dot product
+ }
+
+ sumf_d += d8[i0 / 4] *
+ (sc[i0 / 2 + 0] * sumi_d.x() + sc[i0 / 2 + 1] * sumi_d.y());
+ }
+
+ return d6 * sumf_d;
+}
+
+static __dpct_inline__ float vec_dot_q6_K_q8_1_mul_mat(
+ const int *__restrict__ x_ql, const sycl::half2 *__restrict__ x_dm,
+ const int *__restrict__ x_qh, const int *__restrict__ x_sc,
+ const int *__restrict__ y_qs, const sycl::half2 *__restrict__ y_ds,
+ const int &i, const int &j, const int &k) {
+ (void)x_qh;
+
+ const float * x_dmf = (const float *) x_dm;
+ const float * y_df = (const float *) y_ds;
+
+ const int8_t * sc = ((const int8_t *) &x_sc[i * (WARP_SIZE/8) + i/8 + k/8]);
+
+ const int index_x = i * (QR6_K*WARP_SIZE + 1) + QR6_K*k;
+ const int index_y = j * WARP_SIZE + (QR6_K*k) % WARP_SIZE;
+ return vec_dot_q6_K_q8_1_impl_mmq(&x_ql[index_x], &y_qs[index_y], sc, x_dmf[i * (WARP_SIZE/QI6_K) + i/QI6_K], &y_df[index_y/QI8_1]);
+}
+
+template <int qk, int qr, int qi, bool need_sum, typename block_q_t, int mmq_x,
+ int mmq_y, int nwarps, load_tiles_sycl_t load_tiles, int vdr,
+ vec_dot_q_mul_mat_sycl_t vec_dot>
+/*
+DPCT1110:8: The total declared local variable size in device function mul_mat_q
+exceeds 128 bytes and may cause high register pressure. Consult with your
+hardware vendor to find the total register size available and adjust the code,
+or use smaller sub-group size to avoid high register pressure.
+*/
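+// Tiled quantized matmul: each work-group accumulates an mmq_y x mmq_x tile of
+// dst, cooperatively loading x tiles via load_tiles and streaming q8_1 blocks
+// of y through local memory between barriers.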
+static __dpct_inline__ void
+mul_mat_q(const void *__restrict__ vx, const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols_x, const int nrows_x,
+ const int ncols_y, const int nrows_y, const int nrows_dst,
+ int *tile_x_ql, sycl::half2 *tile_x_dm, int *tile_x_qh,
+ int *tile_x_sc, const sycl::nd_item<3> &item_ct1, int *tile_y_qs,
+ sycl::half2 *tile_y_ds) {
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ const int blocks_per_row_x = ncols_x / qk;
+ const int blocks_per_col_y = nrows_y / QK8_1;
+ const int blocks_per_warp = WARP_SIZE / qi;
+
+ const int & ncols_dst = ncols_y;
+
+ const int row_dst_0 = item_ct1.get_group(2) * mmq_y;
+ const int & row_x_0 = row_dst_0;
+
+ const int col_dst_0 = item_ct1.get_group(1) * mmq_x;
+ const int & col_y_0 = col_dst_0;
+
+ float sum[mmq_y/WARP_SIZE][mmq_x/nwarps] = {{0.0f}};
+
+ for (int ib0 = 0; ib0 < blocks_per_row_x; ib0 += blocks_per_warp) {
+
+ load_tiles(x + row_x_0 * blocks_per_row_x + ib0, tile_x_ql, tile_x_dm,
+ tile_x_qh, tile_x_sc, item_ct1.get_local_id(1),
+ nrows_x - row_x_0 - 1, item_ct1.get_local_id(2),
+ blocks_per_row_x);
+
+#pragma unroll
+ for (int ir = 0; ir < qr; ++ir) {
+ const int kqs = ir * WARP_SIZE + item_ct1.get_local_id(2);
+ const int kbxd = kqs / QI8_1;
+
+#pragma unroll
+ for (int i = 0; i < mmq_x; i += nwarps) {
+ const int col_y_eff = dpct::min(
+ (unsigned int)(col_y_0 + item_ct1.get_local_id(1) + i),
+ ncols_y - 1); // to prevent out-of-bounds memory accesses
+
+ const block_q8_1 * by0 = &y[col_y_eff*blocks_per_col_y + ib0 * (qk/QK8_1) + kbxd];
+
+ const int index_y = (item_ct1.get_local_id(1) + i) * WARP_SIZE +
+ kqs % WARP_SIZE;
+ tile_y_qs[index_y] = get_int_from_int8_aligned(
+ by0->qs, item_ct1.get_local_id(2) % QI8_1);
+ }
+
+#pragma unroll
+ for (int ids0 = 0; ids0 < mmq_x; ids0 += nwarps * QI8_1) {
+ const int ids =
+ (ids0 + item_ct1.get_local_id(1) * QI8_1 +
+ item_ct1.get_local_id(2) / (WARP_SIZE / QI8_1)) %
+ mmq_x;
+ const int kby = item_ct1.get_local_id(2) % (WARP_SIZE / QI8_1);
+ const int col_y_eff = sycl::min(col_y_0 + ids, ncols_y - 1);
+
+ // if the sum is not needed it's faster to transform the scale to f32 ahead of time
+ const sycl::half2 *dsi_src =
+ &y[col_y_eff * blocks_per_col_y + ib0 * (qk / QK8_1) +
+ ir * (WARP_SIZE / QI8_1) + kby]
+ .ds;
+ sycl::half2 *dsi_dst =
+ &tile_y_ds[ids * (WARP_SIZE / QI8_1) + kby];
+ if (need_sum) {
+ *dsi_dst = *dsi_src;
+ } else {
+ float * dfi_dst = (float *) dsi_dst;
+ *dfi_dst = (*dsi_src)[0];
+ }
+ }
+
+ /*
+ DPCT1118:9: SYCL group functions and algorithms must be encountered
+ in converged control flow. You may need to adjust the code.
+ */
+ /*
+ DPCT1065:56: Consider replacing sycl::nd_item::barrier() with
+ sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
+ better performance if there is no access to global memory.
+ */
+ item_ct1.barrier();
+
+// #pragma unroll // unrolling this loop causes too much register pressure
+ for (int k = ir*WARP_SIZE/qr; k < (ir+1)*WARP_SIZE/qr; k += vdr) {
+#pragma unroll
+ for (int j = 0; j < mmq_x; j += nwarps) {
+#pragma unroll
+ for (int i = 0; i < mmq_y; i += WARP_SIZE) {
+ sum[i / WARP_SIZE][j / nwarps] += vec_dot(
+ tile_x_ql, tile_x_dm, tile_x_qh, tile_x_sc,
+ tile_y_qs, tile_y_ds, item_ct1.get_local_id(2) + i,
+ item_ct1.get_local_id(1) + j, k);
+ }
+ }
+ }
+
+ /*
+ DPCT1118:10: SYCL group functions and algorithms must be encountered
+ in converged control flow. You may need to adjust the code.
+ */
+ /*
+ DPCT1065:57: Consider replacing sycl::nd_item::barrier() with
+ sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
+ better performance if there is no access to global memory.
+ */
+ item_ct1.barrier();
+ }
+ }
+
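+ // write the accumulated tile back to dst, skipping rows/columns past the edges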
+#pragma unroll
+ for (int j = 0; j < mmq_x; j += nwarps) {
+ const int col_dst = col_dst_0 + j + item_ct1.get_local_id(1);
+
+ if (col_dst >= ncols_dst) {
+ return;
+ }
+
+#pragma unroll
+ for (int i = 0; i < mmq_y; i += WARP_SIZE) {
+ const int row_dst = row_dst_0 + item_ct1.get_local_id(2) + i;
+
+ if (row_dst >= nrows_dst) {
+ continue;
+ }
+
+ dst[col_dst*nrows_dst + row_dst] = sum[i/WARP_SIZE][j/nwarps];
+ }
+ }
+}
+
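+// below: per-quantization tile-shape presets and thin kernel wrappers that
+// bind mul_mat_q to the load_tiles/vec_dot implementations of each block type
+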
+#define MMQ_X_Q4_0_RDNA2 64
+#define MMQ_Y_Q4_0_RDNA2 128
+#define NWARPS_Q4_0_RDNA2 8
+#define MMQ_X_Q4_0_RDNA1 64
+#define MMQ_Y_Q4_0_RDNA1 64
+#define NWARPS_Q4_0_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q4_0_AMPERE 4
+#define MMQ_Y_Q4_0_AMPERE 32
+#define NWARPS_Q4_0_AMPERE 4
+#else
+#define MMQ_X_Q4_0_AMPERE 64
+#define MMQ_Y_Q4_0_AMPERE 128
+#define NWARPS_Q4_0_AMPERE 4
+#endif
+#define MMQ_X_Q4_0_PASCAL 64
+#define MMQ_Y_Q4_0_PASCAL 64
+#define NWARPS_Q4_0_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q4_0(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_qs_q4_0, float *tile_x_d_q4_0,
+ int *tile_y_qs, sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q4_0_AMPERE;
+ const int mmq_y = MMQ_Y_Q4_0_AMPERE;
+ const int nwarps = NWARPS_Q4_0_AMPERE;
+ allocate_tiles_q4_0<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_qs_q4_0, tile_x_d_q4_0);
+ mul_mat_q<QK4_0, QR4_0, QI4_0, true, block_q4_0, mmq_x, mmq_y, nwarps,
+ load_tiles_q4_0<mmq_y, nwarps, need_check>, VDR_Q4_0_Q8_1_MMQ,
+ vec_dot_q4_0_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q4_1_RDNA2 64
+#define MMQ_Y_Q4_1_RDNA2 128
+#define NWARPS_Q4_1_RDNA2 8
+#define MMQ_X_Q4_1_RDNA1 64
+#define MMQ_Y_Q4_1_RDNA1 64
+#define NWARPS_Q4_1_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q4_1_AMPERE 4
+#define MMQ_Y_Q4_1_AMPERE 32
+#define NWARPS_Q4_1_AMPERE 4
+#else
+#define MMQ_X_Q4_1_AMPERE 64
+#define MMQ_Y_Q4_1_AMPERE 128
+#define NWARPS_Q4_1_AMPERE 4
+#endif
+#define MMQ_X_Q4_1_PASCAL 64
+#define MMQ_Y_Q4_1_PASCAL 64
+#define NWARPS_Q4_1_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q4_1(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_qs_q4_1,
+ sycl::half2 *tile_x_dm_q4_1, int *tile_y_qs, sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q4_1_AMPERE;
+ const int mmq_y = MMQ_Y_Q4_1_AMPERE;
+ const int nwarps = NWARPS_Q4_1_AMPERE;
+ allocate_tiles_q4_1<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_qs_q4_1, tile_x_dm_q4_1);
+ mul_mat_q<QK4_1, QR4_1, QI4_1, true, block_q4_1, mmq_x, mmq_y, nwarps,
+ load_tiles_q4_1<mmq_y, nwarps, need_check>, VDR_Q4_1_Q8_1_MMQ,
+ vec_dot_q4_1_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q5_0_RDNA2 64
+#define MMQ_Y_Q5_0_RDNA2 128
+#define NWARPS_Q5_0_RDNA2 8
+#define MMQ_X_Q5_0_RDNA1 64
+#define MMQ_Y_Q5_0_RDNA1 64
+#define NWARPS_Q5_0_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q5_0_AMPERE 4
+#define MMQ_Y_Q5_0_AMPERE 32
+#define NWARPS_Q5_0_AMPERE 4
+#else
+#define MMQ_X_Q5_0_AMPERE 128
+#define MMQ_Y_Q5_0_AMPERE 64
+#define NWARPS_Q5_0_AMPERE 4
+#endif
+#define MMQ_X_Q5_0_PASCAL 64
+#define MMQ_Y_Q5_0_PASCAL 64
+#define NWARPS_Q5_0_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q5_0(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q5_0, float *tile_x_d_q5_0,
+ int *tile_y_qs, sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q5_0_AMPERE;
+ const int mmq_y = MMQ_Y_Q5_0_AMPERE;
+ const int nwarps = NWARPS_Q5_0_AMPERE;
+ allocate_tiles_q5_0<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_ql_q5_0, tile_x_d_q5_0);
+ mul_mat_q<QK5_0, QR5_0, QI5_0, false, block_q5_0, mmq_x, mmq_y, nwarps,
+ load_tiles_q5_0<mmq_y, nwarps, need_check>, VDR_Q5_0_Q8_1_MMQ,
+ vec_dot_q5_0_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q5_1_RDNA2 64
+#define MMQ_Y_Q5_1_RDNA2 128
+#define NWARPS_Q5_1_RDNA2 8
+#define MMQ_X_Q5_1_RDNA1 64
+#define MMQ_Y_Q5_1_RDNA1 64
+#define NWARPS_Q5_1_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q5_1_AMPERE 4
+#define MMQ_Y_Q5_1_AMPERE 32
+#define NWARPS_Q5_1_AMPERE 4
+#else
+#define MMQ_X_Q5_1_AMPERE 128
+#define MMQ_Y_Q5_1_AMPERE 64
+#define NWARPS_Q5_1_AMPERE 4
+#endif
+#define MMQ_X_Q5_1_PASCAL 64
+#define MMQ_Y_Q5_1_PASCAL 64
+#define NWARPS_Q5_1_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q5_1(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q5_1,
+ sycl::half2 *tile_x_dm_q5_1, int *tile_y_qs, sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q5_1_AMPERE;
+ const int mmq_y = MMQ_Y_Q5_1_AMPERE;
+ const int nwarps = NWARPS_Q5_1_AMPERE;
+ allocate_tiles_q5_1<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_ql_q5_1, tile_x_dm_q5_1);
+ mul_mat_q<QK5_1, QR5_1, QI5_1, true, block_q5_1, mmq_x, mmq_y, nwarps,
+ load_tiles_q5_1<mmq_y, nwarps, need_check>, VDR_Q5_1_Q8_1_MMQ,
+ vec_dot_q5_1_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q8_0_RDNA2 64
+#define MMQ_Y_Q8_0_RDNA2 128
+#define NWARPS_Q8_0_RDNA2 8
+#define MMQ_X_Q8_0_RDNA1 64
+#define MMQ_Y_Q8_0_RDNA1 64
+#define NWARPS_Q8_0_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q8_0_AMPERE 4
+#define MMQ_Y_Q8_0_AMPERE 32
+#define NWARPS_Q8_0_AMPERE 4
+#else
+#define MMQ_X_Q8_0_AMPERE 128
+#define MMQ_Y_Q8_0_AMPERE 64
+#define NWARPS_Q8_0_AMPERE 4
+#endif
+#define MMQ_X_Q8_0_PASCAL 64
+#define MMQ_Y_Q8_0_PASCAL 64
+#define NWARPS_Q8_0_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q8_0(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_qs_q8_0, float *tile_x_d_q8_0,
+ int *tile_y_qs, sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q8_0_AMPERE;
+ const int mmq_y = MMQ_Y_Q8_0_AMPERE;
+ const int nwarps = NWARPS_Q8_0_AMPERE;
+ allocate_tiles_q8_0<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_qs_q8_0, tile_x_d_q8_0);
+ mul_mat_q<QK8_0, QR8_0, QI8_0, false, block_q8_0, mmq_x, mmq_y, nwarps,
+ load_tiles_q8_0<mmq_y, nwarps, need_check>, VDR_Q8_0_Q8_1_MMQ,
+ vec_dot_q8_0_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q2_K_RDNA2 64
+#define MMQ_Y_Q2_K_RDNA2 128
+#define NWARPS_Q2_K_RDNA2 8
+#define MMQ_X_Q2_K_RDNA1 128
+#define MMQ_Y_Q2_K_RDNA1 32
+#define NWARPS_Q2_K_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q2_K_AMPERE 4
+#define MMQ_Y_Q2_K_AMPERE 32
+#define NWARPS_Q2_K_AMPERE 4
+#else
+#define MMQ_X_Q2_K_AMPERE 64
+#define MMQ_Y_Q2_K_AMPERE 128
+#define NWARPS_Q2_K_AMPERE 4
+#endif
+#define MMQ_X_Q2_K_PASCAL 64
+#define MMQ_Y_Q2_K_PASCAL 64
+#define NWARPS_Q2_K_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q2_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q2_K,
+ sycl::half2 *tile_x_dm_q2_K, int *tile_x_sc_q2_K, int *tile_y_qs,
+ sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q2_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q2_K_AMPERE;
+ const int nwarps = NWARPS_Q2_K_AMPERE;
+ allocate_tiles_q2_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_ql_q2_K, tile_x_dm_q2_K, tile_x_sc_q2_K);
+ mul_mat_q<QK_K, QR2_K, QI2_K, false, block_q2_K, mmq_x, mmq_y, nwarps,
+ load_tiles_q2_K<mmq_y, nwarps, need_check>, VDR_Q2_K_Q8_1_MMQ,
+ vec_dot_q2_K_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q3_K_RDNA2 128
+#define MMQ_Y_Q3_K_RDNA2 64
+#define NWARPS_Q3_K_RDNA2 8
+#define MMQ_X_Q3_K_RDNA1 32
+#define MMQ_Y_Q3_K_RDNA1 128
+#define NWARPS_Q3_K_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q3_K_AMPERE 4
+#define MMQ_Y_Q3_K_AMPERE 32
+#define NWARPS_Q3_K_AMPERE 4
+#else
+#define MMQ_X_Q3_K_AMPERE 128
+#define MMQ_Y_Q3_K_AMPERE 128
+#define NWARPS_Q3_K_AMPERE 4
+#endif
+#define MMQ_X_Q3_K_PASCAL 64
+#define MMQ_Y_Q3_K_PASCAL 64
+#define NWARPS_Q3_K_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q3_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q3_K,
+ sycl::half2 *tile_x_dm_q3_K, int *tile_x_qh_q3_K, int *tile_x_sc_q3_K,
+ int *tile_y_qs, sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q3_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q3_K_AMPERE;
+ const int nwarps = NWARPS_Q3_K_AMPERE;
+ allocate_tiles_q3_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_ql_q3_K, tile_x_dm_q3_K, tile_x_qh_q3_K,
+ tile_x_sc_q3_K);
+ mul_mat_q<QK_K, QR3_K, QI3_K, false, block_q3_K, mmq_x, mmq_y, nwarps,
+ load_tiles_q3_K<mmq_y, nwarps, need_check>, VDR_Q3_K_Q8_1_MMQ,
+ vec_dot_q3_K_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q4_K_RDNA2 64
+#define MMQ_Y_Q4_K_RDNA2 128
+#define NWARPS_Q4_K_RDNA2 8
+#define MMQ_X_Q4_K_RDNA1 32
+#define MMQ_Y_Q4_K_RDNA1 64
+#define NWARPS_Q4_K_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q4_K_AMPERE 4
+#define MMQ_Y_Q4_K_AMPERE 32
+#define NWARPS_Q4_K_AMPERE 4
+#else
+#define MMQ_X_Q4_K_AMPERE 64
+#define MMQ_Y_Q4_K_AMPERE 128
+#define NWARPS_Q4_K_AMPERE 4
+#endif
+#define MMQ_X_Q4_K_PASCAL 64
+#define MMQ_Y_Q4_K_PASCAL 64
+#define NWARPS_Q4_K_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q4_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q4_K,
+ sycl::half2 *tile_x_dm_q4_K, int *tile_x_sc_q4_K, int *tile_y_qs,
+ sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q4_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q4_K_AMPERE;
+ const int nwarps = NWARPS_Q4_K_AMPERE;
+ allocate_tiles_q4_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_ql_q4_K, tile_x_dm_q4_K, tile_x_sc_q4_K);
+ mul_mat_q<QK_K, QR4_K, QI4_K, true, block_q4_K, mmq_x, mmq_y, nwarps,
+ load_tiles_q4_K<mmq_y, nwarps, need_check>, VDR_Q4_K_Q8_1_MMQ,
+ vec_dot_q4_K_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q5_K_RDNA2 64
+#define MMQ_Y_Q5_K_RDNA2 128
+#define NWARPS_Q5_K_RDNA2 8
+#define MMQ_X_Q5_K_RDNA1 32
+#define MMQ_Y_Q5_K_RDNA1 64
+#define NWARPS_Q5_K_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q5_K_AMPERE 4
+#define MMQ_Y_Q5_K_AMPERE 32
+#define NWARPS_Q5_K_AMPERE 4
+#else
+#define MMQ_X_Q5_K_AMPERE 64
+#define MMQ_Y_Q5_K_AMPERE 128
+#define NWARPS_Q5_K_AMPERE 4
+#endif
+#define MMQ_X_Q5_K_PASCAL 64
+#define MMQ_Y_Q5_K_PASCAL 64
+#define NWARPS_Q5_K_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q5_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_ql_q5_K,
+ sycl::half2 *tile_x_dm_q5_K, int *tile_x_sc_q5_K, int *tile_y_qs,
+ sycl::half2 *tile_y_ds) {
+ int * tile_x_ql = nullptr;
+ sycl::half2 *tile_x_dm = nullptr;
+ int * tile_x_qh = nullptr;
+ int * tile_x_sc = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q5_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q5_K_AMPERE;
+ const int nwarps = NWARPS_Q5_K_AMPERE;
+ allocate_tiles_q5_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_ql_q5_K, tile_x_dm_q5_K, tile_x_sc_q5_K);
+ mul_mat_q<QK_K, QR5_K, QI5_K, true, block_q5_K, mmq_x, mmq_y, nwarps,
+ load_tiles_q5_K<mmq_y, nwarps, need_check>, VDR_Q5_K_Q8_1_MMQ,
+ vec_dot_q5_K_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+#define MMQ_X_Q6_K_RDNA2 64
+#define MMQ_Y_Q6_K_RDNA2 128
+#define NWARPS_Q6_K_RDNA2 8
+#define MMQ_X_Q6_K_RDNA1 32
+#define MMQ_Y_Q6_K_RDNA1 64
+#define NWARPS_Q6_K_RDNA1 8
+#if defined(SYCL_USE_XMX)
+#define MMQ_X_Q6_K_AMPERE 4
+#define MMQ_Y_Q6_K_AMPERE 32
+#define NWARPS_Q6_K_AMPERE 4
+#else
+#define MMQ_X_Q6_K_AMPERE 64
+#define MMQ_Y_Q6_K_AMPERE 64
+#define NWARPS_Q6_K_AMPERE 4
+#endif
+#define MMQ_X_Q6_K_PASCAL 64
+#define MMQ_Y_Q6_K_PASCAL 64
+#define NWARPS_Q6_K_PASCAL 8
+
+template <bool need_check> static void
+mul_mat_q6_K(
+ const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols_x, const int nrows_x, const int ncols_y, const int nrows_y, const int nrows_dst,
+ const sycl::nd_item<3> &item_ct1, int *tile_x_ql, sycl::half2 *tile_x_dm,
+ int *tile_x_sc, int *tile_y_qs, sycl::half2 *tile_y_ds) {
+ // unlike the other wrappers, tile_x_ql, tile_x_dm and tile_x_sc come in
+ // directly as kernel parameters; only the qh tile is unused by q6_K
+ int * tile_x_qh = nullptr;
+
+//sycl_todo: select tile sizes according to the target hardware; the AMPERE preset is used unconditionally for now
+ const int mmq_x = MMQ_X_Q6_K_AMPERE;
+ const int mmq_y = MMQ_Y_Q6_K_AMPERE;
+ const int nwarps = NWARPS_Q6_K_AMPERE;
+ allocate_tiles_q6_K<mmq_y>(&tile_x_ql, &tile_x_dm, &tile_x_qh, &tile_x_sc,
+ tile_x_ql, tile_x_dm, tile_x_sc);
+ mul_mat_q<QK_K, QR6_K, QI6_K, false, block_q6_K, mmq_x, mmq_y, nwarps,
+ load_tiles_q6_K<mmq_y, nwarps, need_check>, VDR_Q6_K_Q8_1_MMQ,
+ vec_dot_q6_K_q8_1_mul_mat>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y, nrows_dst, tile_x_ql,
+ tile_x_dm, tile_x_qh, tile_x_sc, item_ct1, tile_y_qs, tile_y_ds);
+}
+
+static void ggml_mul_mat_q4_0_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
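+ // preset names (RDNA*/AMPERE/PASCAL) are carried over from the CUDA backend;
+ // here the SYCL device generation (cc) selects which preset sizes the grid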
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q4_0_RDNA2;
+ mmq_y = MMQ_Y_Q4_0_RDNA2;
+ nwarps = NWARPS_Q4_0_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q4_0_RDNA1;
+ mmq_y = MMQ_Y_Q4_0_RDNA1;
+ nwarps = NWARPS_Q4_0_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q4_0_AMPERE;
+ mmq_y = MMQ_Y_Q4_0_AMPERE;
+ nwarps = NWARPS_Q4_0_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q4_0_PASCAL;
+ mmq_y = MMQ_Y_Q4_0_PASCAL;
+ nwarps = NWARPS_Q4_0_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
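+ // each work-group computes one mmq_y x mmq_x tile of dst; local dims are
+ // (1, nwarps, WARP_SIZE), so dim 2 indexes lanes and dim 1 indexes warps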
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:20: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_qs_q4_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<float, 1> tile_x_d_q4_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI4_0) + mmq_y / QI4_0),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q4_0<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_qs_q4_0_acc_ct1),
+ get_pointer(tile_x_d_q4_0_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:21: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_qs_q4_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<float, 1> tile_x_d_q4_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI4_0) + mmq_y / QI4_0),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q4_0<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_qs_q4_0_acc_ct1),
+ get_pointer(tile_x_d_q4_0_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q4_1_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q4_1_RDNA2;
+ mmq_y = MMQ_Y_Q4_1_RDNA2;
+ nwarps = NWARPS_Q4_1_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q4_1_RDNA1;
+ mmq_y = MMQ_Y_Q4_1_RDNA1;
+ nwarps = NWARPS_Q4_1_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q4_1_AMPERE;
+ mmq_y = MMQ_Y_Q4_1_AMPERE;
+ nwarps = NWARPS_Q4_1_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q4_1_PASCAL;
+ mmq_y = MMQ_Y_Q4_1_PASCAL;
+ nwarps = NWARPS_Q4_1_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:22: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_qs_q4_1_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_1_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI4_1) + mmq_y / QI4_1),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q4_1<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_qs_q4_1_acc_ct1),
+ get_pointer(tile_x_dm_q4_1_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:23: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_qs_q4_1_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_1_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI4_1) + mmq_y / QI4_1),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q4_1<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_qs_q4_1_acc_ct1),
+ get_pointer(tile_x_dm_q4_1_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q5_0_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q5_0_RDNA2;
+ mmq_y = MMQ_Y_Q5_0_RDNA2;
+ nwarps = NWARPS_Q5_0_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q5_0_RDNA1;
+ mmq_y = MMQ_Y_Q5_0_RDNA1;
+ nwarps = NWARPS_Q5_0_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q5_0_AMPERE;
+ mmq_y = MMQ_Y_Q5_0_AMPERE;
+ nwarps = NWARPS_Q5_0_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q5_0_PASCAL;
+ mmq_y = MMQ_Y_Q5_0_PASCAL;
+ nwarps = NWARPS_Q5_0_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:24: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q5_0_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<float, 1> tile_x_d_q5_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI5_0) + mmq_y / QI5_0),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q5_0<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q5_0_acc_ct1),
+ get_pointer(tile_x_d_q5_0_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:25: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q5_0_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<float, 1> tile_x_d_q5_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI5_0) + mmq_y / QI5_0),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q5_0<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q5_0_acc_ct1),
+ get_pointer(tile_x_d_q5_0_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q5_1_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q5_1_RDNA2;
+ mmq_y = MMQ_Y_Q5_1_RDNA2;
+ nwarps = NWARPS_Q5_1_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q5_1_RDNA1;
+ mmq_y = MMQ_Y_Q5_1_RDNA1;
+ nwarps = NWARPS_Q5_1_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q5_1_AMPERE;
+ mmq_y = MMQ_Y_Q5_1_AMPERE;
+ nwarps = NWARPS_Q5_1_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q5_1_PASCAL;
+ mmq_y = MMQ_Y_Q5_1_PASCAL;
+ nwarps = NWARPS_Q5_1_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:26: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q5_1_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_1_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI5_1) + mmq_y / QI5_1),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q5_1<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q5_1_acc_ct1),
+ get_pointer(tile_x_dm_q5_1_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:27: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q5_1_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_1_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI5_1) + mmq_y / QI5_1),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q5_1<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q5_1_acc_ct1),
+ get_pointer(tile_x_dm_q5_1_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q8_0_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q8_0_RDNA2;
+ mmq_y = MMQ_Y_Q8_0_RDNA2;
+ nwarps = NWARPS_Q8_0_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q8_0_RDNA1;
+ mmq_y = MMQ_Y_Q8_0_RDNA1;
+ nwarps = NWARPS_Q8_0_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q8_0_AMPERE;
+ mmq_y = MMQ_Y_Q8_0_AMPERE;
+ nwarps = NWARPS_Q8_0_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q8_0_PASCAL;
+ mmq_y = MMQ_Y_Q8_0_PASCAL;
+ nwarps = NWARPS_Q8_0_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:28: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_qs_q8_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<float, 1> tile_x_d_q8_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI8_0) + mmq_y / QI8_0),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q8_0<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_qs_q8_0_acc_ct1),
+ get_pointer(tile_x_d_q8_0_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:29: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_qs_q8_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<float, 1> tile_x_d_q8_0_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI8_0) + mmq_y / QI8_0),
+ cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q8_0<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_qs_q8_0_acc_ct1),
+ get_pointer(tile_x_d_q8_0_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q2_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q2_K_RDNA2;
+ mmq_y = MMQ_Y_Q2_K_RDNA2;
+ nwarps = NWARPS_Q2_K_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q2_K_RDNA1;
+ mmq_y = MMQ_Y_Q2_K_RDNA1;
+ nwarps = NWARPS_Q2_K_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q2_K_AMPERE;
+ mmq_y = MMQ_Y_Q2_K_AMPERE;
+ nwarps = NWARPS_Q2_K_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q2_K_PASCAL;
+ mmq_y = MMQ_Y_Q2_K_PASCAL;
+ nwarps = NWARPS_Q2_K_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:30: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q2_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q2_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI2_K) + mmq_y / QI2_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q2_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q2_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q2_K_acc_ct1),
+ get_pointer(tile_x_dm_q2_K_acc_ct1),
+ get_pointer(tile_x_sc_q2_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:31: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q2_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q2_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI2_K) + mmq_y / QI2_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q2_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q2_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q2_K_acc_ct1),
+ get_pointer(tile_x_dm_q2_K_acc_ct1),
+ get_pointer(tile_x_sc_q2_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q3_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+#if QK_K == 256
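+ // only the QK_K == 256 block layout is implemented for q3_K; with any other
+ // value this launcher compiles to a no-op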
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q3_K_RDNA2;
+ mmq_y = MMQ_Y_Q3_K_RDNA2;
+ nwarps = NWARPS_Q3_K_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q3_K_RDNA1;
+ mmq_y = MMQ_Y_Q3_K_RDNA1;
+ nwarps = NWARPS_Q3_K_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q3_K_AMPERE;
+ mmq_y = MMQ_Y_Q3_K_AMPERE;
+ nwarps = NWARPS_Q3_K_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q3_K_PASCAL;
+ mmq_y = MMQ_Y_Q3_K_PASCAL;
+ nwarps = NWARPS_Q3_K_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:32: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI3_K) + mmq_y / QI3_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_qh_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 2) + mmq_y / 2), cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q3_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q3_K_acc_ct1),
+ get_pointer(tile_x_dm_q3_K_acc_ct1),
+ get_pointer(tile_x_qh_q3_K_acc_ct1),
+ get_pointer(tile_x_sc_q3_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:33: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI3_K) + mmq_y / QI3_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_qh_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 2) + mmq_y / 2), cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q3_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 4) + mmq_y / 4), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q3_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q3_K_acc_ct1),
+ get_pointer(tile_x_dm_q3_K_acc_ct1),
+ get_pointer(tile_x_qh_q3_K_acc_ct1),
+ get_pointer(tile_x_sc_q3_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+#endif
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q4_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q4_K_RDNA2;
+ mmq_y = MMQ_Y_Q4_K_RDNA2;
+ nwarps = NWARPS_Q4_K_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q4_K_RDNA1;
+ mmq_y = MMQ_Y_Q4_K_RDNA1;
+ nwarps = NWARPS_Q4_K_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q4_K_AMPERE;
+ mmq_y = MMQ_Y_Q4_K_AMPERE;
+ nwarps = NWARPS_Q4_K_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q4_K_PASCAL;
+ mmq_y = MMQ_Y_Q4_K_PASCAL;
+ nwarps = NWARPS_Q4_K_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:34: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q4_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI4_K) + mmq_y / QI4_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q4_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q4_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q4_K_acc_ct1),
+ get_pointer(tile_x_dm_q4_K_acc_ct1),
+ get_pointer(tile_x_sc_q4_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:35: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q4_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q4_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI4_K) + mmq_y / QI4_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q4_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q4_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q4_K_acc_ct1),
+ get_pointer(tile_x_dm_q4_K_acc_ct1),
+ get_pointer(tile_x_sc_q4_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q5_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q5_K_RDNA2;
+ mmq_y = MMQ_Y_Q5_K_RDNA2;
+ nwarps = NWARPS_Q5_K_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q5_K_RDNA1;
+ mmq_y = MMQ_Y_Q5_K_RDNA1;
+ nwarps = NWARPS_Q5_K_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q5_K_AMPERE;
+ mmq_y = MMQ_Y_Q5_K_AMPERE;
+ nwarps = NWARPS_Q5_K_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q5_K_PASCAL;
+ mmq_y = MMQ_Y_Q5_K_PASCAL;
+ nwarps = NWARPS_Q5_K_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:36: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q5_K_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI5_K) + mmq_y / QI5_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q5_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q5_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q5_K_acc_ct1),
+ get_pointer(tile_x_dm_q5_K_acc_ct1),
+ get_pointer(tile_x_sc_q5_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:37: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_q5_K_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_q5_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI5_K) + mmq_y / QI5_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_q5_K_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q5_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_q5_K_acc_ct1),
+ get_pointer(tile_x_dm_q5_K_acc_ct1),
+ get_pointer(tile_x_sc_q5_K_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
+static void ggml_mul_mat_q6_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols_x,
+ const int nrows_x, const int ncols_y,
+ const int nrows_y, const int nrows_dst,
+ dpct::queue_ptr stream) try {
+
+ int id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(id = get_current_device_id()));
+ const int compute_capability = ggml_sycl_info().devices[id].cc;
+
+ int mmq_x, mmq_y, nwarps;
+ if (compute_capability >= VER_GEN13) {
+ mmq_x = MMQ_X_Q6_K_RDNA2;
+ mmq_y = MMQ_Y_Q6_K_RDNA2;
+ nwarps = NWARPS_Q6_K_RDNA2;
+ } else if (compute_capability >= VER_GEN12) {
+ mmq_x = MMQ_X_Q6_K_RDNA1;
+ mmq_y = MMQ_Y_Q6_K_RDNA1;
+ nwarps = NWARPS_Q6_K_RDNA1;
+ } else if (compute_capability >= VER_GEN9) {
+ mmq_x = MMQ_X_Q6_K_AMPERE;
+ mmq_y = MMQ_Y_Q6_K_AMPERE;
+ nwarps = NWARPS_Q6_K_AMPERE;
+ } else if (compute_capability >= VER_4VEC) {
+ mmq_x = MMQ_X_Q6_K_PASCAL;
+ mmq_y = MMQ_Y_Q6_K_PASCAL;
+ nwarps = NWARPS_Q6_K_PASCAL;
+ } else {
+ GGML_ABORT("fatal error");
+ }
+
+ const int block_num_x = (nrows_x + mmq_y - 1) / mmq_y;
+ const int block_num_y = (ncols_y + mmq_x - 1) / mmq_x;
+ const sycl::range<3> block_nums(1, block_num_y, block_num_x);
+ const sycl::range<3> block_dims(1, nwarps, WARP_SIZE);
+
+ if (nrows_x % mmq_y == 0) {
+ const bool need_check = false;
+ /*
+ DPCT1049:38: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI6_K) + mmq_y / QI6_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q6_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_acc_ct1),
+ get_pointer(tile_x_dm_acc_ct1),
+ get_pointer(tile_x_sc_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ } else {
+ const bool need_check = true;
+ /*
+ DPCT1049:39: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ {
+ dpct::has_capability_or_fail(stream->get_device(),
+ {sycl::aspect::fp16});
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<int, 1> tile_x_ql_acc_ct1(
+ sycl::range<1>(mmq_y * (2 * WARP_SIZE) + mmq_y), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_x_dm_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / QI6_K) + mmq_y / QI6_K),
+ cgh);
+ sycl::local_accessor<int, 1> tile_x_sc_acc_ct1(
+ sycl::range<1>(mmq_y * (WARP_SIZE / 8) + mmq_y / 8), cgh);
+ sycl::local_accessor<int, 1> tile_y_qs_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE), cgh);
+ sycl::local_accessor<sycl::half2, 1> tile_y_ds_acc_ct1(
+ sycl::range<1>(mmq_x * WARP_SIZE / QI8_1), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ mul_mat_q6_K<need_check>(
+ vx, vy, dst, ncols_x, nrows_x, ncols_y, nrows_y,
+ nrows_dst, item_ct1,
+ get_pointer(tile_x_ql_acc_ct1),
+ get_pointer(tile_x_dm_acc_ct1),
+ get_pointer(tile_x_sc_acc_ct1),
+ get_pointer(tile_y_qs_acc_ct1),
+ get_pointer(tile_y_ds_acc_ct1));
+ });
+ });
+ }
+ }
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
+
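+// host-side entry point: picks the per-type launcher from src0->type and runs
+// the quantized matmul for the [row_low, row_high) row slice on the stream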
+void ggml_sycl_op_mul_mat_q(
+ ggml_backend_sycl_context & ctx,
+ const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
+ const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
+ float *dst_dd_i, const int64_t row_low, const int64_t row_high,
+ const int64_t src1_ncols, const int64_t src1_padded_row_size,
+ const dpct::queue_ptr &stream) try {
+
+ const int64_t ne00 = src0->ne[0];
+
+ const int64_t ne10 = src1->ne[0];
+ GGML_ASSERT(ne10 % QK8_1 == 0);
+
+ const int64_t ne0 = dst->ne[0];
+
+ const int64_t row_diff = row_high - row_low;
+
+ int device_id;
+ SYCL_CHECK(
+ CHECK_TRY_ERROR(device_id = get_current_device_id()));
+
+ // the main device has a larger memory buffer to hold the results from all GPUs
+ // nrows_dst == nrows of the matrix that the dequantize_mul_mat kernel writes into
+ const int64_t nrows_dst = device_id == ctx.device ? ne0 : row_diff;
+
+ switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ ggml_mul_mat_q4_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q4_1:
+ ggml_mul_mat_q4_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q5_0:
+ ggml_mul_mat_q5_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q5_1:
+ ggml_mul_mat_q5_1_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q8_0:
+ ggml_mul_mat_q8_0_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q2_K:
+ ggml_mul_mat_q2_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q3_K:
+ ggml_mul_mat_q3_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q4_K:
+ ggml_mul_mat_q4_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q5_K:
+ ggml_mul_mat_q5_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ case GGML_TYPE_Q6_K:
+ ggml_mul_mat_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i, dst_dd_i, ne00, row_diff, src1_ncols, src1_padded_row_size, nrows_dst, stream);
+ break;
+ default:
+ GGML_ABORT("fatal error");
+ }
+
+ GGML_UNUSED(src1);
+ GGML_UNUSED(dst);
+ GGML_UNUSED(src1_ddf_i);
+}
+catch (sycl::exception const &exc) {
+ std::cerr << exc.what() << "Exception caught at file:" << __FILE__
+ << ", line:" << __LINE__ << std::endl;
+ std::exit(1);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/mmq.hpp b/llama.cpp/ggml/src/ggml-sycl/mmq.hpp
new file mode 100644
index 0000000..3f5297a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/mmq.hpp
@@ -0,0 +1,33 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_MMQ_HPP
+#define GGML_SYCL_MMQ_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_mul_mat_q(
+ ggml_backend_sycl_context & ctx,
+ const ggml_tensor* src0,
+ const ggml_tensor* src1,
+ ggml_tensor* dst,
+ const char* src0_dd_i,
+ const float* src1_ddf_i,
+ const char* src1_ddq_i,
+ float* dst_dd_i,
+ const int64_t row_low,
+ const int64_t row_high,
+ const int64_t src1_ncols,
+ const int64_t src1_padded_row_size,
+ const dpct::queue_ptr& stream);
+
+#endif // GGML_SYCL_MMQ_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp b/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp
new file mode 100644
index 0000000..316aa0d
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/mmvq.cpp
@@ -0,0 +1,1156 @@
+#include "mmvq.hpp"
+
+#include "ggml.h"
+#include "common.hpp"
+#include "quants.hpp"
+#include "vecdotq.hpp"
+
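+// Matrix-vector product for the "reordered" tensor layout, where the quants
+// and the scales of all blocks are stored in separate contiguous regions
+// (see block_q_t in quants.hpp). One subgroup computes one output row: its
+// work items stride over the row's quant blocks and the partial dot
+// products are combined with reduce_over_group before the leader writes
+// the result.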
+template <typename reorder_vec_dot_q_sycl>
+static void mul_mat_vec_q_reorder(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols, const int nrows, const sycl::nd_item<3> & nd_item) {
+ using block_type = ggml_sycl_reordered::block_q_t<reorder_vec_dot_q_sycl::gtype>;
+ using block_traits = typename block_type::traits;
+
+ const auto sg = nd_item.get_sub_group();
+ const int sg_range = sg.get_group_linear_range();
+ const int workgroup_id = nd_item.get_group_linear_id();
+ const int sg_id = sg.get_group_linear_id();
+ const int row = workgroup_id * sg_range + sg_id;
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / block_traits::qk;
+ constexpr int blocks_per_subgroup = ceil_div(block_traits::vdr_mmvq * WARP_SIZE, block_traits::qi);
+ constexpr int block_elements_per_subgroup = block_traits::qi / block_traits::vdr_mmvq;
+ const int nblocks = nrows * (ncols / block_traits::qk);
+
+ static_assert(blocks_per_subgroup > 0);
+ static_assert(block_elements_per_subgroup > 0);
+
+ float partial_sum = 0.0f;
+ for (int i = sg.get_local_linear_id() / block_elements_per_subgroup; i < blocks_per_row; i += blocks_per_subgroup) {
+ const int ibx = row * blocks_per_row + i; // x block index
+
+ const auto bx_offset = block_type::get_block_offset(ibx, nblocks);
+ const auto d_offset = block_type::get_d_offset(nrows, ncols, ibx);
+ // Y block index that aligns with ibx
+ const int iby = i * block_type::block_to_q8_1_ratio();
+ const int8_t* q8_1_quant_ptr = (const int8_t*)vy + iby * QK8_1;
+ const sycl::half2* q8_1_ds_ptr = (const sycl::half2*)((const char*)vy + ncols + iby * sizeof(sycl::half2));
+
+#pragma unroll
+ for (int elem = 0; elem < block_elements_per_subgroup; elem += WARP_SIZE) {
+ // x block quant index when casting the quants to int
+ const int iqs = elem + block_traits::vdr_mmvq * (sg.get_local_linear_id() % block_elements_per_subgroup);
+
+ partial_sum += reorder_vec_dot_q_sycl()(vx, bx_offset, d_offset, q8_1_quant_ptr, q8_1_ds_ptr, iqs);
+ }
+ }
+
+ auto sum = sycl::reduce_over_group(nd_item.get_sub_group(), partial_sum, std::plus<>());
+
+ if (sg.leader()) {
+ dst[row] = sum;
+ }
+}
+
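+// Generic MMVQ kernel for the standard array-of-blocks layout: each
+// subgroup handles one output row, accumulating vec_dot_q_sycl results
+// over the row's quant blocks, then combines the partial sums with a
+// butterfly XOR shuffle; lane 0 writes the final dot product.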
+template <int qk, int qi, typename block_q_t, int vdr, vec_dot_q_sycl_t vec_dot_q_sycl>
+static void mul_mat_vec_q(const void * __restrict__ vx, const void * __restrict__ vy, float * __restrict__ dst,
+ const int ncols, const int nrows, const sycl::nd_item<3> & item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ constexpr int blocks_per_warp = (vdr * WARP_SIZE + qi - 1) / qi; // Ensuring blocks_per_warp > 0
+
+ assert(blocks_per_warp > 0);
+
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row; i += blocks_per_warp) {
+ const int ibx = row * blocks_per_row + i; // x block index
+
+ const int iby = i * (qk / QK8_1); // y block index that aligns with ibx
+
+ for (size_t elem = 0; elem < qi / vdr; elem += WARP_SIZE) {
+ const int iqs = elem + vdr * (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_q_sycl(&x[ibx], &y[iby], iqs);
+ }
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp += dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
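+// The kernels below are per-type variants of mul_mat_vec_q for the IQ
+// quantization formats; they differ only in the vec_dot_* function invoked
+// and in the lookup tables (grids and sign masks) some of those dot
+// products require.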
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq2_xxs_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq2_xxs_q8_1(&x[ibx], &y[iby], iqs, iq2xxs_grid, ksigns_iq2xs, kmask_iq2xs);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq2_xs_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq2_xs_q8_1(&x[ibx], &y[iby], iqs, iq2xs_grid, ksigns64);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq2_s_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq2_s_q8_1(&x[ibx], &y[iby], iqs);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq3_xxs_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq3_xxs_q8_1(&x[ibx], &y[iby], iqs, iq3xxs_grid, ksigns64);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq3_s_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq3_s_q8_1(&x[ibx], &y[iby], iqs, iq3s_grid);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq1_s_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq1_s_q8_1(&x[ibx], &y[iby], iqs, iq1s_grid_gpu);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq1_m_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq1_m_q8_1(&x[ibx], &y[iby], iqs);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq4_nl_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq4_nl_q8_1(&x[ibx], &y[iby], iqs);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
+
+template <int qk, int qi, typename block_q_t, int vdr>
+static void mul_mat_vec_q_iq4_xs_q8_1(const void *__restrict__ vx,
+ const void *__restrict__ vy,
+ float *__restrict__ dst, const int ncols,
+ const int nrows,
+ const sycl::nd_item<3> &item_ct1) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+
+ if (row >= nrows) {
+ return;
+ }
+
+ const int blocks_per_row = ncols / qk;
+ const int blocks_per_warp = vdr * WARP_SIZE / qi;
+ assert(blocks_per_warp > 0);
+ // partial sum for each thread
+ float tmp = 0.0f;
+
+ const block_q_t * x = (const block_q_t *) vx;
+ const block_q8_1 * y = (const block_q8_1 *) vy;
+
+ for (int i = item_ct1.get_local_id(2) / (qi / vdr); i < blocks_per_row;
+ i += blocks_per_warp) {
+ const int ibx = row*blocks_per_row + i; // x block index
+
+ const int iby = i * (qk/QK8_1); // y block index that aligns with ibx
+
+ const int iqs =
+ vdr *
+ (item_ct1.get_local_id(2) %
+ (qi / vdr)); // x block quant index when casting the quants to int
+
+ tmp += vec_dot_iq4_xs_q8_1(&x[ibx], &y[iby], iqs);
+ }
+
+ // sum up partial sums and write back result
+#pragma unroll
+ for (int mask = WARP_SIZE / 2; mask > 0; mask >>= 1) {
+ tmp +=
+ dpct::permute_sub_group_by_xor(item_ct1.get_sub_group(), tmp, mask);
+ }
+
+ if (item_ct1.get_local_id(2) == 0) {
+ dst[row] = tmp;
+ }
+}
+
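+// Launcher for the reordered q4_0 path: num_subgroups subgroups per
+// work-group and one subgroup (WARP_SIZE work items) per output row, hence
+// the requirement that the number of row blocks is a multiple of
+// num_subgroups.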
+static void reorder_mul_mat_vec_q4_0_q8_1_sycl(const void * vx, const void * vy, float * dst, const int ncols,
+ const int nrows, dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK4_0 == 0);
+ const int block_num_y = ceil_div(nrows, GGML_SYCL_MMV_Y);
+ constexpr size_t num_subgroups = 16;
+ GGML_ASSERT(block_num_y % num_subgroups == 0);
+
+ const sycl::range<3> global_size(1, GGML_SYCL_MMV_Y, (block_num_y * WARP_SIZE));
+ const sycl::range<3> workgroup_size(1, GGML_SYCL_MMV_Y, num_subgroups * WARP_SIZE);
+
+ stream->submit([&](sycl::handler & cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(global_size, workgroup_size),
+ [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_reorder<reorder_vec_dot_q_sycl<GGML_TYPE_Q4_0>>(vx, vy, dst, ncols, nrows,
+ nd_item);
+ });
+ });
+}
+
+static void mul_mat_vec_q4_0_q8_1_sycl(const void * vx, const void * vy, float * dst, const int ncols, const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK4_0 == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+
+ {
+ stream->submit([&](sycl::handler & cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK4_0, QI4_0, block_q4_0, VDR_Q4_0_Q8_1_MMVQ, vec_dot_q4_0_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_q4_1_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK4_1 == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK4_1, QI4_1, block_q4_1,
+ VDR_Q4_1_Q8_1_MMVQ, vec_dot_q4_1_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_mxfp4_q8_1_sycl(const void * vx, const void * vy, float * dst, const int ncols, const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_MXFP4 == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+
+ {
+ stream->submit([&](sycl::handler & cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK_MXFP4, QI_MXFP4, block_mxfp4, VDR_MXFP4_Q8_1_MMVQ, vec_dot_mxfp4_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+
+static void mul_mat_vec_q5_0_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK5_0 == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK5_0, QI5_0, block_q5_0,
+ VDR_Q5_0_Q8_1_MMVQ, vec_dot_q5_0_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_q5_1_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK5_1 == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK5_1, QI5_1, block_q5_1,
+ VDR_Q5_1_Q8_1_MMVQ, vec_dot_q5_1_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_q8_0_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK8_0 == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK8_0, QI8_0, block_q8_0,
+ VDR_Q8_0_Q8_1_MMVQ, vec_dot_q8_0_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_q2_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK_K, QI2_K, block_q2_K,
+ VDR_Q2_K_Q8_1_MMVQ, vec_dot_q2_K_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_q3_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK_K, QI3_K, block_q3_K,
+ VDR_Q3_K_Q8_1_MMVQ, vec_dot_q3_K_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_q4_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK_K, QI4_K, block_q4_K,
+ VDR_Q4_K_Q8_1_MMVQ, vec_dot_q4_K_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void reorder_mul_mat_vec_q4_k_q8_1_sycl(const void * vx, const void * vy, float * dst, const int ncols,
+ const int nrows, dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+
+ const int block_num_y = ceil_div(nrows, GGML_SYCL_MMV_Y);
+ constexpr size_t num_subgroups = 16;
+ GGML_ASSERT(block_num_y % num_subgroups == 0);
+
+ const sycl::range<3> global_size(1, GGML_SYCL_MMV_Y, block_num_y * WARP_SIZE);
+ const sycl::range<3> workgroup_size(1, GGML_SYCL_MMV_Y, num_subgroups * WARP_SIZE);
+
+ stream->submit([&](sycl::handler & cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(global_size, workgroup_size),
+ [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_reorder<reorder_vec_dot_q_sycl<GGML_TYPE_Q4_K>>(vx, vy, dst, ncols,
+ nrows, nd_item);
+ });
+ });
+}
+
+
+static void mul_mat_vec_q5_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK_K, QI5_K, block_q5_K,
+ VDR_Q5_K_Q8_1_MMVQ, vec_dot_q5_K_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void reorder_mul_mat_vec_q6_k_q8_1_sycl(const void * vx, const void * vy, float * dst, const int ncols,
+ const int nrows, dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = ceil_div(nrows, GGML_SYCL_MMV_Y);
+ constexpr size_t num_subgroups = 16;
+ GGML_ASSERT(block_num_y % num_subgroups == 0);
+
+ const sycl::range<3> global_size(1, GGML_SYCL_MMV_Y, block_num_y * WARP_SIZE);
+ const sycl::range<3> workgroup_size(1, GGML_SYCL_MMV_Y, num_subgroups * WARP_SIZE);
+
+ stream->submit([&](sycl::handler & cgh) {
+ cgh.parallel_for(sycl::nd_range<3>(global_size, workgroup_size),
+ [=](sycl::nd_item<3> nd_item) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_reorder<reorder_vec_dot_q_sycl<GGML_TYPE_Q6_K>>(vx, vy, dst, ncols, nrows,
+ nd_item);
+ });
+ });
+}
+
+static void mul_mat_vec_q6_K_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q<QK_K, QI6_K, block_q6_K,
+ VDR_Q6_K_Q8_1_MMVQ, vec_dot_q6_K_q8_1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+
+static void mul_mat_vec_iq2_xxs_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq2_xxs_q8_1<QK_K, QI2_XXS/2, block_iq2_xxs, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq2_xs_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ stream->submit([&](sycl::handler & cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq2_xs_q8_1<QK_K, QI2_XS/2, block_iq2_xs, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq2_s_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq2_s_q8_1<QK_K, QI2_S/2, block_iq2_s, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq3_xxs_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq3_xxs_q8_1<QK_K, QI3_XXS/2, block_iq3_xxs, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq3_s_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq3_s_q8_1<QK_K, QI3_S/2, block_iq3_s, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq1_s_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq1_s_q8_1<QK_K, QI1_S, block_iq1_s, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq1_m_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq1_m_q8_1<QK_K, QI1_S, block_iq1_m, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq4_nl_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK4_NL == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq4_nl_q8_1<QK4_NL, QI4_NL, block_iq4_nl, 2>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
+static void mul_mat_vec_iq4_xs_q8_1_sycl(const void *vx, const void *vy,
+ float *dst, const int ncols,
+ const int nrows,
+ dpct::queue_ptr stream) {
+ GGML_ASSERT(ncols % QK_K == 0);
+ const int block_num_y = (nrows + GGML_SYCL_MMV_Y - 1) / GGML_SYCL_MMV_Y;
+ const sycl::range<3> block_nums(1, 1, block_num_y);
+ const sycl::range<3> block_dims(1, GGML_SYCL_MMV_Y, WARP_SIZE);
+ {
+
+ stream->submit([&](sycl::handler &cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ mul_mat_vec_q_iq4_xs_q8_1<QK_K, QI4_XS/4, block_iq4_xs, 1>(
+ vx, vy, dst, ncols, nrows, item_ct1);
+ });
+ });
+ }
+}
+
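+// MMVQ entry point: launches one kernel per src1 column and dispatches on
+// src0's type. For Q4_0, Q4_K and Q6_K the tensor extra's "reorder" flag
+// selects the optimized reordered-layout kernel instead of the generic one.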
+void ggml_sycl_op_mul_mat_vec_q(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1,
+ ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low,
+ const int64_t row_high, const int64_t src1_ncols, const int64_t src1_padded_col_size,
+ const dpct::queue_ptr & stream) {
+ const int64_t ne10 = src1->ne[0];
+ GGML_ASSERT(ne10 % QK8_1 == 0);
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t row_diff = row_high - row_low;
+
+ int id;
+ SYCL_CHECK(CHECK_TRY_ERROR(id = get_current_device_id()));
+ const size_t q8_1_ts = sizeof(block_q8_1);
+ const size_t q8_1_bs = QK8_1;
+ // the main device has a larger memory buffer to hold the results from all GPUs
+ // nrows_dst == nrows of the matrix that the kernel writes into
+
+ for (int i = 0; i < src1_ncols; i++) {
+ const size_t src1_ddq_i_offset = i * src1_padded_col_size * q8_1_ts / q8_1_bs;
+ const char * src1_ddq_i_bs = src1_ddq_i + src1_ddq_i_offset;
+ float * dst_dd_i_bs = dst_dd_i + i * dst->ne[0];
+ switch (src0->type) {
+ case GGML_TYPE_Q4_0:
+ if ((ggml_tensor_extra_gpu *) dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) {
+ GGML_SYCL_DEBUG("Calling reorder_mul_mat_vec_q4_0_q8_1_sycl\n");
+ reorder_mul_mat_vec_q4_0_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ } else {
+ GGML_SYCL_DEBUG("Calling mul_mat_vec_q4_0_q8_1_sycl\n");
+ mul_mat_vec_q4_0_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ }
+ break;
+ case GGML_TYPE_Q4_1:
+ mul_mat_vec_q4_1_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_0:
+ mul_mat_vec_q5_0_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q5_1:
+ mul_mat_vec_q5_1_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q8_0:
+ mul_mat_vec_q8_0_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q2_K:
+ mul_mat_vec_q2_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q3_K:
+ mul_mat_vec_q3_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q4_K:
+ if ((ggml_tensor_extra_gpu *) dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) {
+ GGML_SYCL_DEBUG("Calling reorder_mul_mat_vec_q4_k_q8_1_sycl\n");
+ reorder_mul_mat_vec_q4_k_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ } else {
+ GGML_SYCL_DEBUG("Calling mul_mat_vec_q4_K_q8_1_sycl\n");
+ mul_mat_vec_q4_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ }
+ break;
+ case GGML_TYPE_Q5_K:
+ mul_mat_vec_q5_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_Q6_K:
+ if ((ggml_tensor_extra_gpu *) dst->src[0]->extra &&
+ ((ggml_tensor_extra_gpu *) dst->src[0]->extra)->optimized_feature.reorder) {
+ GGML_SYCL_DEBUG("Calling reorder_mul_mat_vec_q6_k_q8_1_sycl\n");
+ reorder_mul_mat_vec_q6_k_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ } else {
+ GGML_SYCL_DEBUG("Calling mul_mat_vec_q6_k_q8_1_sycl\n");
+ mul_mat_vec_q6_K_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ }
+ break;
+ case GGML_TYPE_IQ1_S:
+ mul_mat_vec_iq1_s_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ1_M:
+ mul_mat_vec_iq1_m_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ2_XXS:
+ mul_mat_vec_iq2_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ2_XS:
+ mul_mat_vec_iq2_xs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ2_S:
+ mul_mat_vec_iq2_s_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ3_XXS:
+ mul_mat_vec_iq3_xxs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ3_S:
+ mul_mat_vec_iq3_s_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ4_NL:
+ mul_mat_vec_iq4_nl_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_IQ4_XS:
+ mul_mat_vec_iq4_xs_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ case GGML_TYPE_MXFP4:
+ mul_mat_vec_mxfp4_q8_1_sycl(src0_dd_i, src1_ddq_i_bs, dst_dd_i_bs, ne00, row_diff, stream);
+ break;
+ default:
+ GGML_ABORT("fatal error");
+ }
+ }
+ GGML_UNUSED(src1);
+ GGML_UNUSED(dst);
+ GGML_UNUSED(src1_ddf_i);
+ GGML_UNUSED(ctx);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp b/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp
new file mode 100644
index 0000000..049b43d
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/mmvq.hpp
@@ -0,0 +1,27 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_MMVQ_HPP
+#define GGML_SYCL_MMVQ_HPP
+
+#include "common.hpp"
+
+
+void ggml_sycl_op_mul_mat_vec_q(
+ ggml_backend_sycl_context & ctx,
+ const ggml_tensor *src0, const ggml_tensor *src1, ggml_tensor *dst,
+ const char *src0_dd_i, const float *src1_ddf_i, const char *src1_ddq_i,
+ float *dst_dd_i, const int64_t row_low, const int64_t row_high,
+ const int64_t src1_ncols, const int64_t src1_padded_row_size,
+ const dpct::queue_ptr &stream);
+
+#endif // GGML_SYCL_MMVQ_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/norm.cpp b/llama.cpp/ggml/src/ggml-sycl/norm.cpp
new file mode 100644
index 0000000..00702b5
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/norm.cpp
@@ -0,0 +1,654 @@
+#include "norm.hpp"
+#include "ggml-sycl/common.hpp"
+#include "ggml-sycl/presets.hpp"
+
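+// LayerNorm kernel: computes mean and variance in a single pass by
+// accumulating (sum, sum of squares) in a float2, reducing first within
+// each subgroup and, for block sizes above WARP_SIZE, across subgroups
+// through the s_sum scratch buffer in local memory.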
+static void norm_f32(const float* x, float* dst, const int ncols, const int64_t stride_row, const int64_t stride_channel,
+ const int64_t stride_sample, const float eps, const sycl::nd_item<3>& item_ct1, sycl::float2* s_sum, int block_size) {
+
+ const int nrows = item_ct1.get_group_range(2);
+ const int nchannels = item_ct1.get_group_range(1);
+
+ const int nthreads = item_ct1.get_local_range(2);
+ const int sample = item_ct1.get_group(0);
+ const int channel = item_ct1.get_group(1);
+ const int row = item_ct1.get_group(2);
+
+ const int tid = item_ct1.get_local_id(2);
+ const int nwarps = nthreads / WARP_SIZE;
+
+ const auto strided_offset = calculate_offset<3>({stride_sample, stride_channel, stride_row}, {sample, channel, row});
+ const auto packed_offset = calculate_offset<3>({nchannels * nrows * ncols, nrows * ncols, ncols}, {sample, channel, row});
+
+ x += strided_offset;
+ dst += packed_offset;
+
+ sycl::float2 mean_var = sycl::float2(0.f, 0.f);
+
+ for (int col = tid; col < ncols; col += block_size) {
+ const float xi = x[col];
+ mean_var.x() += xi;
+ mean_var.y() += xi * xi;
+ }
+
+ // sum up partial sums
+ mean_var = warp_reduce_sum(mean_var, item_ct1);
+ if (block_size > WARP_SIZE) {
+ const auto sub_group = item_ct1.get_sub_group();
+ const auto sg_id = sub_group.get_group_linear_id();
+ const auto wi_in_sg = sub_group.get_local_linear_id();
+ if (wi_in_sg == 0) {
+ s_sum[sg_id] = mean_var;
+ }
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+ mean_var = 0.f;
+ const size_t nreduce = ceil_div(nwarps, WARP_SIZE);
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ mean_var += s_sum[wi_in_sg + i * WARP_SIZE];
+ }
+ mean_var = warp_reduce_sum(mean_var, item_ct1);
+ }
+
+ const float mean = mean_var.x() / ncols;
+ const float var = mean_var.y() / ncols - mean * mean;
+ const float inv_std = sycl::rsqrt(var + eps);
+
+ for (int col = tid; col < ncols; col += block_size) {
+ dst[col] = (x[col] - mean) * inv_std;
+ }
+}
+
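+// GroupNorm kernel: two passes over each group of elements, first reducing
+// the sum to get the mean, then reducing the squared deviations (which are
+// written to dst and rescaled in place once the variance is known).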
+static void group_norm_f32(const float* x, float* dst, const int group_size, const int ne_elements, const float eps,
+ const sycl::nd_item<3>& item_ct1, float* s_sum, int block_size) {
+ int start = item_ct1.get_group(2) * group_size;
+ int end = start + group_size;
+ const int nthreads = item_ct1.get_local_range(2);
+ const int nwarps = nthreads / WARP_SIZE;
+ start += item_ct1.get_local_id(2);
+ const size_t nreduce = ceil_div(nwarps, WARP_SIZE);
+
+ if (end >= ne_elements) {
+ end = ne_elements;
+ }
+
+ float tmp = 0.0f; // partial sum for thread in warp
+
+ for (int j = start; j < end; j += block_size) {
+ tmp += x[j];
+ }
+
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ if (block_size > WARP_SIZE) {
+
+ int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+ if (lane_id == 0) {
+ s_sum[warp_id] = tmp;
+ }
+ /*
+ DPCT1118:1: SYCL group functions and algorithms must be encountered in
+ converged control flow. You may need to adjust the code.
+ */
+ /*
+ DPCT1065:54: Consider replacing sycl::nd_item::barrier() with
+ sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
+ better performance if there is no access to global memory.
+ */
+ item_ct1.barrier();
+ tmp = 0.f;
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ tmp += s_sum[lane_id + i * WARP_SIZE];
+ }
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ }
+
+ float mean = tmp / group_size;
+ tmp = 0.0f;
+
+ for (int j = start; j < end; j += block_size) {
+ float xi = x[j] - mean;
+ dst[j] = xi;
+ tmp += xi * xi;
+ }
+
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ if (block_size > WARP_SIZE) {
+
+ int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+ if (lane_id == 0) {
+ s_sum[warp_id] = tmp;
+ }
+ /*
+ DPCT1118:2: SYCL group functions and algorithms must be encountered in
+ converged control flow. You may need to adjust the code.
+ */
+ /*
+ DPCT1065:55: Consider replacing sycl::nd_item::barrier() with
+ sycl::nd_item::barrier(sycl::access::fence_space::local_space) for
+ better performance if there is no access to global memory.
+ */
+ item_ct1.barrier();
+ tmp = 0.f;
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ tmp += s_sum[lane_id + i * WARP_SIZE];
+ }
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ }
+
+ float variance = tmp / group_size;
+ float scale = sycl::rsqrt(variance + eps);
+ for (int j = start; j < end; j += block_size) {
+ dst[j] *= scale;
+ }
+}
+
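+// RMSNorm kernel: like norm_f32 but without mean subtraction; the row is
+// scaled by rsqrt(mean(x^2) + eps).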
+static void rms_norm_f32(const float* x, float* dst, const int ncols, const int64_t stride_row, const int64_t stride_channel,
+ const int64_t stride_sample, const float eps, const sycl::nd_item<3>& item_ct1, float* s_sum, int block_size) {
+
+ const int nrows = item_ct1.get_group_range(2);
+ const int nchannels = item_ct1.get_group_range(1);
+
+ const int sample = item_ct1.get_group(0);
+ const int channel = item_ct1.get_group(1);
+ const int row = item_ct1.get_group(2);
+
+ const int nthreads = item_ct1.get_local_range(2);
+
+ const int tid = item_ct1.get_local_id(2);
+ const int nwarps = nthreads / WARP_SIZE;
+
+ const auto strided_offset = calculate_offset<3>({stride_sample, stride_channel, stride_row}, {sample, channel, row});
+ const auto packed_offset = calculate_offset<3>({nchannels * nrows * ncols, nrows * ncols, ncols}, {sample, channel, row});
+
+ x += strided_offset;
+ dst += packed_offset;
+
+
+ float tmp = 0.0f; // partial sum for thread in warp
+
+ for (int col = tid; col < ncols; col += block_size) {
+ const float xi = x[col];
+ tmp += xi * xi;
+ }
+
+ // sum up partial sums
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ if (block_size > WARP_SIZE) {
+ const auto sub_group = item_ct1.get_sub_group();
+ const auto sg_id = sub_group.get_group_linear_id();
+ const auto wi_in_sg = sub_group.get_local_linear_id();
+ if (wi_in_sg == 0) {
+ s_sum[sg_id] = tmp;
+ }
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+ const size_t nreduce = ceil_div(nwarps, WARP_SIZE);
+ tmp = 0.f;
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ tmp += s_sum[wi_in_sg + i * WARP_SIZE];
+ }
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ }
+
+ const float mean = tmp / ncols;
+ const float scale = sycl::rsqrt(mean + eps);
+
+ for (int col = tid; col < ncols; col += block_size) {
+ dst[col] = scale * x[col];
+ }
+}
+
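+// L2-normalization kernel: scales each row by rsqrt(sum(x^2)), clamped
+// from below by eps^2 to avoid dividing by zero on all-zero rows.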
+static void l2_norm_f32(const float* x, float* dst, const int ncols, const float eps,
+ const sycl::nd_item<3>& item_ct1, float* s_sum, int block_size) {
+ const int row = item_ct1.get_group(2) * item_ct1.get_local_range(1) +
+ item_ct1.get_local_id(1);
+ const int tid = item_ct1.get_local_id(2);
+ const int nthreads = item_ct1.get_local_range(2);
+ const int nwarps = nthreads / WARP_SIZE;
+ float tmp = 0.0f; // partial sum for thread in warp
+
+ for (int col = tid; col < ncols; col += block_size) {
+ const float xi = x[row * ncols + col];
+ tmp += xi * xi;
+ }
+
+ // sum up partial sums
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ if (block_size > WARP_SIZE) {
+
+ int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+ if (lane_id == 0) {
+ s_sum[warp_id] = tmp;
+ }
+ /*
+ DPCT1118:3: SYCL group functions and algorithms must be encountered in
+ converged control flow. You may need to adjust the code.
+ */
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+ const size_t nreduce = ceil_div(nwarps, WARP_SIZE);
+ tmp = 0.f;
+ for (size_t i = 0; i < nreduce; i += 1)
+ {
+ tmp += s_sum[lane_id + i * WARP_SIZE];
+ }
+ tmp = warp_reduce_sum(tmp, item_ct1);
+ }
+
+ const float scale = sycl::rsqrt(sycl::max(tmp, eps * eps));
+
+ for (int col = tid; col < ncols; col += block_size) {
+ dst[row * ncols + col] = scale * x[row * ncols + col];
+ }
+}
+
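+// Launch heuristic shared by the norm launchers below: rows shorter than
+// 1024 elements use a single subgroup per row (no local memory needed);
+// longer rows use the device's maximum work-group size plus a local
+// scratch buffer for the cross-subgroup reduction.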
+static void norm_f32_sycl(const float * x, float * dst, const int ncols, const int nrows, const int nchannels, const int nsamples,
+ const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample,
+ const float eps, queue_ptr stream, int device) {
+
+ const sycl::range<3> global_dims(nsamples, nchannels, nrows);
+ if (ncols < 1024) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ stream->submit([&](sycl::handler& cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(global_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, nullptr, WARP_SIZE);
+ });
+ });
+ }
+ else {
+ const int work_group_size = ggml_sycl_info().max_work_group_sizes[device];
+ assert(work_group_size % (WARP_SIZE * WARP_SIZE) == 0);
+ const sycl::range<3> block_dims(1, 1, work_group_size);
+ /*
+ DPCT1049:17: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<sycl::float2, 1> s_sum_acc_ct1(
+ sycl::range<1>(work_group_size / WARP_SIZE), cgh);
+ cgh.parallel_for(
+ sycl::nd_range<3>(global_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, get_pointer(s_sum_acc_ct1), work_group_size);
+ });
+ });
+ }
+}
+
+static void group_norm_f32_sycl(const float* x, float* dst,
+ const int num_groups, const float eps, const int group_size,
+ const int ne_elements, queue_ptr stream, int device) {
+ if (group_size < 1024) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ stream->submit([&](sycl::handler& cgh) {
+ const float eps_ct4 = eps;
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ group_norm_f32(
+ x, dst, group_size, ne_elements, eps_ct4, item_ct1,
+ nullptr, WARP_SIZE);
+ });
+ });
+ }
+ else {
+ const int work_group_size = ggml_sycl_info().max_work_group_sizes[device];
+ assert(work_group_size % (WARP_SIZE * WARP_SIZE) == 0);
+ const sycl::range<3> block_dims(1, 1, work_group_size);
+ /*
+ DPCT1049:18: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE),
+ cgh);
+
+ const float eps_ct4 = eps;
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, num_groups) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ group_norm_f32(x, dst, group_size, ne_elements,
+ eps_ct4, item_ct1,
+ get_pointer(s_sum_acc_ct1), work_group_size);
+ });
+ });
+ }
+}
+
+static void rms_norm_f32_sycl(const float* x, float* dst, const int ncols, const int nrows, const int nchannels, const int nsamples,
+ const int64_t stride_row, const int64_t stride_channel, const int64_t stride_sample, const float eps, queue_ptr stream, int device) {
+ // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE);
+
+ const sycl::range<3> global_dims(nsamples, nchannels, nrows);
+ if (ncols < 1024) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ stream->submit([&](sycl::handler& cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(global_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ rms_norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, nullptr, WARP_SIZE);
+ });
+ });
+ }
+ else {
+ const int work_group_size = ggml_sycl_info().max_work_group_sizes[device];
+ assert(work_group_size % (WARP_SIZE * WARP_SIZE) == 0);
+ const sycl::range<3> block_dims(1, 1, work_group_size);
+ /*
+ DPCT1049:19: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE),
+ cgh);
+ cgh.parallel_for(
+ sycl::nd_range<3>(global_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ rms_norm_f32(x, dst, ncols, stride_row, stride_channel, stride_sample, eps, item_ct1, get_pointer(s_sum_acc_ct1), work_group_size);
+ });
+ });
+ }
+}
+
+static void l2_norm_f32_sycl(const float* x, float* dst, const int ncols,
+ const int nrows, const float eps,
+ queue_ptr stream, int device) {
+ // printf("%s ncols=%d, nrows=%d, WARP_SIZE=%d\n", __func__, ncols, nrows, WARP_SIZE);
+ if (ncols < 1024) {
+ const sycl::range<3> block_dims(1, 1, WARP_SIZE);
+ stream->submit([&](sycl::handler& cgh) {
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ l2_norm_f32(x, dst, ncols, eps, item_ct1,
+ nullptr, WARP_SIZE);
+ });
+ });
+ }
+ else {
+ const int work_group_size = ggml_sycl_info().max_work_group_sizes[device];
+ assert(work_group_size % (WARP_SIZE * WARP_SIZE) == 0);
+ const sycl::range<3> block_dims(1, 1, work_group_size);
+ /*
+ DPCT1049:19: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> s_sum_acc_ct1(sycl::range<1>(work_group_size / WARP_SIZE),
+ cgh);
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, nrows) * block_dims,
+ block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ l2_norm_f32(x, dst, ncols, eps, item_ct1,
+ get_pointer(s_sum_acc_ct1), work_group_size);
+ });
+ });
+ }
+}
+
+void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+ const ggml_tensor * src0 = dst->src[0];
+
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ float eps;
+ memcpy(&eps, dst->op_params, sizeof(float));
+ GGML_ASSERT(eps >= 0.0f);
+ const size_t ts0 = ggml_type_size(src0->type);
+ GGML_ASSERT(nb00 == ts0);
+ const int64_t s01 = nb01 / ts0;
+ const int64_t s02 = nb02 / ts0;
+ const int64_t s03 = nb03 / ts0;
+
+ norm_f32_sycl(src0_dd, dst_dd, ne00, ne01, ne02, ne03, s01, s02, s03, eps, main_stream, ctx.device);
+}
+
+void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ int num_groups = dst->op_params[0];
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ float eps;
+ memcpy(&eps, dst->op_params + 1, sizeof(float));
+
+ int group_size = dst->src[0]->ne[0] * dst->src[0]->ne[1] * ((dst->src[0]->ne[2] + num_groups - 1) / num_groups);
+ group_norm_f32_sycl(src0_dd, dst_dd, num_groups, eps, group_size, dst->src[0]->ne[0] * dst->src[0]->ne[1] * dst->src[0]->ne[2], main_stream, ctx.device);
+}
+
+void ggml_sycl_op_rms_norm(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+
+ const ggml_tensor * src0 = dst->src[0];
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ float eps;
+ memcpy(&eps, dst->op_params, sizeof(float));
+
+ GGML_TENSOR_UNARY_OP_LOCALS
+ const size_t ts0 = ggml_type_size(src0->type);
+ GGML_ASSERT(nb00 == ts0);
+ const int64_t s01 = nb01 / ts0;
+ const int64_t s02 = nb02 / ts0;
+ const int64_t s03 = nb03 / ts0;
+ rms_norm_f32_sycl(src0_dd, dst_dd, ne00, ne01, ne02, ne03, s01, s02, s03, eps, main_stream, ctx.device);
+}
+
+void ggml_sycl_op_rms_norm_back(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32); // dz
+ GGML_ASSERT(dst->src[1]->type == GGML_TYPE_F32); // x
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ float eps = 1e-5f;
+ std::memcpy(&eps, dst->op_params, sizeof(float));
+ if (!(eps > 0.0f) || !std::isfinite(eps)) eps = 1e-5f;
+
+ const float * g_base = static_cast<const float *>(dst->src[0]->data); // dz
+ const float * x_base = static_cast<const float *>(dst->src[1]->data); // x
+ float * dx_base = static_cast< float *>(dst->data);
+
+ const int64_t D = dst->ne[0];
+ const int64_t n1 = dst->ne[1], n2 = dst->ne[2], n3 = dst->ne[3]; (void) n3;
+ const int64_t N = ggml_nrows(dst);
+ if (D == 0 || N == 0) return;
+
+ const ggml_tensor *G = dst->src[0];
+ const ggml_tensor *X = dst->src[1];
+ const int ts = (int) ggml_type_size(X->type);
+ GGML_ASSERT((size_t) X->nb[0] == (size_t) ts);
+ GGML_ASSERT((size_t) G->nb[0] == (size_t) ts);
+ GGML_ASSERT((size_t) dst->nb[0] == (size_t) ts);
+
+ const int64_t xs1 = X->nb[1] / ts, xs2 = X->nb[2] / ts, xs3 = X->nb[3] / ts;
+ const int64_t gs1 = G->nb[1] / ts, gs2 = G->nb[2] / ts, gs3 = G->nb[3] / ts;
+ const int64_t ds1 = dst->nb[1] / ts, ds2 = dst->nb[2] / ts, ds3 = dst->nb[3] / ts;
+
+ dpct::queue_ptr q = ctx.stream();
+
+ // work-group size: multiple of WARP_SIZE, capped by device and 256, and not larger than D
+ const int device_max_wg = ggml_sycl_info().max_work_group_sizes[ctx.device];
+ auto roundup = [](int v, int m) { return ((v + m - 1) / m) * m; };
+ int wg_cap = 256;
+ if (device_max_wg > 0) wg_cap = std::min(wg_cap, device_max_wg);
+ int WG = std::max(WARP_SIZE, std::min(roundup((int)std::min<int64_t>(D, wg_cap), WARP_SIZE), wg_cap));
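+    // e.g. (illustrative): D = 100, WARP_SIZE = 32, wg_cap = 256 gives
+    // roundup(min(100, 256), 32) = 128, so WG = max(32, min(128, 256)) = 128.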
+
+ // FP32 path: per-thread compensated accumulation + hierarchical reduction
+ q->submit([&](sycl::handler &cgh) {
+ const int nwarps_loc = std::max(1, WG / WARP_SIZE);
+ // store one partial value per warp (xx and xg) for cross-warp reduction
+ auto l_xx = sycl::local_accessor<sycl::float2, 1>(sycl::range<1>(nwarps_loc), cgh);
+ auto l_xg = sycl::local_accessor<sycl::float2, 1>(sycl::range<1>(nwarps_loc), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(sycl::range<3>(1, 1, N) * sycl::range<3>(1, 1, WG),
+ sycl::range<3>(1, 1, WG)),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ const int row = item_ct1.get_group(2);
+ const int tid = item_ct1.get_local_id(2);
+
+ const int64_t i1 = row % n1;
+ const int64_t i2 = (row / n1) % n2;
+ const int64_t i3 = row / (n1 * n2);
+
+ const float *__restrict x_row = x_base + i3 * xs3 + i2 * xs2 + i1 * xs1;
+ const float *__restrict g_row = g_base + i3 * gs3 + i2 * gs2 + i1 * gs1;
+ float *__restrict d_row = dx_base + i3 * ds3 + i2 * ds2 + i1 * ds1;
+
+ // per-thread accumulation (compensated by default)
+ float sum_xx = 0.f, sum_xg = 0.f;
+#ifndef GGML_SYCL_RMS_BACK_FAST
+ float c_xx = 0.f, c_xg = 0.f;
+#endif
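+            // c_xx / c_xg are Kahan compensation terms: each step folds the
+            // rounding error of the previous addition back into the sum.
+            // Illustrative: summing (1e8f, 1.0f, -1e8f) plainly yields 0.0f,
+            // while the compensated sum recovers the 1.0f.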
+ for (int64_t col = tid; col < D; col += WG) {
+ const float xv = x_row[col];
+ const float gv = g_row[col];
+#ifdef GGML_SYCL_RMS_BACK_FAST
+ sum_xx += xv * xv;
+ sum_xg += xv * gv;
+#else
+ float y1 = xv * xv - c_xx;
+ float t1 = sum_xx + y1;
+ c_xx = (t1 - sum_xx) - y1;
+ sum_xx = t1;
+
+ float y2 = xv * gv - c_xg;
+ float t2 = sum_xg + y2;
+ c_xg = (t2 - sum_xg) - y2;
+ sum_xg = t2;
+#endif
+ }
+
+ // warp-level reduction
+ sycl::float2 xx = sycl::float2(sum_xx,
+#ifndef GGML_SYCL_RMS_BACK_FAST
+ c_xx
+#else
+ 0.f
+#endif
+ );
+ sycl::float2 xg = sycl::float2(sum_xg,
+#ifndef GGML_SYCL_RMS_BACK_FAST
+ c_xg
+#else
+ 0.f
+#endif
+ );
+ xx = warp_reduce_sum(xx, item_ct1);
+ xg = warp_reduce_sum(xg, item_ct1);
+
+ // cross-warp reduction using local memory (single barrier)
+ const auto sub_group = item_ct1.get_sub_group();
+ const auto sg_id = sub_group.get_group_linear_id();
+ const auto wi_in_sg = sub_group.get_local_linear_id();
+ const int nthreads = item_ct1.get_local_range(2);
+ const int nwarps = nthreads / WARP_SIZE;
+
+ sycl::float2 xx_total = xx;
+ sycl::float2 xg_total = xg;
+ if (nwarps > 1) {
+ if (wi_in_sg == 0) {
+ l_xx[sg_id] = xx;
+ l_xg[sg_id] = xg;
+ }
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ if (sg_id == 0) {
+ const unsigned wi_u = wi_in_sg;
+ sycl::float2 xx_first = (wi_u < static_cast<unsigned>(nwarps)) ? l_xx[wi_u] : sycl::float2(0.f, 0.f);
+ sycl::float2 xg_first = (wi_u < static_cast<unsigned>(nwarps)) ? l_xg[wi_u] : sycl::float2(0.f, 0.f);
+ xx_total = warp_reduce_sum(xx_first, item_ct1);
+ xg_total = warp_reduce_sum(xg_first, item_ct1);
+ } else {
+ // other subgroups keep their local totals; they'll be ignored
+ xx_total = xx;
+ xg_total = xg;
+ }
+ // ensure all threads see the first-subgroup result via broadcast below
+ }
+
+ // compute inv_r and coeff once per row and broadcast to the whole work-group
+ float inv_r = 0.f;
+ float coeff = 0.f;
+ if (tid == 0) {
+ const float sum_xx_f = xx_total.x() + xx_total.y();
+ const float sum_xdz_f = xg_total.x() + xg_total.y();
+ const float mean_eps = sum_xx_f / (float) D + eps;
+ const float sum_eps = sum_xx_f + eps * (float) D;
+ inv_r = sycl::rsqrt(mean_eps);
+ coeff = -sum_xdz_f / sum_eps;
+ }
+ inv_r = sycl::group_broadcast(item_ct1.get_group(), inv_r);
+ coeff = sycl::group_broadcast(item_ct1.get_group(), coeff);
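+            // Why this works (derivation sketch): with y = x * inv_r and
+            // inv_r = rsqrt(mean(x^2) + eps), the chain rule gives
+            // dx_j = inv_r * (dz_j - x_j * (x . dz) / (sum(x^2) + eps*D))
+            //      = (dz_j + coeff * x_j) * inv_r, which is the loop below.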
+
+ for (int64_t col = tid; col < D; col += WG) {
+ d_row[col] = (g_row[col] + coeff * x_row[col]) * inv_r;
+ }
+ });
+ });
+
+}
+
+void ggml_sycl_op_l2_norm(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ const int64_t ne00 = dst->src[0]->ne[0];
+ const int64_t nrows = ggml_nrows(dst->src[0]);
+ const float * src0_dd = static_cast<const float *>(dst->src[0]->data);
+ float * dst_dd = static_cast<float *>(dst->data);
+
+ float eps;
+ memcpy(&eps, dst->op_params, sizeof(float));
+
+ l2_norm_f32_sycl(src0_dd, dst_dd, ne00, nrows, eps, main_stream, ctx.device);
+
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/norm.hpp b/llama.cpp/ggml/src/ggml-sycl/norm.hpp
new file mode 100644
index 0000000..8cb885e
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/norm.hpp
@@ -0,0 +1,28 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_NORM_HPP
+#define GGML_SYCL_NORM_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_norm(ggml_backend_sycl_context& ctx, ggml_tensor* dst);
+
+void ggml_sycl_op_rms_norm(ggml_backend_sycl_context& ctx, ggml_tensor* dst);
+
+void ggml_sycl_op_rms_norm_back(ggml_backend_sycl_context& ctx, ggml_tensor* dst);
+
+void ggml_sycl_op_group_norm(ggml_backend_sycl_context& ctx, ggml_tensor* dst);
+
+void ggml_sycl_op_l2_norm(ggml_backend_sycl_context& ctx, ggml_tensor* dst);
+
+#endif // GGML_SYCL_NORM_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/outprod.cpp b/llama.cpp/ggml/src/ggml-sycl/outprod.cpp
new file mode 100644
index 0000000..f52b11f
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/outprod.cpp
@@ -0,0 +1,47 @@
+#include "outprod.hpp"
+
+void ggml_sycl_op_out_prod(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ const ggml_tensor *src0 = dst->src[0];
+ const ggml_tensor *src1 = dst->src[1];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(ggml_is_contiguous(src0));
+ GGML_ASSERT(ggml_is_contiguous(dst));
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ // Get SYCL queue
+ dpct::queue_ptr stream = ctx.stream();
+
+ // Dimension checks
+ GGML_ASSERT(ne01 == ne11); // Inner dimensions must match
+ GGML_ASSERT(ne0 == ne00); // Output rows match src0 rows
+    GGML_ASSERT(ne1 == ne10); // Output cols match src1 rows
+
+ // Get data pointers
+ const float* src0_d = (const float*)src0->data;
+ const float* src1_d = (const float*)src1->data;
+ float* dst_d = (float*)dst->data;
+
+ // GEMM parameters
+ const float alpha = 1.0f;
+ const float beta = 0.0f;
+
+ // Handle transposition of src1
+ const bool src1_T = ggml_is_transposed(src1);
+ const oneapi::mkl::transpose src1_op = src1_T ? oneapi::mkl::transpose::nontrans : oneapi::mkl::transpose::trans;
+ const int64_t ldb = (src1_T ? nb10 : nb11) / sizeof(float);
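+    // In column-major terms this computes dst(ne0 x ne1) = src0(ne00 x ne01) *
+    // op(src1), where op transposes src1 unless it is already stored transposed.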
+
+ try {
+ // Perform matrix multiplication using oneMKL GEMM
+ oneapi::mkl::blas::column_major::gemm(*stream, oneapi::mkl::transpose::nontrans, src1_op,
+ ne0, ne1, ne01, alpha, src0_d, ne00, src1_d, ldb, beta, dst_d, ne0);
+ }
+ catch (sycl::exception const& exc) {
+ std::cerr << exc.what() << std::endl;
+ GGML_ASSERT(false);
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/outprod.hpp b/llama.cpp/ggml/src/ggml-sycl/outprod.hpp
new file mode 100644
index 0000000..f50413d
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/outprod.hpp
@@ -0,0 +1,10 @@
+#ifndef GGML_SYCL_OUTPROD_HPP
+#define GGML_SYCL_OUTPROD_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_out_prod(ggml_backend_sycl_context& ctx, ggml_tensor* dst);
+
+
+#endif // GGML_SYCL_OUTPROD_HPP
+
diff --git a/llama.cpp/ggml/src/ggml-sycl/pad.cpp b/llama.cpp/ggml/src/ggml-sycl/pad.cpp
new file mode 100644
index 0000000..f989c5e
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/pad.cpp
@@ -0,0 +1,97 @@
+//
+// MIT license
+// Copyright (C) 2025 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+//#include "common.hpp"
+#include "pad.hpp"
+
+static void pad_f32(const float * src, float * dst,
+ const int lp0, const int rp0, const int lp1, const int rp1,
+ const int lp2, const int rp2, const int lp3, const int rp3,
+ const int ne0, const int ne1, const int ne2, const int ne3,
+ sycl::nd_item<3> item_ct1) {
+ int i0 = item_ct1.get_local_id(2) +
+ item_ct1.get_group(2) * item_ct1.get_local_range(2);
+ int i1 = item_ct1.get_group(1);
+ int i2 = item_ct1.get_group(0) % ne2;
+ int i3 = item_ct1.get_group(0) / ne2;
+ if (i0 >= ne0 || i1 >= ne1 || i2 >= ne2 || i3 >= ne3) {
+ return;
+ }
+
+ // operation
+ const int64_t dst_idx = i3*(ne0*ne1*ne2) + i2*(ne0*ne1) + i1*ne0 + i0;
+ if ((i0 >= lp0 && i0 < ne0 - rp0) &&
+ (i1 >= lp1 && i1 < ne1 - rp1) &&
+ (i2 >= lp2 && i2 < ne2 - rp2) &&
+ (i3 >= lp3 && i3 < ne3 - rp3)) {
+ const int64_t i00 = i0 - lp0;
+ const int64_t i01 = i1 - lp1;
+ const int64_t i02 = i2 - lp2;
+ const int64_t i03 = i3 - lp3;
+ const int64_t ne02 = ne2 - lp2 - rp2;
+ const int64_t ne01 = ne1 - lp1 - rp1;
+ const int64_t ne00 = ne0 - lp0 - rp0;
+
+ const int64_t src_idx = i03 * (ne00 * ne01 * ne02) +
+ i02 * (ne00 * ne01) + i01 * ne00 + i00;
+
+ dst[dst_idx] = src[src_idx];
+ } else {
+ dst[dst_idx] = 0.0f;
+ }
+}
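+
+// Index mapping example (illustrative): with ne0 = 6, lp0 = 1, rp0 = 2 the
+// source row has ne00 = 3 elements, and a destination row becomes
+// [0, src[0], src[1], src[2], 0, 0].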
+
+static void pad_f32_sycl(const float *src, float *dst, const int lp0,
+ const int rp0, const int lp1, const int rp1,
+ const int lp2, const int rp2, const int lp3,
+ const int rp3, const int ne0, const int ne1,
+ const int ne2, const int ne3,
+ dpct::queue_ptr stream) {
+ int num_blocks = (ne0 + SYCL_PAD_BLOCK_SIZE - 1) / SYCL_PAD_BLOCK_SIZE;
+ dpct::dim3 gridDim(num_blocks, ne1, ne2 * ne3);
+ stream->parallel_for(
+ sycl::nd_range<3>(gridDim * sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE),
+ sycl::range<3>(1, 1, SYCL_PAD_BLOCK_SIZE)),
+ [=](sycl::nd_item<3> item_ct1) {
+ pad_f32(src, dst, lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3, ne0, ne1,
+ ne2, ne3, item_ct1);
+ });
+}
+
+void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ const ggml_tensor * src0 = dst->src[0];
+ const float * src0_d = (const float *)src0->data;
+ float * dst_d = (float *)dst->data;
+ dpct::queue_ptr stream = ctx.stream();
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+ GGML_ASSERT(ggml_is_contiguous(src0));
+
+ const int32_t lp0 = ((const int32_t*)(dst->op_params))[0];
+ const int32_t rp0 = ((const int32_t*)(dst->op_params))[1];
+ const int32_t lp1 = ((const int32_t*)(dst->op_params))[2];
+ const int32_t rp1 = ((const int32_t*)(dst->op_params))[3];
+ const int32_t lp2 = ((const int32_t*)(dst->op_params))[4];
+ const int32_t rp2 = ((const int32_t*)(dst->op_params))[5];
+ const int32_t lp3 = ((const int32_t*)(dst->op_params))[6];
+ const int32_t rp3 = ((const int32_t*)(dst->op_params))[7];
+
+ pad_f32_sycl(src0_d, dst_d,
+ lp0, rp0, lp1, rp1, lp2, rp2, lp3, rp3,
+ dst->ne[0], dst->ne[1], dst->ne[2], dst->ne[3], stream);
+}
+
+void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ ggml_sycl_op_pad(ctx, dst);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/pad.hpp b/llama.cpp/ggml/src/ggml-sycl/pad.hpp
new file mode 100644
index 0000000..b099e9b
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/pad.hpp
@@ -0,0 +1,24 @@
+//
+// MIT license
+// Copyright (C) 2025 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_PAD_HPP
+#define GGML_SYCL_PAD_HPP
+
+#include "common.hpp"
+
+#define SYCL_PAD_BLOCK_SIZE 256
+
+void ggml_sycl_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_op_pad(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_PAD_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.cpp b/llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.cpp
new file mode 100644
index 0000000..85e9936
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.cpp
@@ -0,0 +1,100 @@
+#include "pad_reflect_1d.hpp"
+
+static void pad_reflect_1d_kernel_f32(
+ const void *__restrict__ src0, void *__restrict__ dst, const int64_t ne0,
+ const int64_t ne00, const sycl::uint3 ne01, const int64_t ne02,
+ const int64_t ne03, const int64_t nb00, const int64_t nb01,
+ const int64_t nb02, const int64_t nb03, const int64_t nb0,
+ const int64_t nb1, const int64_t nb2, const int64_t nb3, const int p0,
+ const int p1, sycl::nd_item<3> item_ct1) {
+
+ const int64_t i3 = item_ct1.get_group(0);
+ const int64_t i2 = item_ct1.get_group(1);
+
+ const sycl::uint2 div_mod_packed =
+ fast_div_modulo(item_ct1.get_group(2), ne01);
+ const int64_t tile1 = div_mod_packed.y();
+ const int64_t tile0 = div_mod_packed.x();
+ const int64_t i1 = tile1;
+ const int64_t i0 =
+ item_ct1.get_local_id(2) + tile0 * item_ct1.get_local_range(2);
+
+ if (i0 >= ne0 || i1 >= ne01.z() || i2 >= ne02 || i3 >= ne03) {
+ return;
+ }
+
+ const char *src0_ptr =
+ (const char *)src0 + i3 * nb03 + i2 * nb02 + i1 * nb01;
+ char *dst_ptr = (char *)dst + i3 * nb3 + i2 * nb2 + i1 * nb1;
+
+ const int64_t rel_i0 = i0 - p0; // relative i0 in src0
+ int64_t src_idx;
+
+ if (rel_i0 < 0) {
+ // Left padding - reflect
+ src_idx = -rel_i0;
+ } else if (rel_i0 < ne00) {
+ // Middle - copy
+ src_idx = rel_i0;
+ } else {
+ // Right padding - reflect
+ src_idx = 2 * ne00 - 2 - rel_i0;
+ }
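+    // Reflection example (illustrative): ne00 = 5, p0 = p1 = 2 gives ne0 = 9;
+    // i0 = 0,1 reflect to src 2,1; i0 = 2..6 copy src 0..4; i0 = 7,8 reflect
+    // to src 3,2.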
+ const float value = *(const float *)(src0_ptr + src_idx * nb00);
+ *(float *)(dst_ptr + i0 * nb0) = value;
+
+ GGML_UNUSED(p1);
+}
+
+void ggml_sycl_op_pad_reflect_1d(ggml_backend_sycl_context &ctx,
+ ggml_tensor *dst) {
+
+ const ggml_tensor *src0 = dst->src[0];
+ dpct::queue_ptr stream = ctx.stream();
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ const int32_t *opts = (const int32_t *)dst->op_params;
+ const int p0 = opts[0];
+ const int p1 = opts[1];
+
+ const int64_t ne00 = src0->ne[0];
+ const int64_t ne01 = src0->ne[1];
+ const sycl::uint3 ne01_packed = init_fastdiv_values(ne01);
+ const int64_t ne02 = src0->ne[2];
+ const int64_t ne03 = src0->ne[3];
+
+ const int64_t ne0 = dst->ne[0];
+
+ GGML_ASSERT(ne0 == ne00 + p0 + p1);
+
+ constexpr int64_t bx = SYCL_PAD_REFLECT_1D_BLOCK_SIZE;
+ const int64_t tiles0 = (ne0 + bx - 1) / bx;
+ const dpct::dim3 grid_dims((unsigned)(ne01 * tiles0), (unsigned)ne02,
+ (unsigned)ne03);
+ const dpct::dim3 block_dims((unsigned)bx, 1, 1);
+
+ stream->submit([&](sycl::handler &cgh) {
+ auto src0_data_ct0 = src0->data;
+ auto dst_data_ct1 = dst->data;
+ auto src0_nb_ct7 = src0->nb[0];
+ auto src0_nb_ct8 = src0->nb[1];
+ auto src0_nb_ct9 = src0->nb[2];
+ auto src0_nb_ct10 = src0->nb[3];
+ auto dst_nb_ct11 = dst->nb[0];
+ auto dst_nb_ct12 = dst->nb[1];
+ auto dst_nb_ct13 = dst->nb[2];
+ auto dst_nb_ct14 = dst->nb[3];
+
+ cgh.parallel_for(sycl::nd_range<3>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ pad_reflect_1d_kernel_f32(
+ src0_data_ct0, dst_data_ct1, ne0, ne00,
+ ne01_packed, ne02, ne03, src0_nb_ct7,
+ src0_nb_ct8, src0_nb_ct9, src0_nb_ct10,
+ dst_nb_ct11, dst_nb_ct12, dst_nb_ct13,
+ dst_nb_ct14, p0, p1, item_ct1);
+ });
+ });
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.hpp b/llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.hpp
new file mode 100644
index 0000000..45aaf9a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/pad_reflect_1d.hpp
@@ -0,0 +1,10 @@
+#ifndef GGML_SYCL_PAD_REFLECT_1D_HPP
+#define GGML_SYCL_PAD_REFLECT_1D_HPP
+
+#include "common.hpp"
+
+#define SYCL_PAD_REFLECT_1D_BLOCK_SIZE 256
+
+void ggml_sycl_op_pad_reflect_1d(ggml_backend_sycl_context& ctx, ggml_tensor* dst);
+
+#endif // GGML_SYCL_PAD_REFLECT_1D_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/presets.hpp b/llama.cpp/ggml/src/ggml-sycl/presets.hpp
new file mode 100644
index 0000000..b651737
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/presets.hpp
@@ -0,0 +1,76 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_PRESETS_HPP
+#define GGML_SYCL_PRESETS_HPP
+
+#define GGML_SYCL_MAX_STREAMS 8
+#define GGML_SYCL_MAX_BUFFERS 256
+
+#define WARP_SIZE GGML_SYCL_WARP_SIZE
+#define MATRIX_ROW_PADDING 512 // last row of quant. matrices is a multiple of this to avoid out-of-bounds memory accesses
+
+#define SYCL_GELU_BLOCK_SIZE 256
+#define SYCL_SILU_BLOCK_SIZE 256
+#define SYCL_TANH_BLOCK_SIZE 256
+#define SYCL_RELU_BLOCK_SIZE 256
+#define SYCL_HARDSIGMOID_BLOCK_SIZE 256
+#define SYCL_HARDSWISH_BLOCK_SIZE 256
+#define SYCL_EXP_BLOCK_SIZE 256
+#define SYCL_NEG_BLOCK_SIZE 256
+#define SYCL_SIGMOID_BLOCK_SIZE 256
+#define SYCL_SQRT_BLOCK_SIZE 256
+#define SYCL_SIN_BLOCK_SIZE 256
+#define SYCL_SQR_BLOCK_SIZE 256
+#define SYCL_SET_BLOCK_SIZE 256
+#define SYCL_CPY_BLOCK_SIZE 32
+#define SYCL_SCALE_BLOCK_SIZE 256
+#define SYCL_CLAMP_BLOCK_SIZE 256
+#define SYCL_ROPE_BLOCK_SIZE 256
+#define SYCL_ALIBI_BLOCK_SIZE 32
+#define SYCL_DIAG_MASK_INF_BLOCK_SIZE 32
+#define SYCL_QUANTIZE_BLOCK_SIZE 256
+#define SYCL_DEQUANTIZE_BLOCK_SIZE 256
+#define SYCL_GET_ROWS_BLOCK_SIZE 256
+#define SYCL_UPSCALE_BLOCK_SIZE 256
+#define SYCL_CONCAT_BLOCK_SIZE 256
+#define SYCL_PAD_BLOCK_SIZE 256
+#define SYCL_ACC_BLOCK_SIZE 256
+#define SYCL_IM2COL_BLOCK_SIZE 256
+#define SYCL_POOL2D_BLOCK_SIZE 256
+#define SYCL_ARGMAX_BLOCK_SIZE 256
+#define SYCL_CONV_TRANPOSE_1D_BLOCK_SIZE 256
+#define SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE 256
+#define SYCL_ARANGE_BLOCK_SIZE 256
+
+// dmmv = dequantize_mul_mat_vec
+#ifndef GGML_SYCL_DMMV_X
+#define GGML_SYCL_DMMV_X 32
+#endif
+#ifndef GGML_SYCL_MMV_Y
+#define GGML_SYCL_MMV_Y 1
+#endif
+
+#ifndef K_QUANTS_PER_ITERATION
+#define K_QUANTS_PER_ITERATION 2
+#else
+static_assert(K_QUANTS_PER_ITERATION == 1 || K_QUANTS_PER_ITERATION == 2, "K_QUANTS_PER_ITERATION must be 1 or 2");
+#endif
+
+#ifndef GGML_SYCL_PEER_MAX_BATCH_SIZE
+#define GGML_SYCL_PEER_MAX_BATCH_SIZE 128
+#endif // GGML_SYCL_PEER_MAX_BATCH_SIZE
+
+#define MUL_MAT_SRC1_COL_STRIDE 128
+
+#define QK_WARP_SIZE 32
+#endif // GGML_SYCL_PRESETS_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/quantize.hpp b/llama.cpp/ggml/src/ggml-sycl/quantize.hpp
new file mode 100644
index 0000000..b5c7a54
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/quantize.hpp
@@ -0,0 +1,133 @@
+/***************************************************************************
+ *
+ * Copyright (C) 2025 Codeplay Software Ltd.
+ * Copyright (C) 2025 Intel Corporation
+ *
+ * MIT License
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * quantize.hpp
+ *
+ * Description:
+ * Sycl backend specific quantization functions
+ **************************************************************************/
+
+#pragma once
+
+#include <sycl/nd_item.hpp>
+
+#include "ggml-sycl/dpct/helper.hpp"
+
+template <int ElementsPerWI>
+__dpct_inline__ static void quantize_q8_1_impl(const float * __restrict__ x,
+ sycl::vec<int8_t, ElementsPerWI> & quantized_values, float & d,
+ float & sum, const sycl::nd_item<1> & it) {
+ auto subgroup_id = it.get_group(0);
+ auto wi_id = it.get_local_id(0);
+
+ sycl::vec<float, ElementsPerWI> wi_f32_vals;
+
+ auto float_ptr_offset = subgroup_id * QK8_1 + ElementsPerWI * wi_id;
+ wi_f32_vals = *reinterpret_cast<const sycl::vec<float, ElementsPerWI> *>(x + float_ptr_offset);
+
+ float amax = 0.0f;
+
+#pragma unroll(ElementsPerWI)
+ for (int i = 0; i < ElementsPerWI; i++) {
+ sum += wi_f32_vals[i];
+ amax = sycl::fmax(amax, sycl::fabs(wi_f32_vals[i]));
+ quantized_values[i] = 0;
+ }
+ sum = sycl::reduce_over_group(it.get_sub_group(), sum, sycl::plus<float>());
+ amax = sycl::reduce_over_group(it.get_sub_group(), amax, sycl::maximum<float>());
+ d = amax == 0 ? 1 : amax / 127;
+
+#pragma unroll(ElementsPerWI)
+ for (int i = 0; i < ElementsPerWI; i++) {
+ quantized_values[i] = sycl::round(wi_f32_vals[i] / d);
+ }
+
+ d = amax == 0 ? 0 : d;
+}
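+
+// Scale example (illustrative): a block with amax = 2.54 gets d = 2.54 / 127
+// = 0.02, so an input of 1.27 quantizes to round(1.27 / 0.02) = 64.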
+
+// No-op functor used to select the non-quantizing codepath in ggml_sycl_op_mul_mat
+template <int ElementsPerWI> struct no_quantize_q8_1 {
+ void operator()(const float *, void *, int, int, const sycl::nd_item<1> &) const {}
+};
+
+template <int ElementsPerWI> struct quantize_and_reorder_q8_1_soa {
+ __dpct_inline__ void operator()(const float * __restrict__ x, void * reordered_q8_tensor, const int kx,
+ const int kx_padded, const sycl::nd_item<1> & it) const {
+        /*
+            Quantizes and reorders the resulting q8 tensor in a per-row fashion.
+            Each sub-group computes one quant block, i.e. QK8_1 quantized values plus the d and sum values.
+        */
+ auto subgroup_id = it.get_group(0);
+ auto wi_id = it.get_local_id(0);
+
+ sycl::vec<int8_t, ElementsPerWI> quantized_values;
+ float d = 0.0f;
+ float sum = 0.0f;
+ quantize_q8_1_impl<ElementsPerWI>(x, quantized_values, d, sum, it);
+
+ const int num_blocks_per_row = kx / QK8_1;
+ auto row = subgroup_id / num_blocks_per_row;
+ auto col = subgroup_id % num_blocks_per_row;
+ auto row_offset = row * (kx_padded / QK8_1) * sizeof(block_q8_1);
+ auto col_offset = QK8_1 * col + wi_id * ElementsPerWI;
+
+ auto quant_ptr = (int8_t *) ((char *) reordered_q8_tensor + row_offset + col_offset);
+ *reinterpret_cast<sycl::vec<int8_t, ElementsPerWI> *>(quant_ptr) = quantized_values;
+
+ auto ds_ptr = (sycl::half2 *) ((char *) reordered_q8_tensor + row_offset + kx + col * sizeof(sycl::half2));
+ if (wi_id == 0) {
+ *ds_ptr = sycl::half2(sycl::half(d), sycl::half(sum));
+ }
+ }
+};
+
+template <int ElementsPerWI> struct quantize_q8_1 {
+ __dpct_inline__ void operator()(const float * __restrict__ x, void * q8_tensor, const int kx, const int kx_padded,
+ const sycl::nd_item<1> & it) const {
+ auto subgroup_id = it.get_group(0);
+ auto wi_id = it.get_local_id(0);
+
+ const int num_blocks_per_row = kx / QK8_1;
+ auto row = subgroup_id / num_blocks_per_row;
+ const int pitch = kx_padded / QK8_1;
+
+ sycl::vec<int8_t, ElementsPerWI> quantized_values;
+ float d = 0.0f;
+ float sum = 0.0f;
+ quantize_q8_1_impl<ElementsPerWI>(x, quantized_values, d, sum, it);
+
+ block_q8_1 * quant_ptr = (block_q8_1 *) q8_tensor;
+ auto block_id = subgroup_id % num_blocks_per_row + row * pitch;
+
+ int8_t * qs = &(quant_ptr[block_id].qs[wi_id * ElementsPerWI]);
+ *reinterpret_cast<sycl::vec<int8_t, ElementsPerWI> *>(qs) = quantized_values;
+ if (wi_id == 0) {
+ quant_ptr[block_id].ds = sycl::half2(sycl::half(d), sycl::half(sum));
+ }
+ }
+};
+
+template <template <int> typename quantize_f>
+void quantize_row_q8_1_sycl(const float * x, void * vy, const int kx, const int ky, const int kx_padded,
+ dpct::queue_ptr stream) {
+ static_assert(QK8_1 % WARP_SIZE == 0);
+ auto local_range = std::size_t(WARP_SIZE);
+ auto num_quant_blocks = ky * (kx / QK8_1);
+ auto global_range = num_quant_blocks * local_range;
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ stream->parallel_for(sycl::nd_range<1>({ global_range }, { local_range }),
+ [=](sycl::nd_item<1> it) [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ quantize_f<QK8_1 / WARP_SIZE>()(x, vy, kx, kx_padded, it);
+ });
+}
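+
+// Example dispatch (illustrative sketch; x_d, vy_d and stream stand for a
+// device float buffer of ky rows of kx values, a destination buffer sized for
+// the kx_padded layout, and a valid queue):
+//   quantize_row_q8_1_sycl<quantize_and_reorder_q8_1_soa>(x_d, vy_d, kx, ky, kx_padded, stream);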
diff --git a/llama.cpp/ggml/src/ggml-sycl/quants.hpp b/llama.cpp/ggml/src/ggml-sycl/quants.hpp
new file mode 100644
index 0000000..d0d5ac9
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/quants.hpp
@@ -0,0 +1,110 @@
+//
+// MIT license
+// Copyright (C) 2025 Codeplay Software Ltd.
+// Copyright (C) 2025 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_QUANTS_HPP
+#define GGML_SYCL_QUANTS_HPP
+
+#include <utility>
+
+#include "ggml-common.h"
+#include "ggml.h"
+
+namespace ggml_sycl_reordered {
+
+// The reordered block layout moves the quants (qs) and scales (d) into two
+// uniform regions of memory that are contiguous within the same tensor:
+// instead of
+// [d0, qs0] [d1, qs1] [d2, qs2] ... [dN, qsN]
+// we store
+// [qs0, qs1, qs2, ..., qsN] [d0, d1, d2, ..., dN]
+//
+// Notes: out-of-bounds qs reads will run into the d values;
+// alignment relies on the allocated size of qs.
+
+template <ggml_type type> struct block_q_t;
+
+// qk: number of weights / quants in a block
+// qr: number of weights in a byte (described as 'before dequantization');
+//     for quantization types that split low and high bits, qr is calculated
+//     using the lower bits, e.g. for Q6 quants QR6 is 2
+// qi: number of 32-bit integers needed to represent all the quants from a block (`qs` field)
+// See ggml-common.h for how these are calculated
+template <> struct block_q_t<GGML_TYPE_Q4_0> {
+ struct traits {
+ static constexpr uint32_t qk = QK4_0;
+ static constexpr uint32_t qi = QI4_0;
+ static constexpr uint32_t qr = QR4_0;
+ static constexpr uint32_t vdr_mmvq = 2;
+ };
+
+ static constexpr std::pair<int, int> get_block_offset(const int block_index, const int /* nblocks */) {
+ return { block_index * (QK4_0 / QR4_0), 0 };
+ }
+
+ static constexpr std::pair<int, int> get_d_offset(int nrows, int ncols, const int block_index) {
+ return { (ncols / QR4_0 * nrows) + block_index * sizeof(ggml_half), 0 };
+ }
+
+ static constexpr int block_to_q8_1_ratio() { return traits::qk / QK8_1; }
+};
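+
+// Offset example (illustrative): for Q4_0, block_index = 2 sits at qs byte
+// offset 2 * (QK4_0 / QR4_0) = 32, and its scale at
+// (ncols / QR4_0 * nrows) + 2 * sizeof(ggml_half) bytes from the tensor base.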
+
+template <> struct block_q_t<GGML_TYPE_Q4_K> {
+ struct traits {
+ static constexpr uint32_t qk = QK_K;
+ static constexpr uint32_t qi = QI4_K;
+ static constexpr uint32_t qr = QR4_K;
+ static constexpr uint32_t vdr_mmvq = 2;
+ };
+
+ static constexpr std::pair<int, int> get_block_offset(const int block_index, const int /* nblocks */) {
+ return { block_index * (traits::qk / traits::qr), 0 };
+ }
+
+ static constexpr std::pair<int, int> get_d_offset(int nrows, int ncols, const int block_index) {
+ auto nblocks = (nrows * (ncols / QK_K));
+ return { nblocks * (QK_K / 2) + (block_index * K_SCALE_SIZE),
+ (nblocks * QK_K / 2) + (nblocks * K_SCALE_SIZE) + (block_index * sizeof(ggml_half2)) };
+ }
+
+ static constexpr int block_to_q8_1_ratio() { return traits::qk / QK8_1; }
+};
+
+template <> struct block_q_t<GGML_TYPE_Q6_K> {
+ struct traits {
+ static constexpr uint32_t qk = QK_K;
+ static constexpr uint32_t qi = QI6_K;
+ static constexpr uint32_t qr = QR6_K;
+ static constexpr uint32_t vdr_mmvq = 1;
+ };
+
+ static constexpr std::pair<int, int> get_block_offset(const int block_index, const int n_blocks) {
+ auto low_bits_index = block_index * (QK_K / QR6_K);
+        // the high bits start after all the low bits
+ auto high_bits_index = n_blocks * (QK_K / 2) + (block_index * (QK_K / 4));
+ return { low_bits_index, high_bits_index };
+ }
+
+ static constexpr std::pair<int, int> get_d_offset(int nrows, int ncols, const int block_index) {
+ auto nblocks = (nrows * (ncols / QK_K));
+ auto total_qs_bytes = nblocks * (QK_K / 2) + nblocks * (QK_K / 4);
+ auto block_scales = total_qs_bytes + block_index * (QK_K / 16);
+ auto sb_scale = total_qs_bytes + nblocks * (QK_K / 16) + block_index * sizeof(ggml_half);
+ return { block_scales, sb_scale };
+ }
+
+ static constexpr int block_to_q8_1_ratio() { return traits::qk / QK8_1; }
+};
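+
+// Layout example (illustrative): with nblocks Q6_K blocks, the low bits take
+// nblocks * (QK_K / 2) bytes, the high bits the next nblocks * (QK_K / 4),
+// then the per-block int8 scales (QK_K / 16 bytes each) and finally one
+// ggml_half super-block scale per block, matching the offsets above.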
+
+} // namespace ggml_sycl_reordered
+
+#endif // GGML_SYCL_QUANTS_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/repeat_back.cpp b/llama.cpp/ggml/src/ggml-sycl/repeat_back.cpp
new file mode 100644
index 0000000..845b484
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/repeat_back.cpp
@@ -0,0 +1,76 @@
+#include "repeat_back.hpp"
+
+#include "common.hpp"
+
+#define GGML_ASSERT_TENSOR_FITS_INT(t) \
+ GGML_ASSERT((t)->ne[0] < INT_MAX && (t)->ne[1] < INT_MAX && (t)->ne[2] < INT_MAX && (t)->ne[3] < INT_MAX)
+
+void ggml_sycl_op_repeat_back(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ const float * src0_dd = (const float *) dst->src[0]->data;
+ float * dst_dd = (float *) dst->data;
+
+ GGML_ASSERT_TENSOR_FITS_INT(dst);
+ GGML_ASSERT_TENSOR_FITS_INT(dst->src[0]);
+
+ const int ne0 = dst->ne[0], ne1 = dst->ne[1], ne2 = dst->ne[2], ne3 = dst->ne[3];
+ const int ne00 = dst->src[0]->ne[0], ne01 = dst->src[0]->ne[1], ne02 = dst->src[0]->ne[2],
+ ne03 = dst->src[0]->ne[3];
+
+ const int nr0 = ne00 / ne0;
+ const int nr1 = ne01 / ne1;
+ const int nr2 = ne02 / ne2;
+ const int nr3 = ne03 / ne3;
+
+ const int nb0 = dst->src[0]->nb[0];
+ const int nb1 = dst->src[0]->nb[1];
+ const int nb2 = dst->src[0]->nb[2];
+ const int nb3 = dst->src[0]->nb[3];
+
+ const char * base = (const char *) src0_dd;
+
+ const size_t total = (size_t) ne0 * ne1 * ne2 * ne3;
+ constexpr int BLOCK_SIZE = 256;
+ const int num_blocks = (total + BLOCK_SIZE - 1) / BLOCK_SIZE;
+
+ const float inv_ne0 = 1.0f / ne0;
+ const float inv_ne_01 = 1.0f / (ne0 * ne1);
+ const float inv_ne_012 = 1.0f / (ne0 * ne1 * ne2);
+ const int repeat_count = nr0 * nr1 * nr2 * nr3;
+
+ queue_ptr stream = ctx.stream();
+
+ stream->parallel_for(
+ sycl::nd_range<1>(sycl::range<1>(num_blocks * BLOCK_SIZE), sycl::range<1>(BLOCK_SIZE)),
+ [=](sycl::nd_item<1> item_ct1) {
+ const size_t i = item_ct1.get_global_linear_id();
+ if (i >= total) {
+ return;
+ }
+
+ const int i3 = (int) (i * inv_ne_012);
+ const int i2 = (int) (i * inv_ne_01) - i3 * ne2;
+ const int i1 = (int) (i * inv_ne0) - (int) (i * inv_ne_01) * ne1;
+ const int i0 = i - (int) (i * inv_ne0) * ne0;
+
+ int j0 = 0, j1 = 0, j2 = 0, j3 = 0;
+ float acc = 0.0f;
+
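+            // j0..j3 form a mixed-radix odometer over the (nr0, nr1, nr2, nr3)
+            // repeat counts: j0 advances every step and each wrap carries into
+            // the next index, so every repeated copy of element i is summed once.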
+ for (int j = 0; j < repeat_count; ++j) {
+ const float * ptr = (const float *) (base + (i0 + j0 * ne0) * nb0 + (i1 + j1 * ne1) * nb1 +
+ (i2 + j2 * ne2) * nb2 + (i3 + j3 * ne3) * nb3);
+ acc += *ptr;
+
+ int carry = (++j0 >= nr0);
+ j0 -= carry * nr0;
+ carry = (carry && (++j1 >= nr1));
+ j1 -= carry * nr1;
+ carry = (carry && (++j2 >= nr2));
+ j2 -= carry * nr2;
+ j3 += carry;
+ }
+ dst_dd[i] = acc;
+ });
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/repeat_back.hpp b/llama.cpp/ggml/src/ggml-sycl/repeat_back.hpp
new file mode 100644
index 0000000..17a87f3
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/repeat_back.hpp
@@ -0,0 +1,8 @@
+#ifndef GGML_SYCL_REPEAT_BACK_HPP
+#define GGML_SYCL_REPEAT_BACK_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_repeat_back(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_REPEAT_BACK_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/roll.cpp b/llama.cpp/ggml/src/ggml-sycl/roll.cpp
new file mode 100644
index 0000000..1e05181
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/roll.cpp
@@ -0,0 +1,122 @@
+#include "roll.hpp"
+#include "common.hpp"
+
+using namespace sycl;
+
+static inline int wrap_add(int i, int shift, int n) {
+    int s = i + shift;
+    return (s >= n) ? (s - n) : s;
+}
+
+static void kernel_roll_fused_i0_i1(
+ queue &q,
+ const float *src_d,
+ float *dst_d,
+ int ne0, int ne1, int ne2, int ne3,
+ int sh0, int sh1, int sh2, int sh3)
+{
+ if (ne0 == 0 || ne1 == 0 || ne2 == 0 || ne3 == 0) return;
+
+ const int stride1 = ne0;
+ const int stride2 = ne0 * ne1;
+ const int stride3 = ne0 * ne1 * ne2;
+
+ const int shNe0 = (ne0 - sh0) % ne0;
+ const int shNe1 = (ne1 - sh1) % ne1;
+ const int shNe2 = (ne2 - sh2) % ne2;
+ const int shNe3 = (ne3 - sh3) % ne3;
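+    // shN maps a destination shift onto a source offset (illustrative):
+    // ne0 = 8, sh0 = 3 gives shNe0 = 5, so dst[i0] reads src[(i0 + 5) % 8],
+    // i.e. dst[3] = src[0] as expected for a roll by +3.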
+
+ const size_t g0 = (size_t) ne3;
+ const size_t g1 = (size_t) ne2;
+ const size_t g2 = (size_t) (ne1 * ne0);
+
+ const range<3> global{ g0, g1, g2 };
+
+ q.submit([&](handler &h) {
+ h.parallel_for(global, [=](id<3> idx) {
+ const int i3 = (int) idx[0];
+ const int i2 = (int) idx[1];
+
+ const int fused = (int) idx[2];
+ const int i1 = fused / ne0;
+ const int i0 = fused - i1 * ne0; // fused % ne0
+
+ const int idx_dst = i0
+ + i1 * stride1
+ + i2 * stride2
+ + i3 * stride3;
+
+ const int s0 = wrap_add(i0, shNe0, ne0);
+ const int s1 = wrap_add(i1, shNe1, ne1);
+ const int s2 = wrap_add(i2, shNe2, ne2);
+ const int s3 = wrap_add(i3, shNe3, ne3);
+
+ const int idx_src = s0
+ + s1 * stride1
+ + s2 * stride2
+ + s3 * stride3;
+
+ dst_d[idx_dst] = src_d[idx_src];
+ });
+ });
+}
+
+void ggml_sycl_roll(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ const ggml_tensor *src = dst->src[0];
+ GGML_ASSERT(src && src->type == GGML_TYPE_F32);
+
+ const int ne0 = (int) dst->ne[0];
+ const int ne1 = (int) dst->ne[1];
+ const int ne2 = (int) dst->ne[2];
+ const int ne3 = (int) dst->ne[3];
+
+ const int32_t *params = (const int32_t *) dst->op_params;
+ int shift0 = params[0];
+ int shift1 = params[1];
+ int shift2 = params[2];
+ int shift3 = params[3];
+
+ if ((shift0 | shift1 | shift2 | shift3) == 0) {
+ const size_t nb = ggml_nbytes(src);
+ queue *q = ctx.stream();
+ SYCL_CHECK(CHECK_TRY_ERROR(q->memcpy(dst->data, src->data, nb)));
+ return;
+ }
+
+ auto norm = [](int sh, int n) -> int {
+ if (n <= 0) return 0;
+ sh %= n;
+ if (sh < 0) sh += n;
+ return sh;
+ };
+ shift0 = norm(shift0, ne0);
+ shift1 = norm(shift1, ne1);
+ shift2 = norm(shift2, ne2);
+ shift3 = norm(shift3, ne3);
+
+ try {
+ queue *q = ctx.stream();
+
+ const float *src_d = (const float *) src->data;
+ float *dst_d = (float *) dst->data;
+ GGML_ASSERT(src_d && dst_d);
+
+ kernel_roll_fused_i0_i1(
+ *q, src_d, dst_d,
+ ne0, ne1, ne2, ne3,
+ shift0, shift1, shift2, shift3
+ );
+ } catch (const std::exception &e) {
+ std::fprintf(stderr, "[SYCL-ROLL] ERROR: %s\n", e.what());
+ throw;
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/roll.hpp b/llama.cpp/ggml/src/ggml-sycl/roll.hpp
new file mode 100644
index 0000000..97dc03d
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/roll.hpp
@@ -0,0 +1,20 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_ROLL_HPP
+#define GGML_SYCL_ROLL_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_roll(ggml_backend_sycl_context & ctx, ggml_tensor *dst);
+
+#endif // GGML_SYCL_ROLL_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/rope.cpp b/llama.cpp/ggml/src/ggml-sycl/rope.cpp
new file mode 100644
index 0000000..aeaa58b
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/rope.cpp
@@ -0,0 +1,477 @@
+#include "rope.hpp"
+#include "ggml-sycl/common.hpp"
+#include "ggml.h"
+
+struct rope_corr_dims {
+ float v[2];
+};
+
+struct mrope_sections {
+ int v[4];
+};
+
+static float rope_yarn_ramp(const float low, const float high, const int i0) {
+ const float y = (i0 / 2 - low) / sycl::max(0.001f, high - low);
+ return 1.0f - sycl::min(1.0f, sycl::max(0.0f, y));
+}
+
+// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
+// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
+static void rope_yarn(
+ float theta_extrap, float freq_scale, rope_corr_dims corr_dims, int64_t i0, float ext_factor, float mscale,
+ float * cos_theta, float * sin_theta) {
+ // Get n-d rotational scaling corrected for extrapolation
+ float theta_interp = freq_scale * theta_extrap;
+ float theta = theta_interp;
+ if (ext_factor != 0.0f) {
+ float ramp_mix = rope_yarn_ramp(corr_dims.v[0], corr_dims.v[1], i0) * ext_factor;
+ theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
+
+ // Get n-d magnitude scaling corrected for interpolation
+ mscale *= 1.0f + 0.1f * sycl::log(1.0f / freq_scale);
+ }
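+    // Effective angle: theta = (1 - ramp_mix) * theta_interp + ramp_mix * theta_extrap,
+    // so dims inside the correction range interpolate while those outside keep
+    // the extrapolated angle; mscale rescales cos/sin to preserve attention magnitude.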
+ *cos_theta = sycl::cos(theta) * mscale;
+ *sin_theta = sycl::sin(theta) * mscale;
+}
+
+template <typename T, bool has_ff>
+static void rope_norm(const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims,
+ const int32_t * pos, float freq_scale, float ext_factor, float attn_factor,
+ const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors,
+ const sycl::nd_item<3> & item_ct1) {
+ const int i0 = 2 * (item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1));
+
+ if (i0 >= ne0) {
+ return;
+ }
+
+ const int row = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2);
+
+ const int row0 = row % ne1;
+ const int channel0 = row / ne1;
+
+ const int i = row * ne0 + i0;
+ const int i2 = channel0 * s2 + row0 * s1 + i0;
+
+ if (i0 >= n_dims) {
+ *reinterpret_cast<sycl::vec<T, 2> *>(dst + i) = *reinterpret_cast<const sycl::vec<T, 2> *>(x + i2);
+ return;
+ }
+
+ const float theta_base = pos[channel0] * sycl::pow(theta_scale, i0 / 2.0f);
+
+ const float freq_factor = has_ff ? freq_factors[i0 / 2] : 1.0f;
+
+ float cos_theta;
+ float sin_theta;
+
+ rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
+
+ const float x0 = x[i2 + 0];
+ const float x1 = x[i2 + 1];
+
+ dst[i + 0] = x0 * cos_theta - x1 * sin_theta;
+ dst[i + 1] = x0 * sin_theta + x1 * cos_theta;
+}
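+
+// Note: rope_norm above rotates adjacent element pairs (x[i0], x[i0+1]);
+// rope_neox below instead pairs x[i0/2] with x[i0/2 + n_dims/2], i.e. the
+// two halves of the rotary dims.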
+
+template <typename T, bool has_ff>
+static void rope_neox(const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2, const int n_dims,
+ const int32_t * pos, const float freq_scale, const float ext_factor, const float attn_factor,
+ const rope_corr_dims corr_dims, const float theta_scale, const float * freq_factors,
+ const sycl::nd_item<3> & item_ct1) {
+ const int i0 = 2 * (item_ct1.get_local_range(1) * item_ct1.get_group(1) + item_ct1.get_local_id(1));
+
+ if (i0 >= ne0) {
+ return;
+ }
+
+ const int row = item_ct1.get_local_range(2) * item_ct1.get_group(2) + item_ct1.get_local_id(2);
+
+ const int row0 = row % ne1;
+ const int channel0 = row / ne1;
+
+ const int i = row * ne0 + i0 / 2;
+ const int i2 = channel0 * s2 + row0 * s1 + i0 / 2;
+
+ if (i0 >= n_dims) {
+ *reinterpret_cast<sycl::vec<T, 2> *>(dst + i + i0 / 2) = *reinterpret_cast<const sycl::vec<T, 2> *>(x + i2 + i0 / 2);
+ return;
+ }
+
+ const float theta_base = pos[channel0] * sycl::pow(theta_scale, i0 / 2.0f);
+
+ const float freq_factor = has_ff ? freq_factors[i0 / 2] : 1.0f;
+
+ float cos_theta;
+ float sin_theta;
+
+ rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
+
+ const float x0 = x[i2 + 0];
+ const float x1 = x[i2 + n_dims / 2];
+
+ dst[i + 0] = x0 * cos_theta - x1 * sin_theta;
+ dst[i + n_dims / 2] = x0 * sin_theta + x1 * cos_theta;
+}
+
+template <typename T, bool has_ff>
+static void rope_multi(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1,
+ const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale,
+ const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims,
+ const float theta_scale, const float * freq_factors, const mrope_sections sections,
+ const bool is_imrope, const sycl::nd_item<3> & item_ct1) {
+ // get index pos
+ const int i0 = 2 * (item_ct1.get_group(1) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1));
+ if (i0 >= ne0) {
+ return;
+ }
+ const int row_dst = (item_ct1.get_group(2) * item_ct1.get_local_range(2)) + item_ct1.get_local_id(2);
+
+ const int row_x = row_dst % ne1;
+ const int channel_x = row_dst / ne1;
+ const int idst = (row_dst * ne0) + (i0 / 2);
+ const size_t ix = ((size_t) channel_x * s2) + ((size_t) row_x * s1) + (i0 / 2);
+
+ if (i0 >= n_dims) {
+ *reinterpret_cast<sycl::vec<T, 2> *>(dst + idst + i0 / 2) = *reinterpret_cast<const sycl::vec<T, 2> *>(x + i0 / 2 + ix);
+ return;
+ }
+
+ const int sect_dims = sections.v[0] + sections.v[1] + sections.v[2] + sections.v[3];
+ const int sec_w = sections.v[1] + sections.v[0];
+ const int sector = (i0 / 2) % sect_dims;
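+    // sections.v partitions the rotary dim pairs among up to four position
+    // streams (e.g. temporal/height/width/extra in M-RoPE); sector picks which
+    // stream's pos entry drives theta, with the imrope variant interleaving
+    // streams per triplet of dim pairs.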
+
+ float theta_base = 0.0;
+ if (is_imrope) {
+ if (sector % 3 == 1 && sector < 3 * sections.v[1]) {
+ theta_base = pos[channel_x + ne2 * 1]*sycl::pow(theta_scale, i0/2.0f);
+ } else if (sector % 3 == 2 && sector < 3 * sections.v[2]) {
+ theta_base = pos[channel_x + ne2 * 2]*sycl::pow(theta_scale, i0/2.0f);
+ } else if (sector % 3 == 0 && sector < 3 * sections.v[0]) {
+ theta_base = pos[channel_x]*sycl::pow(theta_scale, i0/2.0f);
+ } else {
+ theta_base = pos[channel_x + ne2 * 3]*sycl::pow(theta_scale, i0/2.0f);
+ }
+ } else {
+ if (sector < sections.v[0]) {
+ theta_base = pos[channel_x]*sycl::pow(theta_scale, i0/2.0f);
+ }
+ else if (sector >= sections.v[0] && sector < sec_w) {
+ theta_base = pos[channel_x + ne2 * 1]*sycl::pow(theta_scale, i0/2.0f);
+ }
+ else if (sector >= sec_w && sector < sec_w + sections.v[2]) {
+ theta_base = pos[channel_x + ne2 * 2]*sycl::pow(theta_scale, i0/2.0f);
+ }
+ else if (sector >= sec_w + sections.v[2]) {
+ theta_base = pos[channel_x + ne2 * 3]*sycl::pow(theta_scale, i0/2.0f);
+ }
+ }
+
+ const float freq_factor = has_ff ? freq_factors[i0 / 2] : 1.0f;
+ float cos_theta;
+ float sin_theta;
+ rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
+ const float x0 = x[ix + 0];
+ const float x1 = x[ix + n_dims/2];
+
+ // store results in dst
+ dst[idst + 0] = x0 * cos_theta - x1 * sin_theta;
+ dst[idst + n_dims/2] = x0 * sin_theta + x1 * cos_theta;
+}
+
+template <typename T, bool has_ff>
+static void rope_vision(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1,
+ const size_t s2, const int n_dims, const int32_t * pos, const float freq_scale,
+ const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims,
+ const float theta_scale, const float * freq_factors, const mrope_sections sections,
+ const sycl::nd_item<3> & item_ct1) {
+ // get index pos
+ const int i0 = 2 * (item_ct1.get_group(1) * item_ct1.get_local_range(1) + item_ct1.get_local_id(1));
+ if (i0 >= ne0) {
+ return;
+ }
+ const int row_dst = (item_ct1.get_group(2) * item_ct1.get_local_range(2)) + item_ct1.get_local_id(2);
+ const int row_x = row_dst % ne1;
+ const int channel_x = row_dst / ne1;
+ const int idst = (row_dst * ne0) + (i0 / 2);
+ const size_t ix = ((size_t) channel_x * s2) + ((size_t) row_x * s1) + (i0 / 2);
+
+ const int sect_dims = sections.v[0] + sections.v[1];
+ const int sector = (i0 / 2) % sect_dims;
+
+ float theta_base = 0.0f;
+ if (sector < sections.v[0]) {
+ const int p = sector;
+ theta_base = pos[channel_x] * sycl::pow(theta_scale, (float) p);
+ } else {
+ const int p = sector - sections.v[0];
+ theta_base = pos[channel_x + ne2] * sycl::pow(theta_scale, (float) p);
+ }
+
+ const float freq_factor = has_ff ? freq_factors[i0 / 2] : 1.0f;
+ float cos_theta;
+ float sin_theta;
+ rope_yarn(theta_base / freq_factor, freq_scale, corr_dims, i0, ext_factor, attn_factor, &cos_theta, &sin_theta);
+ const float x0 = x[ix + 0];
+ const float x1 = x[ix + n_dims];
+
+ // store results in dst
+ dst[idst + 0] = x0 * cos_theta - x1 * sin_theta;
+ dst[idst + n_dims] = x0 * sin_theta + x1 * cos_theta;
+}
+
+template <typename T>
+static void rope_norm_sycl(const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2,
+ const int n_dims, int nr, const int32_t * pos, const float freq_scale, const float freq_base,
+ const float ext_factor, const float attn_factor, const rope_corr_dims corr_dims,
+ const float * freq_factors, queue_ptr stream) {
+ GGML_ASSERT(ne0 % 2 == 0);
+ const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1);
+ const int num_blocks_x = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE));
+ const sycl::range<3> block_nums(1, num_blocks_x, nr);
+
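+    // Standard RoPE frequency ladder: theta_i = pos * theta_scale^(i0/2)
+    //                               = pos * freq_base^(-i0 / n_dims).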
+ const float theta_scale = powf(freq_base, -2.0f / n_dims);
+
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ if (freq_factors == nullptr) {
+ /*
+ DPCT1049:40: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) {
+ rope_norm<T, false>(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims,
+ theta_scale, freq_factors, item_ct1);
+ });
+ } else {
+ /*
+ DPCT1049:41: The work-group size passed to the SYCL kernel may exceed
+ the limit. To get the device limit, query
+ info::device::max_work_group_size. Adjust the work-group size if needed.
+ */
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) {
+ rope_norm<T, true>(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims,
+ theta_scale, freq_factors, item_ct1);
+ });
+ }
+}
+
+template <typename T>
+static void rope_neox_sycl(const T * x, T * dst, const int ne0, const int ne1, const int s1, const int s2,
+ const int n_dims, const int nr, const int32_t * pos, const float freq_scale,
+ const float freq_base, const float ext_factor, const float attn_factor,
+ const rope_corr_dims corr_dims, const float * freq_factors, queue_ptr stream) {
+ GGML_ASSERT(ne0 % 2 == 0);
+ const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1);
+ const int num_blocks_x = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE));
+ const sycl::range<3> block_nums(1, num_blocks_x, nr);
+
+ const float theta_scale = powf(freq_base, -2.0f / n_dims);
+
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+
+ if (freq_factors == nullptr) {
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) {
+ rope_neox<T, false>(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims,
+ theta_scale, freq_factors, item_ct1);
+ });
+ } else {
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims), [=](sycl::nd_item<3> item_ct1) {
+ rope_neox<T, true>(x, dst, ne0, ne1, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor, corr_dims,
+ theta_scale, freq_factors, item_ct1);
+ });
+ }
+}
+
+template <typename T>
+static void rope_multi_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1,
+ const size_t s2, const int n_dims, const int nr, const int32_t * pos,
+ const float freq_scale, const float freq_base, const float ext_factor,
+ const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors,
+ const mrope_sections sections, const bool is_imrope, queue_ptr stream) {
+ GGML_ASSERT(ne0 % 2 == 0);
+ const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1);
+ const int n_blocks_y = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE));
+ const sycl::range<3> grid_dims(1, n_blocks_y, nr);
+ const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims);
+
+ const float theta_scale = std::pow(freq_base, -2.0f / n_dims);
+    // FP16 capability check, needed only when T is sycl::half
+ if constexpr (std::is_same_v<T, sycl::half>) {
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+ }
+ // launch kernel
+ if (freq_factors == nullptr) {
+ stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) {
+ rope_multi<T, false>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor,
+ corr_dims, theta_scale, freq_factors, sections, is_imrope, item_ct1);
+ });
+ } else {
+ stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) {
+ rope_multi<T, true>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor,
+ corr_dims, theta_scale, freq_factors, sections, is_imrope, item_ct1);
+ });
+ }
+}
+
+// rope vision
+template <typename T>
+static void rope_vision_sycl(const T * x, T * dst, const int ne0, const int ne1, const int ne2, const size_t s1,
+ const size_t s2, const int n_dims, const int nr, const int32_t * pos,
+ const float freq_scale, const float freq_base, const float ext_factor,
+ const float attn_factor, const rope_corr_dims corr_dims, const float * freq_factors,
+ const mrope_sections sections, queue_ptr stream) {
+ GGML_ASSERT(ne0 % 2 == 0);
+ const sycl::range<3> block_dims(1, SYCL_ROPE_BLOCK_SIZE, 1);
+ const int n_blocks_y = ceil_div(ne0, (2 * SYCL_ROPE_BLOCK_SIZE));
+ const sycl::range<3> grid_dims(1, n_blocks_y, nr);
+ const sycl::nd_range<3> nd_range(grid_dims * block_dims, block_dims);
+
+ const float theta_scale = std::pow(freq_base, -2.0f / n_dims);
+    // FP16 capability check, needed only when T is sycl::half
+ if constexpr (std::is_same_v<T, sycl::half>) {
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+ }
+ // launch kernel
+ if (freq_factors == nullptr) {
+ stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) {
+ rope_vision<T, false>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor,
+ corr_dims, theta_scale, freq_factors, sections, item_ct1);
+ });
+ } else {
+ stream->parallel_for(nd_range, [=](sycl::nd_item<3> item_ct1) {
+ rope_vision<T, true>(x, dst, ne0, ne1, ne2, s1, s2, n_dims, pos, freq_scale, ext_factor, attn_factor,
+ corr_dims, theta_scale, freq_factors, sections, item_ct1);
+ });
+ }
+}
+
+inline void ggml_sycl_op_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst) {
+
+ GGML_ASSERT(dst->src[0]->type == GGML_TYPE_F32 || dst->src[0]->type == GGML_TYPE_F16);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32 || dst->type == GGML_TYPE_F16);
+ GGML_ASSERT(dst->src[0]->type == dst->type);
+    const int64_t ne00 = dst->src[0]->ne[0]; // head dims
+    const int64_t ne01 = dst->src[0]->ne[1]; // num heads
+    const int64_t ne02 = dst->src[0]->ne[2]; // num tokens (the position axis indexed by pos)
+ const int64_t nr = ggml_nrows(dst->src[0]);
+
+ const size_t s01 = dst->src[0]->nb[1] / ggml_type_size(dst->src[0]->type);
+ const size_t s02 = dst->src[0]->nb[2] / ggml_type_size(dst->src[0]->type);
+
+ //const int n_past = ((int32_t *) dst->op_params)[0];
+ const int n_dims = ((int32_t *) dst->op_params)[1];
+ const int mode = ((int32_t *) dst->op_params)[2];
+ //const int n_ctx = ((int32_t *) dst->op_params)[3];
+ const int n_ctx_orig = ((int32_t *) dst->op_params)[4];
+ mrope_sections sections;
+
+ // RoPE alteration for extended context
+ float freq_base;
+ float freq_scale;
+ float ext_factor;
+ float attn_factor;
+ float beta_fast;
+ float beta_slow;
+
+ memcpy(&freq_base, (int32_t *) dst->op_params + 5, sizeof(float));
+ memcpy(&freq_scale, (int32_t *) dst->op_params + 6, sizeof(float));
+ memcpy(&ext_factor, (int32_t *) dst->op_params + 7, sizeof(float));
+ memcpy(&attn_factor, (int32_t *) dst->op_params + 8, sizeof(float));
+ memcpy(&beta_fast, (int32_t *) dst->op_params + 9, sizeof(float));
+ memcpy(&beta_slow, (int32_t *) dst->op_params + 10, sizeof(float));
+ memcpy(&sections.v, (int32_t *) dst->op_params + 11, sizeof(int)*4);
+
+ const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
+ const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE;
+ const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE;
+ const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
+
+ if (is_mrope) {
+ GGML_ASSERT(sections.v[0] > 0 || sections.v[1] > 0 || sections.v[2] > 0);
+ }
+
+ if (is_vision) {
+ GGML_ASSERT(n_dims == ne00/2);
+ }
+
+ const int32_t * pos = (const int32_t *) dst->src[1]->data;
+
+ const float * freq_factors = nullptr;
+ if (dst->src[2] != nullptr) {
+ freq_factors = (const float *) dst->src[2]->data;
+ }
+
+ rope_corr_dims corr_dims;
+ ggml_rope_yarn_corr_dims(n_dims, n_ctx_orig, freq_base, beta_fast, beta_slow, corr_dims.v);
+
+ dpct::queue_ptr main_stream = ctx.stream();
+ SYCL_CHECK(ggml_sycl_set_device(ctx.device));
+
+ // compute
+ if (is_neox) {
+ GGML_SYCL_DEBUG("%s: neox path\n", __func__);
+ if (dst->src[0]->type == GGML_TYPE_F32) {
+ rope_neox_sycl((const float *) dst->src[0]->data, (float *) dst->data, ne00, ne01, s01, s02, n_dims, nr,
+ pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, main_stream);
+ } else if (dst->src[0]->type == GGML_TYPE_F16) {
+ rope_neox_sycl((const sycl::half *) dst->src[0]->data, (sycl::half *) dst->data, ne00, ne01, s01, s02,
+ n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors,
+ main_stream);
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ } else if (is_mrope && !is_vision) {
+ GGML_SYCL_DEBUG("%s: mrope path\n", __func__);
+ if (dst->src[0]->type == GGML_TYPE_F16) {
+ rope_multi_sycl((const sycl::half *)dst->src[0]->data, (sycl::half *)dst->data, ne00, ne01, ne02, s01,
+ s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims,
+ freq_factors, sections, is_imrope, main_stream);
+ } else if (dst->src[0]->type == GGML_TYPE_F32) {
+ rope_multi_sycl((const float *) dst->src[0]->data, (float *) dst->data, ne00, ne01, ne02, s01, s02, n_dims,
+ nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections,
+ is_imrope, main_stream);
+ } else {
+ GGML_ABORT("Fatal error: Tensor type unsupported!");
+ }
+ } else if (is_vision) {
+ GGML_SYCL_DEBUG("%s: vision path\n", __func__);
+ if (dst->src[0]->type == GGML_TYPE_F16) {
+ rope_vision_sycl((const sycl::half *) dst->src[0]->data, (sycl::half *) dst->data, ne00, ne01, ne02, s01,
+ s02, n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims,
+ freq_factors, sections, main_stream);
+ } else if (dst->src[0]->type == GGML_TYPE_F32) {
+ rope_vision_sycl((const float *) dst->src[0]->data, (float *) dst->data, ne00, ne01, ne02, s01, s02, n_dims,
+ nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, sections,
+ main_stream);
+ } else {
+ GGML_ABORT("Fatal error: Tensor type unsupported!");
+ }
+ } else {
+ GGML_SYCL_DEBUG("%s: norm path\n", __func__);
+ if (dst->src[0]->type == GGML_TYPE_F32) {
+ rope_norm_sycl((const float *) dst->src[0]->data, (float *) dst->data, ne00, ne01, s01, s02, n_dims, nr,
+ pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors, main_stream);
+ } else if (dst->src[0]->type == GGML_TYPE_F16) {
+ rope_norm_sycl((const sycl::half *) dst->src[0]->data, (sycl::half *) dst->data, ne00, ne01, s01, s02,
+ n_dims, nr, pos, freq_scale, freq_base, ext_factor, attn_factor, corr_dims, freq_factors,
+ main_stream);
+ } else {
+ GGML_ABORT("fatal error");
+ }
+ }
+}
+
+void ggml_sycl_rope(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/3);
+ ggml_sycl_op_rope(ctx, dst);
+}
+
diff --git a/llama.cpp/ggml/src/ggml-sycl/rope.hpp b/llama.cpp/ggml/src/ggml-sycl/rope.hpp
new file mode 100644
index 0000000..8c7141a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/rope.hpp
@@ -0,0 +1,20 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_ROPE_HPP
+#define GGML_SYCL_ROPE_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_rope(ggml_backend_sycl_context & ctx, ggml_tensor *dst);
+
+#endif // GGML_SYCL_ROPE_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/set.cpp b/llama.cpp/ggml/src/ggml-sycl/set.cpp
new file mode 100644
index 0000000..381326d
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/set.cpp
@@ -0,0 +1,73 @@
+#include "presets.hpp"
+#include "common.hpp"
+#include "ggml.h"
+#include "set.hpp"
+#include <cstdint>
+#include <sycl/sycl.hpp>
+using namespace sycl;
+
+// Internal function: perform element-wise set operation for each thread
+inline void set_f32(const float* src, float* dst,
+ const int64_t ne0, const int64_t ne1,
+ const int64_t ne2, const int64_t ne3,
+ const int64_t nb[3], const int64_t src_nb[3],
+ const int64_t offset_elem,
+ const nd_item<1>& item)
+{
+ const size_t idx = item.get_global_id(0);
+ const size_t total = ne0 * ne1 * ne2 * ne3;
+ if (idx >= total) return;
+
+ // Convert linear index to 4D indices
+ const size_t i3 = idx / (ne2 * ne1 * ne0);
+ const size_t rem = idx % (ne2 * ne1 * ne0);
+ const size_t i2 = rem / (ne1 * ne0);
+ const size_t rem2 = rem % (ne1 * ne0);
+ const size_t i1 = rem2 / ne0;
+ const size_t i0 = rem2 % ne0;
+
+ // Compute source and destination indices and copy
+ dst[i0 + i1*nb[0] + i2*nb[1] + i3*nb[2] + offset_elem] =
+ src[i0 + i1*src_nb[0] + i2*src_nb[1] + i3*src_nb[2]];
+}
+
+// Main function: prepare GPU queue and launch parallel_for
+void ggml_sycl_op_set(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+ const ggml_tensor* src0 = dst->src[0];
+ const ggml_tensor* src1 = dst->src[1];
+
+ // Ensure shapes and types are compatible
+ GGML_ASSERT(ggml_are_same_shape(src0, dst));
+ GGML_ASSERT(ggml_is_contiguous(dst) && ggml_is_contiguous(src0));
+ GGML_ASSERT(dst->type == src0->type && src0->type == src1->type && dst->type == GGML_TYPE_F32);
+
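+ // op_params layout for SET: [0..2] = dst byte strides nb1..nb3, [3] = byte offset,
+ // [4] = inplace flag; strides and offset are converted from bytes to float elements below.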
+ const int32_t* opts = (const int32_t*) dst->op_params;
+ const int64_t nb[3] = {opts[0]/sizeof(float), opts[1]/sizeof(float), opts[2]/sizeof(float)};
+ const int64_t offset_elem = opts[3] / sizeof(float);
+ const bool inplace = opts[4];
+
+ float* dst_ptr = (float*) dst->data;
+ const float* src0_ptr = (const float*) src0->data;
+ const float* src1_ptr = (const float*) src1->data;
+
+ queue_ptr stream = ctx.stream();
+
+ // Copy src0 to dst if not inplace
+ if (!inplace)
+ stream->memcpy(dst_ptr, src0_ptr, ggml_nbytes(dst));
+
+ const int64_t ne[4] = {src1->ne[0], src1->ne[1], src1->ne[2], src1->ne[3]};
+ const int64_t src_nb[3] = {src1->nb[1]/sizeof(float), src1->nb[2]/sizeof(float), src1->nb[3]/sizeof(float)};
+
+ const size_t total_threads = ne[0]*ne[1]*ne[2]*ne[3];
+ const size_t grid_size = ((total_threads + SYCL_SET_BLOCK_SIZE - 1) / SYCL_SET_BLOCK_SIZE) * SYCL_SET_BLOCK_SIZE;
+
+ // Launch one work-item per element of src1 to write it into dst
+ stream->parallel_for(
+ nd_range<1>(range<1>(grid_size), range<1>(SYCL_SET_BLOCK_SIZE)),
+ [=](nd_item<1> item) {
+ set_f32(src1_ptr, dst_ptr,
+ ne[0], ne[1], ne[2], ne[3],
+ nb, src_nb, offset_elem, item); }
+ );
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/set.hpp b/llama.cpp/ggml/src/ggml-sycl/set.hpp
new file mode 100644
index 0000000..657d7ac
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/set.hpp
@@ -0,0 +1,5 @@
+#pragma once
+#include "backend.hpp"
+#include "ggml.h"
+
+void ggml_sycl_op_set(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
diff --git a/llama.cpp/ggml/src/ggml-sycl/set_rows.cpp b/llama.cpp/ggml/src/ggml-sycl/set_rows.cpp
new file mode 100644
index 0000000..a641c10
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/set_rows.cpp
@@ -0,0 +1,234 @@
+#include "set_rows.hpp"
+#include "cpy.hpp"
+
+namespace utils {
+template<typename T>
+static constexpr bool is_arithmetic_v() {
+ return std::is_arithmetic_v<T> || std::is_same_v<T, sycl::half> || std::is_same_v<T, sycl::ext::oneapi::bfloat16>;
+}
+}
+
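+// Scalar conversion routed through sycl::vec so half/bfloat16 conversions use
+// well-defined rounding rather than an implicit cast.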
+template<typename TIn, typename TOut>
+static inline std::enable_if_t<utils::is_arithmetic_v<TIn>() && utils::is_arithmetic_v<TOut>(), void>
+convert(const char* src, char* dst) {
+ auto src_val = *reinterpret_cast<const TIn*>(src);
+ auto dst_val = sycl::vec<TIn, 1>(src_val).template convert<TOut, sycl::rounding_mode::automatic>()[0];
+ *reinterpret_cast<TOut*>(dst) = dst_val;
+}
+
+template <typename TIdx, typename blockType, int qk, cpy_kernel_t cpyblck>
+static void set_rows_sycl_q(const char * __restrict__ src0_d,
+ const TIdx * __restrict__ src1_d,
+ blockType * __restrict__ dst_d,
+ // tensor dimensions src0 and src1
+ const int64_t ne00,
+ const int64_t ne01,
+ const int64_t ne02,
+ const int64_t ne03,
+ const int64_t ne10,
+ const int64_t ne11,
+ const int64_t ne12,
+ const int64_t ne13,
+ // strides for src0
+ const size_t nb00,
+ const size_t nb01,
+ const size_t nb02,
+ const size_t nb03,
+ // strides for src1
+ const size_t nb10,
+ const size_t nb11,
+ const size_t nb12,
+ const size_t nb13,
+ // strides for dst
+ const size_t nb1,
+ const size_t nb2,
+ const size_t nb3,
+ queue_ptr stream) {
+ const int64_t total_blocks = (ne00 * ne01 * ne02 * ne03) / qk;
+ constexpr int block_size = 256;
+ const int64_t grid_size = ceil_div(total_blocks, block_size);
+
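+ // One work-item quantizes and stores one complete block of qk consecutive source floats.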
+ stream->parallel_for(sycl::nd_range<1>(grid_size * block_size, block_size), [=](sycl::nd_item<1> item_ct1) {
+ const int64_t i = item_ct1.get_global_linear_id();
+ if (i >= total_blocks) {
+ return;
+ }
+ const int64_t i_base = i * qk;
+ const int64_t i03 = i_base / (ne00 * ne01 * ne02);
+ const int64_t rem1 = i_base - i03 * (ne00 * ne01 * ne02);
+ const int64_t i02 = rem1 / (ne00 * ne01);
+ const int64_t rem2 = rem1 - i02 * ne00 * ne01;
+ const int64_t i01 = rem2 / ne00;
+ const int64_t i00 = rem2 - i01 * ne00;
+ const int64_t i12 = i03 % ne12;
+ const int64_t i11 = i02 % ne11;
+ const int64_t i10 = i01;
+ const size_t src_offset = calculate_offset<3>({ nb01, nb02, nb03 }, { i01, i02, i03 });
+ const char * src_block = src0_d + src_offset + i00 * sizeof(float);
+ const size_t src1_offset = calculate_offset<3>({ nb10, nb11, nb12 }, { i10, i11, i12 });
+ const int64_t dst_row = src1_d[src1_offset / sizeof(TIdx)];
+ const size_t dst_offset =
+ calculate_offset<3>({ nb1, nb2, nb3 }, { dst_row, i02, i03 }) + (i00 / qk) * sizeof(blockType);
+ char * dst_block = reinterpret_cast<char *>(dst_d) + dst_offset;
+ cpyblck(src_block, dst_block);
+ });
+ GGML_UNUSED(ne10);
+ GGML_UNUSED(ne13);
+ GGML_UNUSED(nb00);
+ GGML_UNUSED(nb13);
+}
+
+template<typename TIn, typename TIdx, typename TOut>
+static void k_set_rows(
+ const char * __restrict__ src0, const TIdx * __restrict__ src1, char * __restrict__ dst,
+ const int64_t ne00, const int64_t ne01, const int64_t ne02,
+ const int64_t ne11, const int64_t ne12,
+ const size_t nb01, const size_t nb02, const size_t nb03,
+ const size_t nb10, const size_t nb11, const size_t nb12,
+ const size_t nb1, const size_t nb2, const size_t nb3,
+ const size_t src_type_size, const size_t dst_type_size,
+ const int64_t total_elements,
+ const sycl::nd_item<1> & item_ct1) {
+
+ const int64_t i = item_ct1.get_global_linear_id();
+ if (i >= total_elements) {
+ return;
+ }
+
+ const int64_t i03 = i / (ne00 * ne01 * ne02);
+ const int64_t i02 = (i - i03 * ne00 * ne01 * ne02) / (ne00 * ne01);
+ const int64_t i01 = (i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01) / ne00;
+ const int64_t i00 = i - i03 * ne00 * ne01 * ne02 - i02 * ne00 * ne01 - i01 * ne00;
+
+ const int64_t i12 = i03 % ne12;
+ const int64_t i11 = i02 % ne11;
+ const int64_t i10 = i01;
+
+ const int64_t dst_row = *(const TIdx *)((const char *)src1 + calculate_offset<3>({nb10, nb11, nb12}, {i10, i11, i12}));
+
+ const char * src0_row = src0 + calculate_offset<3>({nb01, nb02, nb03}, {i01, i02, i03});
+ const char * src_elem = src0_row + i00 * src_type_size;
+ char * dst_row_ptr = dst + dst_row*nb1 + i02*nb2 + i03*nb3;
+ char * dst_elem = dst_row_ptr + i00 * dst_type_size;
+
+ convert<TIn, TOut>(src_elem, dst_elem);
+}
+
+template<typename TIn, typename TIdx, typename TOut>
+static void set_rows_sycl(
+ const char * src0_d, const TIdx * src1_d, char * dst_d,
+ const int64_t ne00, const int64_t ne01, const int64_t ne02, const int64_t ne03,
+ const int64_t ne11, const int64_t ne12, const size_t nb01, const size_t nb02, const size_t nb03,
+ const size_t nb10, const size_t nb11, const size_t nb12,
+ const size_t nb1, const size_t nb2, const size_t nb3,
+ const size_t src_type_size, const size_t dst_type_size,
+ queue_ptr stream) {
+
+ const int64_t total_elements = ne00 * ne01 * ne02 * ne03;
+
+ constexpr int block_size = 64;
+ const int64_t grid_size = ceil_div(total_elements, block_size);
+
+ stream->parallel_for(
+ sycl::nd_range<1>(grid_size * block_size, block_size),
+ [=](sycl::nd_item<1> item_ct1) {
+ k_set_rows<TIn, TIdx, TOut>(
+ src0_d, src1_d, dst_d,
+ ne00, ne01, ne02,
+ ne11, ne12,
+ nb01, nb02, nb03,
+ nb10, nb11, nb12,
+ nb1, nb2, nb3,
+ src_type_size, dst_type_size,
+ total_elements,
+ item_ct1
+ );
+ }
+ );
+}
+
+template<typename TIn, typename TIdx>
+static void set_rows_sycl(ggml_backend_sycl_context & ctx, const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst) {
+ const char * src0_d = (const char *)src0->data;
+ const TIdx * src1_d = (const TIdx *)src1->data;
+
+ GGML_TENSOR_BINARY_OP_LOCALS
+
+ dpct::queue_ptr stream = ctx.stream();
+ switch (dst->type) {
+ case GGML_TYPE_F32:
+ set_rows_sycl<TIn, TIdx, float>(
+ src0_d, src1_d, (char *)dst->data,
+ ne00, ne01, ne02, ne03,
+ ne11, ne12,
+ nb01, nb02, nb03,
+ nb10, nb11, nb12,
+ nb1, nb2, nb3,
+ sizeof(TIn), sizeof(float),
+ stream
+ );
+ break;
+ case GGML_TYPE_F16:
+ dpct::has_capability_or_fail(stream->get_device(), { sycl::aspect::fp16 });
+ set_rows_sycl<TIn, TIdx, sycl::half>(
+ src0_d, src1_d, (char *)dst->data,
+ ne00, ne01, ne02, ne03,
+ ne11, ne12,
+ nb01, nb02, nb03,
+ nb10, nb11, nb12,
+ nb1, nb2, nb3,
+ sizeof(TIn), sizeof(sycl::half),
+ stream
+ );
+ break;
+ case GGML_TYPE_BF16:
+ set_rows_sycl<TIn, TIdx, sycl::ext::oneapi::bfloat16>(
+ src0_d, src1_d, (char *)dst->data,
+ ne00, ne01, ne02, ne03,
+ ne11, ne12,
+ nb01, nb02, nb03,
+ nb10, nb11, nb12,
+ nb1, nb2, nb3,
+ sizeof(TIn), sizeof(sycl::ext::oneapi::bfloat16),
+ stream
+ );
+ break;
+ case GGML_TYPE_Q8_0:
+ set_rows_sycl_q<TIdx, block_q8_0, QK8_0, cpy_blck_f32_q8_0>(src0_d, src1_d, (block_q8_0 *)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb1, nb2, nb3, stream);
+ break;
+ case GGML_TYPE_Q5_1:
+ set_rows_sycl_q<TIdx, block_q5_1, QK5_1, cpy_blck_f32_q5_1>(src0_d, src1_d, (block_q5_1 *)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb1, nb2, nb3, stream);
+ break;
+ case GGML_TYPE_Q5_0:
+ set_rows_sycl_q<TIdx, block_q5_0, QK5_0, cpy_blck_f32_q5_0>(src0_d, src1_d, (block_q5_0 *)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb1, nb2, nb3, stream);
+ break;
+ case GGML_TYPE_Q4_1:
+ set_rows_sycl_q<TIdx, block_q4_1, QK4_1, cpy_blck_f32_q4_1>(src0_d, src1_d, (block_q4_1 *)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb1, nb2, nb3, stream);
+ break;
+ case GGML_TYPE_Q4_0:
+ set_rows_sycl_q<TIdx, block_q4_0, QK4_0, cpy_blck_f32_q4_0>(src0_d, src1_d, (block_q4_0 *)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb1, nb2, nb3, stream);
+ break;
+ case GGML_TYPE_IQ4_NL:
+ set_rows_sycl_q<TIdx, block_iq4_nl, QK4_NL, cpy_blck_f32_iq4_nl>(src0_d, src1_d, (block_iq4_nl *)dst->data, ne00, ne01, ne02, ne03, ne10, ne11, ne12, ne13, nb00, nb01, nb02, nb03, nb10, nb11, nb12, nb13, nb1, nb2, nb3, stream);
+ break;
+
+ default:
+ GGML_ABORT("Unsupported tensor type!");
+ break;
+ }
+}
+
+void ggml_sycl_op_set_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_I64 || src1->type == GGML_TYPE_I32);
+
+ if (src1->type == GGML_TYPE_I64) {
+ set_rows_sycl<float, int64_t>(ctx, src0, src1, dst);
+ } else {
+ set_rows_sycl<float, int32_t>(ctx, src0, src1, dst);
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/set_rows.hpp b/llama.cpp/ggml/src/ggml-sycl/set_rows.hpp
new file mode 100644
index 0000000..27fcc8f
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/set_rows.hpp
@@ -0,0 +1,8 @@
+#ifndef GGML_SYCL_SET_ROWS_HPP
+#define GGML_SYCL_SET_ROWS_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_set_rows(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_SET_ROWS_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/softmax.cpp b/llama.cpp/ggml/src/ggml-sycl/softmax.cpp
new file mode 100644
index 0000000..b41124a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/softmax.cpp
@@ -0,0 +1,426 @@
+#include "softmax.hpp"
+#include <cstdint>
+#include <utility>
+#include <cmath>
+
+
+template <typename T> static __dpct_inline__ float t2f32(T val) {
+ return (float) val;
+}
+
+template <> float __dpct_inline__ t2f32<sycl::half>(sycl::half val) {
+ return sycl::vec<sycl::half, 1>(val)
+ .convert<float, sycl::rounding_mode::automatic>()[0];
+}
+
+struct soft_max_params {
+
+ int64_t nheads;
+ uint32_t n_head_log2;
+ int64_t ncols;
+ int64_t nrows_x;
+ int64_t nrows_y;
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ int64_t nb11;
+ int64_t nb12;
+ int64_t nb13;
+
+ int64_t ne12;
+ int64_t ne13;
+ float scale;
+ float max_bias;
+ float m0;
+ float m1;
+};
+
+// When ncols_template == 0 the bounds for the loops in this function are not known and can't be unrolled.
+// As we want to keep pragma unroll for all other cases we suppress the clang transformation warning here.
+#ifdef __clang__
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wpass-failed"
+#endif // __clang__
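+// Row-wise softmax in three passes: (1) reduce the row maximum for numerical
+// stability, (2) accumulate the sum of exp(x - max), (3) scale by the inverse sum.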
+template <bool use_shared, int ncols_template, int block_size_template, typename T>
+static void soft_max_f32(const float * x,
+ const T * mask,
+ const float * sinks,
+ float * dst,
+ const soft_max_params p,
+ uint8_t * dpct_local) {
+ auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>();
+ const int ncols = ncols_template == 0 ? p.ncols : ncols_template;
+ const int block_size = block_size_template == 0
+ ? item_ct1.get_local_range(2)
+ : block_size_template;
+ const int nthreads = block_size;
+ const int nwarps = nthreads / WARP_SIZE;
+ size_t nreduce = nwarps / WARP_SIZE;
+
+ const int tid = item_ct1.get_local_id(2);
+
+ const int64_t i03 = item_ct1.get_group(0);
+ const int64_t i02 = item_ct1.get_group(1);
+ const int64_t i01 = item_ct1.get_group(2);
+
+ // TODO: noncontiguous inputs/outputs
+ const int rowx = item_ct1.get_group(2) +
+ item_ct1.get_group(1) * item_ct1.get_group_range(2) +
+ item_ct1.get_group(0) * item_ct1.get_group_range(2) *
+ item_ct1.get_group_range(1);
+
+ const int64_t i11 = i01;
+ const int64_t i12 = i02 % p.ne12;
+ const int64_t i13 = i03 % p.ne13;
+
+ x += int64_t(rowx)*ncols;
+ mask += (i11*p.nb11 + i12*p.nb12 + i13*p.nb13) / sizeof(T) * (mask != nullptr);
+ dst += int64_t(rowx)*ncols;
+
+ const int warp_id = item_ct1.get_local_id(2) / WARP_SIZE;
+ const int lane_id = item_ct1.get_local_id(2) % WARP_SIZE;
+
+ const float slope = get_alibi_slope(p.max_bias, i02, p.n_head_log2, p.m0, p.m1);
+
+ float * buf_iw = (float *) dpct_local;
+
+ // shared memory buffer to cache values between iterations:
+ float *vals = use_shared ? buf_iw + sycl::max(nwarps, WARP_SIZE) : dst;
+ float max_val = sinks ? sinks[i02] : -INFINITY;
+#pragma unroll
+ for (int col0 = 0; col0 < ncols; col0 += block_size) {
+ const int col = col0 + tid;
+
+ if (ncols_template == 0 && col >= ncols) {
+ break;
+ }
+
+ const float val = x[col]*p.scale + (mask ? slope*t2f32(mask[col]) : 0.0f);
+
+ vals[col] = val;
+ max_val = sycl::max(max_val, val);
+ }
+ // find the max value in the block
+ max_val = warp_reduce_max(max_val);
+
+ if (block_size > WARP_SIZE) {
+ if (warp_id == 0) {
+ buf_iw[lane_id] = -INFINITY;
+ }
+ item_ct1.barrier();
+
+ if (lane_id == 0) {
+ buf_iw[warp_id] = max_val;
+ }
+ item_ct1.barrier();
+
+ max_val = buf_iw[lane_id];
+ max_val = warp_reduce_max(max_val);
+ }
+ float tmp = 0.0f; // partial sum
+
+#pragma unroll
+ for (int col0 = 0; col0 < ncols; col0 += block_size) {
+ const int col = col0 + tid;
+
+ if (ncols_template == 0 && col >= ncols) {
+ break;
+ }
+
+ const float val = sycl::native::exp(vals[col] - max_val);
+ tmp += val;
+ vals[col] = val;
+ }
+ // find the sum of exps in the block
+ tmp = warp_reduce_sum(tmp);
+ if (block_size > WARP_SIZE) {
+ item_ct1.barrier();
+ if (warp_id == 0) {
+ buf_iw[lane_id] = 0.0f;
+ for (size_t i = 1; i < nreduce; i += 1) {
+ buf_iw[lane_id + i * WARP_SIZE] = 0.f;
+ }
+ }
+ item_ct1.barrier();
+
+ if (lane_id == 0) {
+ buf_iw[warp_id] = tmp;
+ }
+ item_ct1.barrier();
+
+ tmp = buf_iw[lane_id];
+ for (size_t i = 1; i < nreduce; i += 1) {
+ tmp += buf_iw[lane_id + i * WARP_SIZE];
+ }
+ tmp = warp_reduce_sum(tmp);
+ }
+ if (sinks) {
+ tmp += sycl::native::exp(sinks[i02] - max_val);
+ }
+ const float inv_sum = 1.0f / tmp;
+
+#pragma unroll
+ for (int col0 = 0; col0 < ncols; col0 += block_size) {
+ const int col = col0 + tid;
+
+ if (ncols_template == 0 && col >= ncols) {
+ return;
+ }
+
+ dst[col] = vals[col] * inv_sum;
+ }
+}
+#ifdef __clang__
+#pragma clang diagnostic pop
+#endif // __clang__
+
+static void soft_max_back_f32(const float *grad, const float *dstf, float *dst,
+ const int ncols, const float scale) {
+ auto item_ct1 = sycl::ext::oneapi::this_work_item::get_nd_item<3>();
+ const int tid = item_ct1.get_local_id(2);
+ const int rowx = item_ct1.get_group(2);
+
+ grad += int64_t(rowx)*ncols;
+ dstf += int64_t(rowx)*ncols;
+ dst += int64_t(rowx)*ncols;
+
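+ // softmax backward: dx_i = scale * y_i * (g_i - sum_j g_j * y_j),
+ // where y is the forward output and g the incoming gradient.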
+ float dgf_dot = 0.0f; // dot product of dst from forward pass and gradients
+
+ for (int col = tid; col < ncols; col += WARP_SIZE) {
+ dgf_dot += dstf[col]*grad[col];
+ }
+
+ dgf_dot = warp_reduce_sum(dgf_dot);
+
+ for (int col = tid; col < ncols; col += WARP_SIZE) {
+ dst[col] = scale * (grad[col] - dgf_dot) * dstf[col];
+ }
+}
+
+template <int... Ns, typename T>
+static void launch_soft_max_kernels(const float * x,
+ const T * mask,
+ const float * sinks,
+ float * dst,
+ const soft_max_params & p,
+ dpct::queue_ptr stream,
+ dpct::dim3 block_dims,
+ dpct::dim3 block_nums,
+ size_t nbytes_shared)
+{
+ auto launch_kernel = [=](auto I) -> bool {
+ constexpr int ncols = decltype(I)::value;
+ constexpr int block = (ncols > 1024 ? 1024 : ncols);
+ if (p.ncols == ncols) {
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
+ sycl::range<1>(nbytes_shared), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) [[sycl::reqd_sub_group_size(
+ WARP_SIZE)]] {
+ soft_max_f32<true, ncols, block>(
+ x, mask, sinks, dst, p,
+ dpct_local_acc_ct1
+ .get_multi_ptr<sycl::access::decorated::no>()
+ .get());
+ GGML_UNUSED(item_ct1);
+ });
+ });
+ return true;
+ }
+ return false;
+ };
+
+ // unary fold over launch_kernel
+ if ((launch_kernel(std::integral_constant<int, Ns>{}) || ...)) {
+ return;
+ }
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
+ sycl::range<1>(nbytes_shared), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1)
+ [[sycl::reqd_sub_group_size(WARP_SIZE)]] {
+ soft_max_f32<true, 0, 0>(
+ x, mask, sinks, dst, p,
+ dpct_local_acc_ct1
+ .get_multi_ptr<sycl::access::decorated::no>()
+ .get());
+ GGML_UNUSED(item_ct1);
+ });
+ });
+}
+
+template <typename T>
+static void soft_max_f32_sycl(const float *x, const T *mask,
+ const float *sinks, float *dst,
+ const soft_max_params &params,
+ dpct::queue_ptr stream, int device) {
+ int nth = WARP_SIZE;
+ int max_block_size = ggml_sycl_info().max_work_group_sizes[device];
+ const int64_t ncols_x = params.ncols;
+
+ while (nth < ncols_x && nth < max_block_size) nth *= 2;
+ if (nth>max_block_size) nth = max_block_size;
+
+ const dpct::dim3 block_dims(nth, 1, 1);
+ const dpct::dim3 block_nums(params.ne01, params.ne02, params.ne03);
+ const size_t nbytes_shared =
+ (GGML_PAD(ncols_x, WARP_SIZE) + WARP_SIZE) * sizeof(float);
+
+ const int id = get_current_device_id();
+ const size_t smpbo = ggml_sycl_info().devices[id].smpbo;
+
+ if (nbytes_shared <= smpbo && ncols_x <= max_block_size) {
+ launch_soft_max_kernels<32, 64, 128, 256, 512, 1024, 2048, 4096>(
+ x, mask, sinks, dst, params, stream, block_dims, block_nums,
+ nbytes_shared);
+ } else {
+ const size_t nbytes_shared_low = WARP_SIZE * sizeof(float);
+
+ stream->submit([&](sycl::handler &cgh) {
+ sycl::local_accessor<uint8_t, 1> dpct_local_acc_ct1(
+ sycl::range<1>(nbytes_shared_low), cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ soft_max_f32<false, 0, 0>(
+ x, mask, sinks, dst, params,
+ dpct_local_acc_ct1
+ .get_multi_ptr<sycl::access::decorated::no>()
+ .get());
+ GGML_UNUSED(item_ct1);
+ });
+ });
+ }
+}
+
+static void soft_max_back_f32_sycl(const float * grad,
+ const float * dstf,
+ float * dst,
+ const int ncols,
+ const int nrows,
+ const float scale,
+ dpct::queue_ptr stream) {
+ const dpct::dim3 block_dims(WARP_SIZE, 1, 1);
+ const dpct::dim3 block_nums(nrows, 1, 1);
+
+ stream->parallel_for(sycl::nd_range<3>(block_nums * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ soft_max_back_f32(grad, dstf, dst, ncols, scale);
+ GGML_UNUSED(item_ct1);
+ });
+}
+
+void ggml_sycl_op_soft_max(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+
+ const ggml_tensor * src0 = dst->src[0];
+ const ggml_tensor * src1 = dst->src[1];
+ const ggml_tensor * src2 = dst->src[2];
+
+ const float * src0_d = (const float *) src0->data;
+ const void * src1_d = src1 ? (const void *) src1->data : nullptr;
+ const void * src2_d = src2 ? (const void *) src2->data : nullptr;
+ float * dst_d = (float *) dst->data;
+
+ dpct::queue_ptr stream = ctx.stream();
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+ // src1 contains mask and it is optional
+ GGML_ASSERT(!src1 || src1->type == GGML_TYPE_F16 || src1->type == GGML_TYPE_F32);
+
+ const int64_t nrows_x = ggml_nrows(src0);
+ const int64_t nrows_y = src0->ne[1];
+
+ const int64_t ne00 = src0->ne[0];
+
+ float scale = 1.0f;
+ float max_bias = 0.0f;
+
+ memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float));
+ memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float));
+
+ const bool use_f16 = (src1 && src1->type == GGML_TYPE_F16);
+
+ const int64_t nb11 = src1 ? src1->nb[1] : 1;
+ const int64_t nb12 = src1 ? src1->nb[2] : 1;
+ const int64_t nb13 = src1 ? src1->nb[3] : 1;
+
+ const int64_t ne12 = src1 ? src1->ne[2] : 1;
+ const int64_t ne13 = src1 ? src1->ne[3] : 1;
+
+ const uint32_t n_head = src0->ne[2];
+ const uint32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
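+ // ALiBi slope bases: the first n_head_log2 heads use powers of m0 = 2^(-max_bias / n_head_log2),
+ // the remaining heads use powers of m1 = 2^(-max_bias / (2 * n_head_log2)) (consumed by get_alibi_slope).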
+ const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
+ const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+
+ soft_max_params params = {};
+ params.nheads = src0->ne[2];
+ params.n_head_log2 = n_head_log2;
+ params.ncols = ne00;
+ params.nrows_x = nrows_x;
+ params.nrows_y = nrows_y;
+ params.ne00 = src0->ne[0];
+ params.ne01 = src0->ne[1];
+ params.ne02 = src0->ne[2];
+ params.ne03 = src0->ne[3];
+ params.nb11 = nb11;
+ params.nb12 = nb12;
+ params.nb13 = nb13;
+ params.ne12 = ne12;
+ params.ne13 = ne13;
+ params.scale = scale;
+ params.max_bias = max_bias;
+ params.m0 = m0;
+ params.m1 = m1;
+
+ if (use_f16) {
+ soft_max_f32_sycl(src0_d, (const sycl::half *)src1_d,
+ (const float *)src2_d, dst_d, params, stream,
+ ctx.device);
+ } else {
+ soft_max_f32_sycl(src0_d, (const float *)src1_d, (const float *)src2_d,
+ dst_d, params, stream, ctx.device);
+ }
+}
+
+void ggml_sycl_op_soft_max_back(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/2);
+ const ggml_tensor * src0 = dst->src[0]; // grad
+ const ggml_tensor * src1 = dst->src[1]; // forward pass output
+
+ const float * src0_d = (const float *) src0->data;
+ const float * src1_d = (const float *) src1->data;
+ float * dst_d = (float *) dst->data;
+
+ dpct::queue_ptr stream = ctx.stream();
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT( dst->type == GGML_TYPE_F32);
+
+ const int64_t ncols = src0->ne[0];
+ const int64_t nrows = ggml_nrows(src0);
+
+ float scale = 1.0f;
+ float max_bias = 0.0f;
+
+ memcpy(&scale, (const float *) dst->op_params + 0, sizeof(float));
+ memcpy(&max_bias, (const float *) dst->op_params + 1, sizeof(float));
+
+ GGML_ASSERT(max_bias == 0.0f);
+
+ soft_max_back_f32_sycl(src0_d, src1_d, dst_d, ncols, nrows, scale, stream);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/softmax.hpp b/llama.cpp/ggml/src/ggml-sycl/softmax.hpp
new file mode 100644
index 0000000..23f1e5a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/softmax.hpp
@@ -0,0 +1,24 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_SOFTMAX_HPP
+#define GGML_SYCL_SOFTMAX_HPP
+
+#include "common.hpp"
+
+#define SYCL_SOFT_MAX_BLOCK_SIZE 1024
+
+void ggml_sycl_op_soft_max(ggml_backend_sycl_context &ctx, ggml_tensor *dst);
+
+void ggml_sycl_op_soft_max_back(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_SOFTMAX_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/ssm_conv.cpp b/llama.cpp/ggml/src/ggml-sycl/ssm_conv.cpp
new file mode 100644
index 0000000..eea9a73
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/ssm_conv.cpp
@@ -0,0 +1,127 @@
+#include "ssm_conv.hpp"
+#include "common.hpp"
+
+#include <cstdio>
+
+using namespace sycl;
+
+static void kernel_ssm_conv(
+ queue &q,
+ const float *src_data,
+ const float *weights,
+ float *dst_data,
+ int d_conv,
+ int d_inner,
+ int n_t,
+ int n_s,
+ int ncs __attribute__((unused)),
+ int src_stride_inner,
+ int src_stride_seq,
+ int dst_stride_token,
+ int dst_stride_seq
+) {
+ const size_t total_work = static_cast<size_t>(d_inner) * static_cast<size_t>(n_t) * static_cast<size_t>(n_s);
+ const size_t work_group_size = 256;
+ const size_t num_work_groups = (total_work + work_group_size - 1) / work_group_size;
+
+ const range<1> global_range(num_work_groups * work_group_size);
+ const range<1> local_range(work_group_size);
+
+ q.submit([&](handler &h) {
+ h.parallel_for(
+ nd_range<1>(global_range, local_range),
+ [=](nd_item<1> item) {
+ const size_t idx = item.get_global_id(0);
+ if (idx >= total_work) {
+ return;
+ }
+
+ const int channel = static_cast<int>(idx % d_inner);
+ const int token = static_cast<int>((idx / d_inner) % n_t);
+ const int seq = static_cast<int>(idx / (static_cast<size_t>(d_inner) * static_cast<size_t>(n_t)));
+
+ const float *s = src_data
+ + static_cast<size_t>(seq) * static_cast<size_t>(src_stride_seq)
+ + static_cast<size_t>(channel) * static_cast<size_t>(src_stride_inner)
+ + static_cast<size_t>(token);
+
+ const float *c = weights + static_cast<size_t>(channel) * static_cast<size_t>(d_conv);
+
+ float sumf = 0.0f;
+ for (int i0 = 0; i0 < d_conv; ++i0) {
+ sumf += s[i0] * c[i0];
+ }
+
+ const size_t dst_idx =
+ static_cast<size_t>(seq) * static_cast<size_t>(dst_stride_seq) +
+ static_cast<size_t>(token) * static_cast<size_t>(dst_stride_token) +
+ static_cast<size_t>(channel);
+
+ dst_data[dst_idx] = sumf;
+ }
+ );
+ });
+}
+
+void ggml_sycl_ssm_conv(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ ggml_tensor * src0 = dst->src[0];
+ ggml_tensor * src1 = dst->src[1];
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(src1->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ const int d_conv = src1->ne[0];
+ const int ncs = src0->ne[0];
+ const int d_inner = src0->ne[1];
+ const int n_t = dst->ne[1];
+ const int n_s = dst->ne[2];
+
+ GGML_ASSERT(src0->ne[0] == d_conv - 1 + n_t);
+ GGML_ASSERT(src0->ne[1] == d_inner);
+ GGML_ASSERT(src1->ne[1] == d_inner);
+
+ GGML_ASSERT(dst->ne[0] == d_inner);
+ GGML_ASSERT(dst->ne[1] == n_t);
+ GGML_ASSERT(dst->ne[2] == n_s);
+
+ GGML_ASSERT(src0->nb[0] == sizeof(float));
+ GGML_ASSERT(src1->nb[0] == sizeof(float));
+
+ GGML_ASSERT(src0->nb[1] == src0->ne[0] * sizeof(float));
+
+ const int src_stride_inner = ncs;
+ const int src_stride_seq = ncs * d_inner;
+ const int dst_stride_token = d_inner;
+ const int dst_stride_seq = d_inner * n_t;
+
+ try {
+ queue *q = ctx.stream();
+
+ const float *src_data = static_cast<const float *>(src0->data);
+ const float *weights = static_cast<const float *>(src1->data);
+ float *dst_data = static_cast<float *>(dst->data);
+
+ GGML_ASSERT(src_data && weights && dst_data);
+
+ kernel_ssm_conv(
+ *q,
+ src_data,
+ weights,
+ dst_data,
+ d_conv,
+ d_inner,
+ n_t,
+ n_s,
+ ncs,
+ src_stride_inner,
+ src_stride_seq,
+ dst_stride_token,
+ dst_stride_seq
+ );
+
+ } catch (const std::exception &e) {
+ std::fprintf(stderr, "[SYCL-SSM_CONV] ERROR: %s\n", e.what());
+ throw;
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/ssm_conv.hpp b/llama.cpp/ggml/src/ggml-sycl/ssm_conv.hpp
new file mode 100644
index 0000000..1a8ad05
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/ssm_conv.hpp
@@ -0,0 +1,5 @@
+#pragma once
+
+#include "common.hpp"
+
+void ggml_sycl_ssm_conv(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
diff --git a/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp b/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp
new file mode 100644
index 0000000..7041140
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/sycl_hw.cpp
@@ -0,0 +1,15 @@
+#include "sycl_hw.hpp"
+
+// TODO: currently not used
+/*
+sycl_hw_info get_device_hw_info(sycl::device *device_ptr) {
+ sycl_hw_info res;
+ int32_t id = device_ptr->get_info<sycl::ext::intel::info::device::device_id>();
+ res.device_id = id;
+
+ syclex::architecture arch = device_ptr->get_info<syclex::info::device::architecture>();
+ res.arch = arch;
+
+ return res;
+}
+*/
diff --git a/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp b/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp
new file mode 100644
index 0000000..36b140b
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/sycl_hw.hpp
@@ -0,0 +1,26 @@
+#ifndef SYCL_HW_HPP
+#define SYCL_HW_HPP
+
+#include <algorithm>
+#include <stdio.h>
+#include <vector>
+#include <map>
+
+#include <sycl/sycl.hpp>
+
+namespace syclex = sycl::ext::oneapi::experimental;
+
+// TODO: currently not used
+/*
+struct sycl_hw_info {
+ syclex::architecture arch;
+ int32_t device_id;
+};
+
+bool is_in_vector(std::vector<int> &vec, int item);
+
+sycl_hw_info get_device_hw_info(sycl::device *device_ptr);
+*/
+
+
+#endif // SYCL_HW_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp b/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp
new file mode 100644
index 0000000..f200379
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/tsembd.cpp
@@ -0,0 +1,73 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#include "tsembd.hpp"
+
+static void timestep_embedding_f32(
+ const float * timesteps, float * dst, const int nb1,
+ const int dim, const int max_period, const sycl::nd_item<3> &item_ct1) {
+ // item_ct1.get_group(1) (blockIdx.y): index into timesteps->ne[0]
+ // item_ct1.get_group(2) (blockIdx.x): index into ((dim + 1) / 2) / BLOCK_SIZE
+ int i = item_ct1.get_group(1);
+ int j = item_ct1.get_local_id(2) + item_ct1.get_group(2) * item_ct1.get_local_range(2);
+ float * embed_data = (float *)((char *)dst + i*nb1);
+
+ int half = dim / 2;
+
+ if (dim % 2 != 0 && j == half) {
+ embed_data[2 * half] = 0.f;
+ }
+
+ if (j >= half) {
+ return;
+ }
+
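+ // Sinusoidal embedding: freq_j = max_period^(-j / half), embed[j] = cos(t * freq_j),
+ // embed[j + half] = sin(t * freq_j).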
+ float timestep = timesteps[i];
+ float freq = (float)sycl::native::exp(-(sycl::log((float)max_period)) * j / half);
+ float arg = timestep * freq;
+ embed_data[j] = sycl::cos(arg);
+ embed_data[j + half] = sycl::sin(arg);
+}
+
+static void timestep_embedding_f32_sycl(
+ const float * x, float * dst, const int ne00, const int nb1,
+ const int dim, const int max_period, const queue_ptr& stream) {
+ // Since the kernel returns early once the thread index exceeds dim/2, half_ceil does not need to round up
+ int half_ceil = dim / 2;
+ int num_blocks = (half_ceil + SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE - 1) / SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE;
+ sycl::range<3> block_dims(1, 1, SYCL_TIMESTEP_EMBEDDING_BLOCK_SIZE);
+ sycl::range<3> gridDim(1, ne00, num_blocks);
+ stream->parallel_for(
+ sycl::nd_range<3>(
+ gridDim * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ timestep_embedding_f32(
+ x, dst, nb1, dim, max_period, item_ct1
+ );
+ });
+}
+
+void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, ggml_tensor * dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/1);
+ const ggml_tensor * src0 = dst->src[0];
+ const float * src0_d = (const float *)src0->data;
+ float * dst_d = (float *)dst->data;
+ dpct::queue_ptr stream = ctx.stream();
+
+ GGML_ASSERT(src0->type == GGML_TYPE_F32);
+ GGML_ASSERT(dst->type == GGML_TYPE_F32);
+
+ const int dim = dst->op_params[0];
+ const int max_period = dst->op_params[1];
+
+ timestep_embedding_f32_sycl(src0_d, dst_d, src0->ne[0], dst->nb[1], dim, max_period, stream);
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp b/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp
new file mode 100644
index 0000000..4c18748
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/tsembd.hpp
@@ -0,0 +1,20 @@
+//
+// MIT license
+// Copyright (C) 2024 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_TSEMBD_HPP
+#define GGML_SYCL_TSEMBD_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_timestep_embedding(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_TSEMBD_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp b/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp
new file mode 100644
index 0000000..43482b3
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/vecdotq.hpp
@@ -0,0 +1,1361 @@
+//
+// MIT license
+// Copyright (C) 2025 Intel Corporation
+// SPDX-License-Identifier: MIT
+//
+
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+
+#ifndef GGML_SYCL_VECDOTQ_HPP
+#define GGML_SYCL_VECDOTQ_HPP
+
+#include "dpct/helper.hpp"
+#include "ggml.h"
+#include "quants.hpp"
+
+typedef float (*vec_dot_q_sycl_t)(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1,
+ const int & iqs);
+
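+// Loads a 32-bit word from four consecutive bytes (little-endian) without any
+// alignment requirement; the *_aligned helpers below assume 4-byte alignment.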
+static __dpct_inline__ int get_int_b1(const void * x, const int & i32) {
+ const uint8_t * x8 = (const uint8_t *) x;
+
+ int x32 = x8[4*i32 + 0] << 0;
+ x32 |= x8[4*i32 + 1] << 8;
+ x32 |= x8[4*i32 + 2] << 16;
+ x32 |= x8[4*i32 + 3] << 24;
+
+ return x32;
+}
+
+
+static __dpct_inline__ int get_int_from_int8(const int8_t* x8, const int& i32) {
+ const uint16_t* x16 =
+ (const uint16_t*)(x8 + sizeof(int) * i32); // assume at least 2 byte
+ // alignment
+
+ int x32 = 0;
+ x32 |= x16[0] << 0;
+ x32 |= x16[1] << 16;
+
+ return x32;
+}
+
+static __dpct_inline__ int get_int_from_uint8(
+ const uint8_t* x8,
+ const int& i32) {
+ const uint16_t* x16 =
+ (const uint16_t*)(x8 + sizeof(int) * i32); // assume at least 2 byte
+ // alignment
+
+ int x32 = 0;
+ x32 |= x16[0] << 0;
+ x32 |= x16[1] << 16;
+
+ return x32;
+}
+
+static __dpct_inline__ int get_int_from_int8_aligned(
+ const int8_t* x8,
+ const int& i32) {
+ return *(
+ (const int*)(x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
+}
+
+static __dpct_inline__ int get_int_from_uint8_aligned(
+ const uint8_t* x8,
+ const int& i32) {
+ return *(
+ (const int*)(x8 + sizeof(int) * i32)); // assume at least 4 byte alignment
+}
+
+static __dpct_inline__ void get_int_from_table_16(const uint32_t &q4,
+ const uint8_t *values,
+ int &val1, int &val2) {
+
+ uint32_t aux32; const uint8_t * q8 = (const uint8_t *)&aux32;
+ aux32 = q4 & 0x0f0f0f0f;
+ uint16_t v1 = values[q8[0]] | (values[q8[1]] << 8);
+ uint16_t v2 = values[q8[2]] | (values[q8[3]] << 8);
+ val1 = v1 | (v2 << 16);
+ aux32 = (q4 >> 4) & 0x0f0f0f0f;
+ v1 = values[q8[0]] | (values[q8[1]] << 8);
+ v2 = values[q8[2]] | (values[q8[3]] << 8);
+ val2 = v1 | (v2 << 16);
+}
+
+static __dpct_inline__ sycl::int2 get_int_from_table_16(
+ const int& q4, const int8_t* table) {
+ const uint32_t* table32 = (const uint32_t*)table;
+ uint32_t tmp[2];
+ const uint32_t low_high_selection_indices =
+ (0x32103210 | ((q4 & 0x88888888) >> 1));
+#pragma unroll
+ for (uint32_t i = 0; i < 2; ++i) {
+ const uint32_t shift = 16 * i;
+
+ const uint32_t low =
+ dpct::byte_level_permute(table32[0], table32[1], q4 >> shift);
+ const uint32_t high =
+ dpct::byte_level_permute(table32[2], table32[3], q4 >> shift);
+ tmp[i] = dpct::byte_level_permute(
+ low, high, low_high_selection_indices >> shift);
+ }
+ return sycl::int2(
+ dpct::byte_level_permute(tmp[0], tmp[1], 0x6420),
+ dpct::byte_level_permute(tmp[0], tmp[1], 0x7531));
+}
+
+#define VDR_Q2_K_Q8_1_MMVQ 1
+
+// contiguous v/x values
+static __dpct_inline__ float vec_dot_q2_K_q8_1_impl_mmvq(
+ const int &v, const int *__restrict__ u, const uint8_t *__restrict__ scales,
+ const sycl::half2 &dm2, const float *__restrict__ d8) {
+
+ float sumf_d = 0.0f;
+ float sumf_m = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR2_K; ++i) {
+ const int sc = scales[2*i];
+
+ const int vi = (v >> (2*i)) & 0x03030303;
+
+ sumf_d +=
+ d8[i] * (dpct::dp4a(vi, u[i], 0) * (sc & 0xF)); // SIMD dot product
+
+ // fill int with 4x m
+ int m = sc >> 4;
+ m |= m << 8;
+ m |= m << 16;
+ sumf_m += d8[i] *
+ dpct::dp4a(
+ m, u[i],
+ 0); // multiply constant q2_K part with sum of q8_1 values
+ }
+
+ const sycl::float2 dm2f =
+ dm2.convert<float, sycl::rounding_mode::automatic>();
+
+ return dm2f.x() * sumf_d - dm2f.y() * sumf_m;
+}
+
+
+#define VDR_Q3_K_Q8_1_MMVQ 1
+
+// contiguous v/x values
+static __dpct_inline__ float vec_dot_q3_K_q8_1_impl_mmvq(
+ const int &vl, const int &vh, const int *__restrict__ u,
+ const uint8_t *__restrict__ scales, const int &scale_offset,
+ const float &d3, const float *__restrict__ d8) {
+
+ float sumf = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR3_K; ++i) {
+ const int isc = scale_offset + 2*i;
+
+ const int isc_low = isc % (QK_K/32);
+ const int sc_shift_low = 4 * (isc / (QK_K/32));
+ const int sc_low = (scales[isc_low] >> sc_shift_low) & 0xF;
+
+ const int isc_high = isc % (QK_K/64);
+ const int sc_shift_high = 2 * (isc / (QK_K/64));
+ const int sc_high = ((scales[(QK_K/32) + isc_high] >> sc_shift_high) & 3) << 4;
+
+ const int sc = (sc_low | sc_high) - 32;
+
+ const int vil = (vl >> (2*i)) & 0x03030303;
+
+ const int vih = ((vh >> i) << 2) & 0x04040404;
+
+ const int vi =
+ dpct::vectorized_binary<sycl::char4>(vil, vih, dpct::sub_sat());
+
+ sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product
+ }
+
+ return d3 * sumf;
+}
+
+#define VDR_Q4_K_Q8_1_MMVQ 2
+
+// contiguous v/x values
+static __dpct_inline__ float vec_dot_q4_K_q8_1_impl_vmmq(
+ const int *__restrict__ v, const int *__restrict__ u,
+ const uint8_t *__restrict__ sc, const uint8_t *__restrict__ m,
+ const sycl::half2 &dm4, const float *__restrict__ d8) {
+
+ float sumf_d = 0.0f;
+ float sumf_m = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR4_K; ++i) {
+ const int v0i = (v[0] >> (4*i)) & 0x0F0F0F0F;
+ const int v1i = (v[1] >> (4*i)) & 0x0F0F0F0F;
+
+ const int dot1 =
+ dpct::dp4a(v1i, u[2 * i + 1],
+ dpct::dp4a(v0i, u[2 * i + 0], 0)); // SIMD dot product
+ const int dot2 =
+ dpct::dp4a(0x01010101, u[2 * i + 1],
+ dpct::dp4a(0x01010101, u[2 * i + 0], 0)); // sum of u
+
+ sumf_d += d8[i] * (dot1 * sc[i]);
+ sumf_m += d8[i] * (dot2 * m[i]); // multiply constant part of q4_K with sum of q8_1 values
+ }
+
+ const sycl::float2 dm4f =
+ dm4.convert<float, sycl::rounding_mode::automatic>();
+
+ return dm4f.x() * sumf_d - dm4f.y() * sumf_m;
+}
+
+
+#define VDR_Q5_K_Q8_1_MMVQ 2
+
+// contiguous v/x values
+static __dpct_inline__ float vec_dot_q5_K_q8_1_impl_vmmq(
+ const int *__restrict__ vl, const int *__restrict__ vh,
+ const int *__restrict__ u, const uint8_t *__restrict__ sc,
+ const uint8_t *__restrict__ m, const sycl::half2 &dm5,
+ const float *__restrict__ d8) {
+
+ float sumf_d = 0.0f;
+ float sumf_m = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR5_K; ++i) {
+ const int vl0i = (vl[0] >> (4*i)) & 0x0F0F0F0F;
+ const int vl1i = (vl[1] >> (4*i)) & 0x0F0F0F0F;
+
+ const int vh0i = ((vh[0] >> i) << 4) & 0x10101010;
+ const int vh1i = ((vh[1] >> i) << 4) & 0x10101010;
+
+ const int v0i = vl0i | vh0i;
+ const int v1i = vl1i | vh1i;
+
+ const int dot1 =
+ dpct::dp4a(v0i, u[2 * i + 0],
+ dpct::dp4a(v1i, u[2 * i + 1], 0)); // SIMD dot product
+ const int dot2 =
+ dpct::dp4a(0x01010101, u[2 * i + 0],
+ dpct::dp4a(0x01010101, u[2 * i + 1], 0)); // sum of u
+
+ sumf_d += d8[i] * (dot1 * sc[i]);
+ sumf_m += d8[i] * (dot2 * m[i]);
+
+ }
+
+ const sycl::float2 dm5f =
+ dm5.convert<float, sycl::rounding_mode::automatic>();
+
+ return dm5f.x() * sumf_d - dm5f.y() * sumf_m;
+}
+
+
+#define VDR_Q6_K_Q8_1_MMVQ 1
+
+// contiguous v/x values
+static __dpct_inline__ float
+vec_dot_q6_K_q8_1_impl_mmvq(const int &vl, const int &vh,
+ const int *__restrict__ u,
+ const int8_t *__restrict__ scales, const float &d,
+ const float *__restrict__ d8) {
+
+ float sumf = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR6_K; ++i) {
+ const int sc = scales[4*i];
+
+ const int vil = (vl >> (4*i)) & 0x0F0F0F0F;
+
+ const int vih = ((vh >> (4*i)) << 4) & 0x30303030;
+
+ const int vi = dpct::vectorized_binary<sycl::char4>(
+ (vil | vih), 0x20202020, dpct::sub_sat()); // vi = (vil | vih) - 32
+
+ sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product
+ }
+
+ return d*sumf;
+}
+
+// VDR = vec dot ratio, how many contiguous integers each thread processes when the vec dot kernel is called
+// MMVQ = mul_mat_vec_q, MMQ = mul_mat_q
+
+template <ggml_type T> struct reorder_vec_dot_q_sycl {
+ static_assert(T != T, "ggml_type for reorder vecdot not implemented");
+};
+
+template <> struct reorder_vec_dot_q_sycl<GGML_TYPE_Q4_0> {
+ static constexpr ggml_type gtype = GGML_TYPE_Q4_0;
+
+ using q4_0_block = ggml_sycl_reordered::block_q_t<GGML_TYPE_Q4_0>;
+ using q4_0_traits = typename q4_0_block::traits;
+
+ __dpct_inline__ float vec_dot_q4_0_q8_1_impl(const int * v, const int * u, const float & d4, const sycl::half2 & ds8) {
+ int sumi = 0;
+
+#pragma unroll
+ for (size_t i = 0; i < q4_0_traits::vdr_mmvq; ++i) {
+ const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
+ const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
+
+ // SIMD dot product of quantized values
+ sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
+ sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
+ }
+
+ const sycl::float2 ds8f = ds8.convert<float, sycl::rounding_mode::automatic>();
+
+ // second part effectively subtracts 8 from each quant value
+ return d4 * (sumi * ds8f.x() - (8 * q4_0_traits::vdr_mmvq / q4_0_traits::qi) * ds8f.y());
+ }
+
+ __dpct_inline__ float operator()(const void * __restrict__ vbq, const std::pair<int, int> ibx_offset,
+ const std::pair<int, int> d_offset, const int8_t * q8_1_quant_ptr,
+ const sycl::half2 * q8_1_ds, const int & iqs) {
+ const uint8_t * bq4_0 = static_cast<const uint8_t *>(vbq) + ibx_offset.first;
+ const ggml_half d = *(reinterpret_cast<const ggml_half *>(static_cast<const uint8_t *>(vbq) + d_offset.first));
+ int v[q4_0_traits::vdr_mmvq];
+ int u[2 * q4_0_traits::vdr_mmvq];
+
+
+#pragma unroll
+ for (size_t i = 0; i < q4_0_traits::vdr_mmvq; ++i) {
+ v[i] = get_int_from_uint8(bq4_0, iqs + i);
+ u[2 * i + 0] = get_int_from_int8_aligned(q8_1_quant_ptr, iqs + i);
+ u[2 * i + 1] = get_int_from_int8_aligned(q8_1_quant_ptr, iqs + i + q4_0_traits::qi);
+ }
+
+ return vec_dot_q4_0_q8_1_impl(v, u, d, *q8_1_ds);
+ };
+};
+
+static inline float vec_dot_q4_K_q8_1_common(const int * __restrict__ q4, const uint16_t * __restrict__ scales,
+ const ggml_half2 & dm, const block_q8_1 * __restrict__ bq8_1,
+ const int & iqs) {
+ int v[2];
+ int u[2 * QR4_K];
+ float d8[QR4_K];
+
+ v[0] = q4[0];
+ v[1] = q4[4];
+
+ uint16_t aux[2];
+ const int j = (QR4_K * ((iqs / 2) / (QI8_1 / 2))) / 2;
+ if (j < 2) {
+ aux[0] = scales[j + 0] & 0x3f3f;
+ aux[1] = scales[j + 2] & 0x3f3f;
+ } else {
+ aux[0] = ((scales[j + 2] >> 0) & 0x0f0f) | ((scales[j - 2] & 0xc0c0) >> 2);
+ aux[1] = ((scales[j + 2] >> 4) & 0x0f0f) | ((scales[j - 0] & 0xc0c0) >> 2);
+ }
+
+ const uint8_t * sc = (const uint8_t *) aux;
+ const uint8_t * m = sc + 2;
+
+ const int bq8_offset = QR4_K * ((iqs / 2) / (QI8_1 / 2));
+
+ for (int i = 0; i < QR4_K; ++i) {
+ const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
+ d8[i] = bq8i->ds[0];
+
+ const int * q8 = (const int *) bq8i->qs + ((iqs / 2) % 4);
+ u[2 * i + 0] = q8[0];
+ u[2 * i + 1] = q8[4];
+ }
+
+ return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, dm, d8);
+}
+
+template <> struct reorder_vec_dot_q_sycl<GGML_TYPE_Q4_K> {
+ static constexpr ggml_type gtype = GGML_TYPE_Q4_K;
+
+ using q4_k_block = ggml_sycl_reordered::block_q_t<GGML_TYPE_Q4_K>;
+ using q4_k_traits = typename q4_k_block::traits;
+
+ __dpct_inline__ float operator()(const void * __restrict__ vbq, const std::pair<int, int> ibx_offset,
+ const std::pair<int, int> d_offset, const int8_t * q8_1_quant_ptr,
+ const sycl::half2 * q8_1_ds, const int & iqs) {
+ const uint8_t * base = static_cast<const uint8_t *>(vbq);
+ const uint8_t * qs = base + ibx_offset.first;
+ const uint8_t * scs = base + d_offset.first;
+ const ggml_half2 * dms = reinterpret_cast<const ggml_half2 *>(base + d_offset.second);
+
+ const int bq8_offset = QR4_K * ((iqs / 2) / (QI8_1 / 2));
+ const int * q4 = (const int *) (qs + 16 * bq8_offset + 4 * ((iqs / 2) % 4));
+ const uint16_t * scales = (const uint16_t *) scs;
+
+ int v[2];
+ int u[2 * QR4_K];
+ float d8[QR4_K];
+
+ v[0] = q4[0];
+ v[1] = q4[4];
+
+ uint16_t aux[2];
+ const int j = (QR4_K * ((iqs / 2) / (QI8_1 / 2))) / 2;
+ if (j < 2) {
+ aux[0] = scales[j + 0] & 0x3f3f;
+ aux[1] = scales[j + 2] & 0x3f3f;
+ } else {
+ aux[0] = ((scales[j + 2] >> 0) & 0x0f0f) | ((scales[j - 2] & 0xc0c0) >> 2);
+ aux[1] = ((scales[j + 2] >> 4) & 0x0f0f) | ((scales[j - 0] & 0xc0c0) >> 2);
+ }
+
+ const uint8_t * sc = (const uint8_t *) aux;
+ const uint8_t * m = sc + 2;
+
+ for (int i = 0; i < QR4_K; ++i) {
+ const int8_t* quant_base_ptr = q8_1_quant_ptr + (bq8_offset + i) * QK8_1;
+ sycl::half2 ds_values = *(q8_1_ds + bq8_offset + i);
+
+ d8[i] = ds_values[0];
+
+ const int * q8 = (const int *) quant_base_ptr + ((iqs / 2) % 4);
+ u[2 * i + 0] = q8[0];
+ u[2 * i + 1] = q8[4];
+ }
+
+ return vec_dot_q4_K_q8_1_impl_vmmq(v, u, sc, m, *dms, d8);
+ }
+};
+
+template <> struct reorder_vec_dot_q_sycl<GGML_TYPE_Q6_K> {
+ static constexpr ggml_type gtype = GGML_TYPE_Q6_K;
+
+ using q6_k_block = ggml_sycl_reordered::block_q_t<GGML_TYPE_Q6_K>;
+ using q6_k_traits = typename q6_k_block::traits;
+
+ __dpct_inline__ float vec_dot_q6_K_q8_1_impl_mmvq(const int vl, const int vh, const int * __restrict__ u,
+ const int8_t * __restrict__ scales, const float d,
+ const float * __restrict__ d8) {
+ float sumf = 0.0f;
+
+#pragma unroll
+ for (int i = 0; i < QR6_K; ++i) {
+ const int sc = scales[4 * i];
+
+ const int vil = (vl >> (4 * i)) & 0x0F0F0F0F;
+
+ const int vih = ((vh >> (4 * i)) << 4) & 0x30303030;
+
+ const int vi = dpct::vectorized_binary<sycl::char4>((vil | vih), 0x20202020,
+ dpct::sub_sat()); // vi = (vil | vih) - 32
+
+ sumf += d8[i] * (dpct::dp4a(vi, u[i], 0) * sc); // SIMD dot product
+ }
+
+ return d * sumf;
+ }
+
+ __dpct_inline__ float operator()(const void * __restrict__ vbq, const std::pair<int, int> ibx_offset,
+ const std::pair<int, int> d_offset, const int8_t * q8_1_quant_ptr, const sycl::half2 * q8_1_ds,
+ const int iqs) {
+ const uint8_t * base = static_cast<const uint8_t *>(vbq);
+ const uint8_t * ql = base + ibx_offset.first;
+ const uint8_t * qh = base + ibx_offset.second;
+ const int8_t * scales = reinterpret_cast<const int8_t *>(base + d_offset.first);
+ const ggml_half * d = (const ggml_half *) (base + d_offset.second);
+
+ const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K / 2)) + (iqs % (QI6_K / 2)) / (QI6_K / 4);
+ const int scale_offset = (QI6_K / 4) * (iqs / (QI6_K / 2)) + (iqs % (QI6_K / 2)) / (QI6_K / 8);
+ const int vh_shift = 2 * ((iqs % (QI6_K / 2)) / (QI6_K / 4));
+
+ const int vl = get_int_from_uint8(ql, iqs);
+ const int vh = get_int_from_uint8(qh, (QI6_K / 4) * (iqs / (QI6_K / 2)) + iqs % (QI6_K / 4)) >> vh_shift;
+
+ const int8_t * scs = scales + scale_offset;
+
+ int u[QR6_K];
+ float d8[QR6_K];
+
+#pragma unroll
+ for (int i = 0; i < QR6_K; ++i) {
+ u[i] = get_int_from_int8_aligned(q8_1_quant_ptr + (bq8_offset + 2 * i) * QK8_1, iqs % QI8_1);
+ const sycl::half2 ds_values = *(q8_1_ds + bq8_offset + 2 * i);
+ d8[i] = ds_values[0];
+ }
+ return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scs, *d, d8);
+ }
+};
+#define VDR_Q4_0_Q8_1_MMVQ 2
+#define VDR_Q4_0_Q8_1_MMQ 4
+
+template <int vdr>
+static __dpct_inline__ float vec_dot_q4_0_q8_1_impl(const int * v, const int * u, const float & d4,
+ const sycl::half2 & ds8) {
+ int sumi = 0;
+#pragma unroll
+ for (int i = 0; i < vdr; ++i) {
+ const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
+ const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
+
+ // SIMD dot product of quantized values
+ sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
+ sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
+ }
+
+ const sycl::float2 ds8f = ds8.convert<float, sycl::rounding_mode::automatic>();
+
+ // second part effectively subtracts 8 from each quant value
+ return d4 * (sumi * ds8f.x() - (8 * vdr / QI4_0) * ds8f.y());
+}
+
+#define VDR_Q4_1_Q8_1_MMVQ 2
+#define VDR_Q4_1_Q8_1_MMQ 4
+
+template <int vdr>
+static __dpct_inline__ float vec_dot_q4_1_q8_1_impl(const int *v, const int *u,
+ const sycl::half2 &dm4,
+ const sycl::half2 &ds8) {
+
+ int sumi = 0;
+
+#pragma unroll
+ for (int i = 0; i < vdr; ++i) {
+ const int vi0 = (v[i] >> 0) & 0x0F0F0F0F;
+ const int vi1 = (v[i] >> 4) & 0x0F0F0F0F;
+
+ // SIMD dot product of quantized values
+ sumi = dpct::dp4a(vi0, u[2 * i + 0], sumi);
+ sumi = dpct::dp4a(vi1, u[2 * i + 1], sumi);
+ }
+
+#ifdef GGML_SYCL_F16
+ const sycl::float2 tmp =
+ (dm4 * ds8).convert<float, sycl::rounding_mode::automatic>();
+ const float d4d8 = tmp.x();
+ const float m4s8 = tmp.y();
+#else
+ const sycl::float2 dm4f =
+ dm4.convert<float, sycl::rounding_mode::automatic>();
+ const sycl::float2 ds8f =
+ ds8.convert<float, sycl::rounding_mode::automatic>();
+ const float d4d8 = dm4f.x() * ds8f.x();
+ const float m4s8 = dm4f.y() * ds8f.y();
+#endif // GGML_SYCL_F16
+
+ // scale second part of sum by QI8_1/(vdr * QR4_1) to compensate for multiple threads adding it
+ return sumi * d4d8 + m4s8 / (QI8_1 / (vdr * QR4_1));
+}
+
+#define VDR_Q5_0_Q8_1_MMVQ 2
+#define VDR_Q5_0_Q8_1_MMQ 4
+
+template <int vdr>
+static __dpct_inline__ float
+vec_dot_q5_0_q8_1_impl(const int *vl, const int *vh, const int *u,
+ const float &d5, const sycl::half2 &ds8) {
+ int sumi = 0;
+
+#pragma unroll
+ for (int i = 0; i < vdr; ++i) {
+ int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
+ vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
+ vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
+ vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
+ vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
+ sumi = dpct::dp4a(vi0, u[2 * i + 0],
+ sumi); // SIMD dot product of quantized values
+
+ int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
+ vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
+ vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
+ vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
+ vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
+ sumi = dpct::dp4a(vi1, u[2 * i + 1],
+ sumi); // SIMD dot product of quantized values
+ }
+
+ const sycl::float2 ds8f =
+ ds8.convert<float, sycl::rounding_mode::automatic>();
+
+ // second part effectively subtracts 16 from each quant value
+ return d5 * (sumi * ds8f.x() - (16 * vdr / QI5_0) * ds8f.y());
+}
+
+#define VDR_Q5_1_Q8_1_MMVQ 2
+#define VDR_Q5_1_Q8_1_MMQ 4
+
+template <int vdr>
+static __dpct_inline__ float
+vec_dot_q5_1_q8_1_impl(const int *vl, const int *vh, const int *u,
+ const sycl::half2 &dm5, const sycl::half2 &ds8) {
+
+ int sumi = 0;
+
+#pragma unroll
+ for (int i = 0; i < vdr; ++i) {
+ int vi0 = (vl[i] >> 0) & 0x0F0F0F0F; // lower 4 qs bits, still need qh as 5th bits
+ vi0 |= (vh[i] << 4) & 0x00000010; // 0 -> 4
+ vi0 |= (vh[i] << 11) & 0x00001000; // 1 -> 12
+ vi0 |= (vh[i] << 18) & 0x00100000; // 2 -> 20
+ vi0 |= (vh[i] << 25) & 0x10000000; // 3 -> 28
+ sumi = dpct::dp4a(vi0, u[2 * i + 0],
+ sumi); // SIMD dot product of quantized values
+
+ int vi1 = (vl[i] >> 4) & 0x0F0F0F0F; // upper 4 qs bits, still need qh as 5th bits
+ vi1 |= (vh[i] >> 12) & 0x00000010; // 16 -> 4
+ vi1 |= (vh[i] >> 5) & 0x00001000; // 17 -> 12
+ vi1 |= (vh[i] << 2) & 0x00100000; // 18 -> 20
+ vi1 |= (vh[i] << 9) & 0x10000000; // 19 -> 28
+ sumi = dpct::dp4a(vi1, u[2 * i + 1],
+ sumi); // SIMD dot product of quantized values
+ }
+
+#ifdef GGML_SYCL_F16
+ const sycl::float2 tmp =
+ (dm5 * ds8).convert<float, sycl::rounding_mode::automatic>();
+ const float d5d8 = tmp.x();
+ const float m5s8 = tmp.y();
+#else
+ const sycl::float2 dm5f =
+ dm5.convert<float, sycl::rounding_mode::automatic>();
+ const sycl::float2 ds8f =
+ ds8.convert<float, sycl::rounding_mode::automatic>();
+ const float d5d8 = dm5f.x() * ds8f.x();
+ const float m5s8 = dm5f.y() * ds8f.y();
+#endif // GGML_SYCL_F16
+
+ // scale second part of sum by QI5_1 / vdr to compensate for multiple threads adding it
+    return sumi * d5d8 + m5s8 / (QI5_1 / vdr);
+}
+
+#define VDR_Q8_0_Q8_1_MMVQ 2
+#define VDR_Q8_0_Q8_1_MMQ 8
+
+template <int vdr>
+static __dpct_inline__ float vec_dot_q8_0_q8_1_impl(const int *v, const int *u,
+ const float &d8_0,
+ const float &d8_1) {
+
+ int sumi = 0;
+
+#pragma unroll
+ for (int i = 0; i < vdr; ++i) {
+ // SIMD dot product of quantized values
+ sumi = dpct::dp4a(v[i], u[i], sumi);
+ }
+
+    return d8_0 * d8_1 * sumi;
+}
+
+template <int vdr>
+static __dpct_inline__ float vec_dot_q8_1_q8_1_impl(const int *v, const int *u,
+ const sycl::half2 &dm8,
+ const sycl::half2 &ds8) {
+
+ int sumi = 0;
+
+#pragma unroll
+ for (int i = 0; i < vdr; ++i) {
+ // SIMD dot product of quantized values
+ sumi = dpct::dp4a(v[i], u[i], sumi);
+ }
+
+#ifdef GGML_SYCL_F16
+ const sycl::float2 tmp =
+ (dm8 * ds8).convert<float, sycl::rounding_mode::automatic>();
+ const float d8d8 = tmp.x();
+ const float m8s8 = tmp.y();
+#else
+ const sycl::float2 dm8f =
+ dm8.convert<float, sycl::rounding_mode::automatic>();
+ const sycl::float2 ds8f =
+ ds8.convert<float, sycl::rounding_mode::automatic>();
+ const float d8d8 = dm8f.x() * ds8f.x();
+ const float m8s8 = dm8f.y() * ds8f.y();
+#endif // GGML_SYCL_F16
+
+    // scale second part of sum by QI8_1 / vdr to compensate for multiple threads adding it
+    return sumi * d8d8 + m8s8 / (QI8_1 / vdr);
+}
+
+static __dpct_inline__ float
+vec_dot_q4_0_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q4_0 * bq4_0 = (const block_q4_0 *) vbq;
+
+ int v[VDR_Q4_0_Q8_1_MMVQ];
+ int u[2 * VDR_Q4_0_Q8_1_MMVQ];
+
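+    // v: packed 4-bit quants; u: the two q8_1 halves they are dotted against (low/high nibbles)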
+#pragma unroll
+ for (int i = 0; i < VDR_Q4_0_Q8_1_MMVQ; ++i) {
+ v[i] = get_int_from_uint8(bq4_0->qs, iqs + i);
+ u[2 * i + 0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
+ u[2 * i + 1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_0);
+ }
+
+ return vec_dot_q4_0_q8_1_impl<VDR_Q4_0_Q8_1_MMVQ>(v, u, bq4_0->d, bq8_1->ds);
+}
+
+static __dpct_inline__ float
+vec_dot_q4_1_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q4_1 * bq4_1 = (const block_q4_1 *) vbq;
+
+ int v[VDR_Q4_1_Q8_1_MMVQ];
+ int u[2*VDR_Q4_1_Q8_1_MMVQ];
+
+#pragma unroll
+ for (int i = 0; i < VDR_Q4_1_Q8_1_MMVQ; ++i) {
+ v[i] = get_int_from_uint8_aligned(bq4_1->qs, iqs + i);
+ u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
+ u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI4_1);
+ }
+
+ return vec_dot_q4_1_q8_1_impl<VDR_Q4_1_Q8_1_MMVQ>(v, u, bq4_1->dm, bq8_1->ds);
+}
+
+#define VDR_MXFP4_Q8_1_MMVQ 2
+#define VDR_MXFP4_Q8_1_MMQ 4
+
+static __dpct_inline__ float vec_dot_mxfp4_q8_1(const void * __restrict__ vbq,
+ const block_q8_1 * __restrict__ bq8_1,
+ const int & iqs) {
+ const block_mxfp4 * bq4 = (const block_mxfp4 *) vbq;
+
+ const int * q8 = (const int *) bq8_1->qs + iqs;
+
+ int sumi = 0;
+#pragma unroll
+ for (int l = 0; l < VDR_MXFP4_Q8_1_MMVQ; ++l) {
+ const int aux_q4 = get_int_b1(bq4->qs, iqs + l);
+ const sycl::int2 v = get_int_from_table_16(aux_q4, kvalues_mxfp4);
+ sumi = ggml_sycl_dp4a(v.x(), q8[l + 0], sumi);
+ sumi = ggml_sycl_dp4a(v.y(), q8[l + 4], sumi);
+ }
+
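+    // kvalues_mxfp4 stores the fp4 code values at twice their magnitude, hence the 0.5f factor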
+ const float d = ggml_sycl_e8m0_to_fp32(bq4->e) * 0.5f * (bq8_1->ds)[0];
+ return d * sumi;
+}
+
+
+static __dpct_inline__ float
+vec_dot_q5_0_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q5_0 * bq5_0 = (const block_q5_0 *) vbq;
+
+ int vl[VDR_Q5_0_Q8_1_MMVQ];
+ int vh[VDR_Q5_0_Q8_1_MMVQ];
+ int u[2*VDR_Q5_0_Q8_1_MMVQ];
+
+#pragma unroll
+ for (int i = 0; i < VDR_Q5_0_Q8_1_MMVQ; ++i) {
+ vl[i] = get_int_from_uint8(bq5_0->qs, iqs + i);
+ vh[i] = get_int_from_uint8(bq5_0->qh, 0) >> (4 * (iqs + i));
+ u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
+ u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_0);
+ }
+
+ return vec_dot_q5_0_q8_1_impl<VDR_Q5_0_Q8_1_MMVQ>(vl, vh, u, bq5_0->d, bq8_1->ds);
+}
+
+static __dpct_inline__ float
+vec_dot_q5_1_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q5_1 * bq5_1 = (const block_q5_1 *) vbq;
+
+ int vl[VDR_Q5_1_Q8_1_MMVQ];
+ int vh[VDR_Q5_1_Q8_1_MMVQ];
+ int u[2*VDR_Q5_1_Q8_1_MMVQ];
+
+#pragma unroll
+ for (int i = 0; i < VDR_Q5_1_Q8_1_MMVQ; ++i) {
+ vl[i] = get_int_from_uint8_aligned(bq5_1->qs, iqs + i);
+ vh[i] = get_int_from_uint8_aligned(bq5_1->qh, 0) >> (4 * (iqs + i));
+ u[2*i+0] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
+ u[2*i+1] = get_int_from_int8_aligned(bq8_1->qs, iqs + i + QI5_1);
+ }
+
+ return vec_dot_q5_1_q8_1_impl<VDR_Q5_1_Q8_1_MMVQ>(vl, vh, u, bq5_1->dm, bq8_1->ds);
+}
+
+static __dpct_inline__ float
+vec_dot_q8_0_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q8_0 * bq8_0 = (const block_q8_0 *) vbq;
+
+ int v[VDR_Q8_0_Q8_1_MMVQ];
+ int u[VDR_Q8_0_Q8_1_MMVQ];
+
+#pragma unroll
+ for (int i = 0; i < VDR_Q8_0_Q8_1_MMVQ; ++i) {
+ v[i] = get_int_from_int8(bq8_0->qs, iqs + i);
+ u[i] = get_int_from_int8_aligned(bq8_1->qs, iqs + i);
+ }
+
+ return vec_dot_q8_0_q8_1_impl<VDR_Q8_0_Q8_1_MMVQ>(v, u, bq8_0->d,
+ bq8_1->ds[0]);
+}
+
+static __dpct_inline__ float
+vec_dot_q2_K_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q2_K * bq2_K = (const block_q2_K *) vbq;
+
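+    // locate the q8_1 blocks covered by these quants and the matching per-sub-block scales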
+ const int bq8_offset = QR2_K * (iqs / QI8_1);
+ const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
+
+ const uint8_t * scales = bq2_K->scales + scale_offset;
+
+ const int v = get_int_from_uint8_aligned(bq2_K->qs, iqs);
+ int u[QR2_K];
+ float d8[QR2_K];
+
+#pragma unroll
+ for (int i = 0; i < QR2_K; ++ i) {
+ u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
+ d8[i] = bq8_1[bq8_offset + i].ds[0];
+ }
+
+ return vec_dot_q2_K_q8_1_impl_mmvq(v, u, scales, bq2_K->dm, d8);
+}
+
+static __dpct_inline__ float
+vec_dot_q3_K_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q3_K * bq3_K = (const block_q3_K *) vbq;
+
+ const int bq8_offset = QR3_K * (iqs / (QI3_K/2));
+ const int scale_offset = iqs - iqs % QI8_1 + (iqs % QI8_1) / (QI8_1/2);
+
+ const float d = bq3_K->d;
+
+ const int vl = get_int_from_uint8(bq3_K->qs, iqs);
+
+ // invert the mask with ~ so that a 0/1 results in 4/0 being subtracted
+ const int vh = ~get_int_from_uint8(bq3_K->hmask, iqs % (QI3_K/2)) >> bq8_offset;
+
+ int u[QR3_K];
+ float d8[QR3_K];
+
+#pragma unroll
+ for (int i = 0; i < QR3_K; ++i) {
+ u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + i].qs, iqs % QI8_1);
+ d8[i] = bq8_1[bq8_offset + i].ds[0];
+ }
+
+ return vec_dot_q3_K_q8_1_impl_mmvq(vl, vh, u, bq3_K->scales, scale_offset, d, d8);
+}
+
+static __dpct_inline__ float vec_dot_q4_K_q8_1(const void * __restrict__ vbq, const block_q8_1 * __restrict__ bq8_1,
+ const int & iqs) {
+#ifndef GGML_QKK_64
+
+ const block_q4_K * bq4_K = (const block_q4_K *) vbq;
+
+ const int bq8_offset = QR4_K * ((iqs / 2) / (QI8_1 / 2));
+ const int * q4 = (const int *) (bq4_K->qs + 16 * bq8_offset + 4 * ((iqs / 2) % 4));
+ const uint16_t * scales = (const uint16_t *) bq4_K->scales;
+
+ return vec_dot_q4_K_q8_1_common(q4, scales, bq4_K->dm, bq8_1, iqs);
+
+#else
+
+#if __SYCL_ARCH__ >= VER_4VEC // lowest compute capability for integer intrinsics
+ const block_q4_K * bq4_K = (const block_q4_K *) vbq;
+
+ float sumf_d = 0.0f;
+ float sumf_m = 0.0f;
+
+ uint16_t aux16[2];
+ const uint8_t * s = (const uint8_t *)aux16;
+
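+    // s[0..1]: 4-bit scales (low nibbles), s[2..3]: 4-bit mins (high nibbles)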
+ const uint16_t * a = (const uint16_t *)bq4_K->scales;
+ aux16[0] = a[0] & 0x0f0f;
+ aux16[1] = (a[0] >> 4) & 0x0f0f;
+
+ const float dall = bq4_K->dm[0];
+ const float dmin = bq4_K->dm[1];
+
+ const float d8_1 = bq8_1[0].ds[0];
+    const float d8_2 = bq8_1[1].ds[0];
+
+ const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
+ const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
+ const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
+ const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
+
+ const int * q4 = (const int *)bq4_K->qs + (iqs/2);
+ const int v1 = q4[0];
+ const int v2 = q4[4];
+
+ const int dot1 = dpct::dp4a(ui2, v2 & 0x0f0f0f0f, dpct::dp4a(ui1, v1 & 0x0f0f0f0f, 0));
+ const int dot2 = dpct::dp4a(ui4, (v2 >> 4) & 0x0f0f0f0f, dpct::dp4a(ui3, (v1 >> 4) & 0x0f0f0f0f, 0));
+ const int dot3 = dpct::dp4a(0x01010101, ui2, dpct::dp4a(0x01010101, ui1, 0));
+ const int dot4 = dpct::dp4a(0x01010101, ui4, dpct::dp4a(0x01010101, ui3, 0));
+
+ sumf_d += d8_1 * (dot1 * s[0]) + d8_2 * (dot2 * s[1]);
+ sumf_m += d8_1 * (dot3 * s[2]) + d8_2 * (dot4 * s[3]);
+
+ return dall * sumf_d - dmin * sumf_m;
+
+#else
+ bad_arch();
+#endif // __SYCL_ARCH__ >= VER_4VEC
+
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_q5_K_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+#ifndef GGML_QKK_64
+ const block_q5_K * bq5_K = (const block_q5_K *) vbq;
+
+ int vl[2];
+ int vh[2];
+ int u[2*QR5_K];
+ float d8[QR5_K];
+
+ const int bq8_offset = QR5_K * ((iqs/2) / (QI8_1/2));
+ const int * ql = (const int *)(bq5_K->qs + 16 * bq8_offset + 4 * ((iqs/2)%4));
+ const int * qh = (const int *)(bq5_K->qh + 4 * ((iqs/2)%4));
+
+ vl[0] = ql[0];
+ vl[1] = ql[4];
+
+ vh[0] = qh[0] >> bq8_offset;
+ vh[1] = qh[4] >> bq8_offset;
+
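+    // unpack the packed 6-bit scales (sc) and mins (m) for this pair of sub-blocks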
+ const uint16_t * scales = (const uint16_t *)bq5_K->scales;
+ uint16_t aux[2];
+ const int j = bq8_offset/2;
+ if (j < 2) {
+ aux[0] = scales[j+0] & 0x3f3f;
+ aux[1] = scales[j+2] & 0x3f3f;
+ } else {
+ aux[0] = ((scales[j+2] >> 0) & 0x0f0f) | ((scales[j-2] & 0xc0c0) >> 2);
+ aux[1] = ((scales[j+2] >> 4) & 0x0f0f) | ((scales[j-0] & 0xc0c0) >> 2);
+ }
+ const uint8_t * sc = (const uint8_t *)aux;
+ const uint8_t * m = sc + 2;
+
+#pragma unroll
+ for (int i = 0; i < QR5_K; ++i) {
+ const block_q8_1 * bq8i = bq8_1 + bq8_offset + i;
+ d8[i] = bq8i->ds[0];
+
+ const int * q8 = (const int *)bq8i->qs + ((iqs/2)%4);
+ u[2*i+0] = q8[0];
+ u[2*i+1] = q8[4];
+ }
+
+ return vec_dot_q5_K_q8_1_impl_vmmq(vl, vh, u, sc, m, bq5_K->dm, d8);
+
+#else
+
+#if __SYCL_ARCH__ >= VER_4VEC // lowest compute capability for integer intrinsics
+ const block_q5_K * bq5_K = (const block_q5_K *) vbq;
+
+ const int8_t * s = bq5_K->scales;
+
+ const float d = bq5_K->d;
+
+ const float d8_1 = bq8_1[0].ds[0];
+    const float d8_2 = bq8_1[1].ds[0];
+
+ const int ui1 = *((const int *)bq8_1[0].qs + (iqs/2));
+ const int ui2 = *((const int *)bq8_1[0].qs + (iqs/2) + 4);
+ const int ui3 = *((const int *)bq8_1[1].qs + (iqs/2));
+ const int ui4 = *((const int *)bq8_1[1].qs + (iqs/2) + 4);
+
+ const int * ql = (const int *)bq5_K->qs + (iqs/2);
+ const int vl1 = ql[0];
+ const int vl2 = ql[4];
+
+ const int step = 4 * (iqs/2); // 0, 4, 8, 12
+ const int im = step/8; // = 0 for iqs = 0, 2, = 1 for iqs = 4, 6
+ const int in = step%8; // 0, 4, 0, 4
+ const int vh = (*((const int *)(bq5_K->qh + in))) >> im;
+
+ const int v1 = (((vh << 4) & 0x10101010) ^ 0x10101010) | ((vl1 >> 0) & 0x0f0f0f0f);
+ const int v2 = (((vh << 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 0) & 0x0f0f0f0f);
+ const int v3 = (((vh >> 0) & 0x10101010) ^ 0x10101010) | ((vl1 >> 4) & 0x0f0f0f0f);
+ const int v4 = (((vh >> 2) & 0x10101010) ^ 0x10101010) | ((vl2 >> 4) & 0x0f0f0f0f);
+
+ const float sumf_d = d8_1 * (dpct::dp4a(ui1, v1, 0) * s[0] + dpct::dp4a(ui2, v2, 0) * s[1])
+ + d8_2 * (dpct::dp4a(ui3, v3, 0) * s[2] + dpct::dp4a(ui4, v4, 0) * s[3]);
+
+ return d * sumf_d;
+
+#else
+ bad_arch();
+#endif // __SYCL_ARCH__ >= VER_4VEC
+
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_q6_K_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_q6_K * bq6_K = (const block_q6_K *) vbq;
+
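+    // locate the q8_1 blocks, the 8-bit scales and the qh shift for this slice of the q6_K block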
+ const int bq8_offset = 2 * QR6_K * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/4);
+ const int scale_offset = (QI6_K/4) * (iqs / (QI6_K/2)) + (iqs % (QI6_K/2)) / (QI6_K/8);
+ const int vh_shift = 2 * ((iqs % (QI6_K/2)) / (QI6_K/4));
+
+ const int vl = get_int_from_uint8(bq6_K->ql, iqs);
+ const int vh = get_int_from_uint8(bq6_K->qh, (QI6_K/4) * (iqs / (QI6_K/2)) + iqs % (QI6_K/4)) >> vh_shift;
+
+ const int8_t * scales = bq6_K->scales + scale_offset;
+
+ int u[QR6_K];
+ float d8[QR6_K];
+
+#pragma unroll
+ for (int i = 0; i < QR6_K; ++i) {
+ u[i] = get_int_from_int8_aligned(bq8_1[bq8_offset + 2*i].qs, iqs % QI8_1);
+ d8[i] = bq8_1[bq8_offset + 2 * i].ds[0];
+ }
+
+ return vec_dot_q6_K_q8_1_impl_mmvq(vl, vh, u, scales, bq6_K->d, d8);
+}
+
+
+static __dpct_inline__ float
+vec_dot_iq2_xxs_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs,
+ const uint64_t *iq2xxs_grid, const uint8_t *ksigns_iq2xs,
+ const uint8_t *kmask_iq2xs) {
+#if QK_K == 256
+ const block_iq2_xxs * bq2 = (const block_iq2_xxs *) vbq;
+
+ const int ib32 = iqs;
+ const uint16_t * q2 = bq2->qs + 4*ib32;
+ const uint8_t * aux8 = (const uint8_t *)q2;
+ const int8_t * q8 = bq8_1[ib32].qs;
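+    // aux32 packs four 7-bit sign-mask indices; the 4 bits left after the shifts are the scale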
+ uint32_t aux32 = q2[2] | (q2[3] << 16);
+ int sumi = 0;
+ for (int l = 0; l < 4; ++l) {
+ const uint8_t * grid = (const uint8_t *)(iq2xxs_grid + aux8[l]);
+ const uint8_t signs = ksigns_iq2xs[aux32 & 127];
+ for (int j = 0; j < 8; ++j) {
+ sumi += q8[j] * grid[j] * (signs & kmask_iq2xs[j] ? -1 : 1);
+ }
+ q8 += 8;
+ aux32 >>= 7;
+ }
+ const float d = (float)bq2->d * (0.5f + aux32) * bq8_1[ib32].ds[0] * 0.25f;
+ return d * sumi;
+#else
+ assert(false);
+ return 0.f;
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_iq2_xs_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs,
+ const uint64_t *iq2xs_grid, const uint64_t *ksigns64) {
+#if DPCT_COMPATIBILITY_TEMP >= \
+ MIN_CC_DP4A // lowest compute capability for integer intrinsics
+#if QK_K == 256
+ const block_iq2_xs * bq2 = (const block_iq2_xs *) vbq;
+
+ const int ib32 = iqs;
+ const uint16_t * q2 = bq2->qs + 4*ib32;
+ const int8_t * q8 = bq8_1[ib32].qs;
+ const uint8_t ls1 = bq2->scales[ib32] & 0xf;
+ const uint8_t ls2 = bq2->scales[ib32] >> 4;
+ int sumi1 = 0;
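+    // (grid ^ signs) - signs negates exactly the bytes whose sign-mask byte is 0xFF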
+ for (int l = 0; l < 2; ++l) {
+ const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511));
+ const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9));
+ const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
+ grid[0] ^ signs[0], signs[0], std::minus<>());
+ const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
+ grid[1] ^ signs[1], signs[1], std::minus<>());
+ sumi1 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi1);
+ sumi1 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi1);
+ q8 += 8;
+ }
+ int sumi2 = 0;
+ for (int l = 2; l < 4; ++l) {
+ const uint32_t * grid = (const uint32_t *)(iq2xs_grid + (q2[l] & 511));
+ const uint32_t * signs = (const uint32_t *)(ksigns64 + (q2[l] >> 9));
+ const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
+ grid[0] ^ signs[0], signs[0], std::minus<>());
+ const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
+ grid[1] ^ signs[1], signs[1], std::minus<>());
+ sumi2 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi2);
+ sumi2 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi2);
+ q8 += 8;
+ }
+ const float d = (float)bq2->d * bq8_1[ib32].ds[0] * 0.25f;
+ return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
+#else
+ assert(false);
+ return 0.f;
+#endif
+#else
+ assert(false);
+ return 0.f;
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_iq2_s_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+#if QK_K == 256
+ const block_iq2_s * bq2 = (const block_iq2_s *) vbq;
+
+ const int ib32 = iqs;
+ const int8_t * q8 = bq8_1[ib32].qs;
+ const uint8_t * signs = bq2->qs + QK_K/8 + 4*ib32;
+ const uint8_t ls1 = bq2->scales[ib32] & 0xf;
+ const uint8_t ls2 = bq2->scales[ib32] >> 4;
+ int sumi1 = 0;
+ for (int l = 0; l < 2; ++l) {
+ const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
+ const uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
+ ((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201,
+ std::equal_to<>());
+ const uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
+ ((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201,
+ std::equal_to<>());
+ const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
+ grid[0] ^ signs0, signs0, std::minus<>());
+ const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
+ grid[1] ^ signs1, signs1, std::minus<>());
+ sumi1 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi1);
+ sumi1 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi1);
+ q8 += 8;
+ }
+ int sumi2 = 0;
+ for (int l = 2; l < 4; ++l) {
+ const uint32_t * grid = (const uint32_t *)(iq2s_grid + (bq2->qs[4*ib32+l] | ((bq2->qh[ib32] << (8-2*l)) & 0x300)));
+ const uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
+ ((signs[l] & 0xf) * 0x01010101) & 0x08040201, 0x08040201,
+ std::equal_to<>());
+ const uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
+ ((signs[l] >> 4) * 0x01010101) & 0x08040201, 0x08040201,
+ std::equal_to<>());
+ const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
+ grid[0] ^ signs0, signs0, std::minus<>());
+ const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
+ grid[1] ^ signs1, signs1, std::minus<>());
+ sumi2 = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi2);
+ sumi2 = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi2);
+ q8 += 8;
+ }
+ const float d = (float)bq2->d * bq8_1[ib32].ds[0] * 0.25f;
+ return d * ((0.5f + ls1) * sumi1 + (0.5f + ls2) * sumi2);
+#else
+ assert(false);
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_iq3_xxs_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs,
+ const uint32_t *iq3xxs_grid, const uint64_t *ksigns64) {
+#if DPCT_COMPATIBILITY_TEMP >= \
+ MIN_CC_DP4A // lowest compute capability for integer intrinsics
+#if QK_K == 256
+ const block_iq3_xxs * bq2 = (const block_iq3_xxs *) vbq;
+
+ const int ib32 = iqs;
+ const uint8_t * q3 = bq2->qs + 8*ib32;
+ const uint16_t * gas = (const uint16_t *)(bq2->qs + QK_K/4) + 2*ib32;
+ const int8_t * q8 = bq8_1[ib32].qs;
+ uint32_t aux32 = gas[0] | (gas[1] << 16);
+ int sumi = 0;
+ for (int l = 0; l < 4; ++l) {
+ const uint32_t * grid1 = iq3xxs_grid + q3[2*l+0];
+ const uint32_t * grid2 = iq3xxs_grid + q3[2*l+1];
+ const uint32_t * signs = (const uint32_t *)(ksigns64 + (aux32 & 127));
+ const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
+ grid1[0] ^ signs[0], signs[0], std::minus<>());
+ const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
+ grid2[0] ^ signs[1], signs[1], std::minus<>());
+ sumi = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi);
+ sumi = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi);
+ q8 += 8;
+ aux32 >>= 7;
+ }
+ const float d = (float)bq2->d * (0.5f + aux32) * bq8_1[ib32].ds[0] * 0.5f;
+ return d * sumi;
+#else
+ assert(false);
+ return 0.f;
+#endif
+#else
+ assert(false);
+ return 0.f;
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_iq3_s_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs,
+ const uint32_t *iq3s_grid) {
+#if QK_K == 256
+ const block_iq3_s * bq2 = (const block_iq3_s *) vbq;
+
+ const int ib32 = iqs;
+ const uint8_t * qs = bq2->qs + 8*ib32;
+ const int8_t * q8 = bq8_1[ib32].qs;
+ int sumi = 0;
+ for (int l = 0; l < 4; ++l) {
+ const uint32_t * grid1 = iq3s_grid + (qs[2*l+0] | ((bq2->qh[ib32] << (8 - 2*l)) & 256));
+ const uint32_t * grid2 = iq3s_grid + (qs[2*l+1] | ((bq2->qh[ib32] << (7 - 2*l)) & 256));
+ uint32_t signs0 = dpct::vectorized_binary<sycl::uchar4>(
+ ((bq2->signs[4 * ib32 + l] & 0xf) * 0x01010101) & 0x08040201,
+ 0x08040201, std::equal_to<>());
+ uint32_t signs1 = dpct::vectorized_binary<sycl::uchar4>(
+ ((bq2->signs[4 * ib32 + l] >> 4) * 0x01010101) & 0x08040201,
+ 0x08040201, std::equal_to<>());
+ const int grid_l = dpct::vectorized_binary<sycl::uchar4>(
+ grid1[0] ^ signs0, signs0, std::minus<>());
+ const int grid_h = dpct::vectorized_binary<sycl::uchar4>(
+ grid2[0] ^ signs1, signs1, std::minus<>());
+ sumi = dpct::dp4a(grid_l, *((const int *)q8 + 0), sumi);
+ sumi = dpct::dp4a(grid_h, *((const int *)q8 + 1), sumi);
+ q8 += 8;
+ }
+ const float d =
+ (float)bq2->d *
+ (1 + 2 * ((bq2->scales[ib32 / 2] >> 4 * (ib32 % 2)) & 0xf)) *
+ bq8_1[ib32].ds[0];
+ return d * sumi;
+#else
+ assert(false);
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_iq1_s_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs,
+ const uint32_t *iq1s_grid_gpu) {
+#if QK_K == 256
+ const block_iq1_s * bq1 = (const block_iq1_s *) vbq;
+
+ const int ib32 = iqs;
+ int sumi = 0;
+ const int * q8 = (const int *)bq8_1[ib32].qs;
+ for (int l = 0; l < 4; ++l) {
+ const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[ib32] >> 3*l) & 7) << 8)));
+ int grid0 = grid[0] & 0x0f0f0f0f;
+ int grid1 = (grid[0] >> 4) & 0x0f0f0f0f;
+ sumi = dpct::dp4a(q8[2 * l + 1], grid1,
+ dpct::dp4a(q8[2 * l + 0], grid0, sumi));
+ }
+
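+    // qh high bits: 3-bit scale in bits 12..14, delta sign in bit 15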
+ const float delta = bq1->qh[ib32] & 0x8000 ? -1-IQ1S_DELTA : -1+IQ1S_DELTA;
+ const float d1q = (float)bq1->d * (2*((bq1->qh[ib32] >> 12) & 7) + 1);
+ const float d = d1q * bq8_1[ib32].ds[0];
+ const float m = d1q * bq8_1[ib32].ds[1];
+ return d * sumi + m * delta;
+#else
+ assert(false);
+#endif
+}
+
+static __dpct_inline__ float
+vec_dot_iq1_m_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+#if QK_K == 256
+ const block_iq1_m * bq1 = (const block_iq1_m *) vbq;
+
+ const int ib32 = iqs;
+ int sumi[2] = {0, 0};
+ float sumf[2] = {0.f, 0.f};
+
+ const int * q8 = (const int *)bq8_1[ib32].qs;
+ for (int l = 0; l < 4; ++l) {
+ const int * grid = (const int *)(iq1s_grid_gpu + (bq1->qs[4*ib32+l] | (((bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 7) << 8)));
+ int grid0 = grid[0] & 0x0f0f0f0f;
+ int grid1 = (grid[0] >> 4) & 0x0f0f0f0f;
+ sumi[l / 2] = dpct::dp4a(q8[2 * l + 1], grid1,
+ dpct::dp4a(q8[2 * l + 0], grid0, sumi[l / 2]));
+ const float delta = (bq1->qh[2*ib32+l/2] >> 4*(l%2)) & 0x08 ? -1-IQ1M_DELTA : -1+IQ1M_DELTA;
+ const int sumy = dpct::dp4a(q8[2 * l + 1], 0x01010101,
+ dpct::dp4a(q8[2 * l + 0], 0x01010101, 0));
+ sumf[l/2] += delta*sumy;
+ }
+
+ iq1m_scale_t scale;
+ const uint16_t * sc = (const uint16_t *)bq1->scales;
+ scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
+ const float d = (float)scale.f16 * bq8_1[ib32].ds[0];
+ return d * ((sumi[0] + sumf[0]) * (2*((sc[ib32/2] >> 6*(ib32%2)) & 0x7) + 1) + (sumi[1] + sumf[1]) * (2*((sc[ib32/2] >> (6*(ib32%2)+3)) & 0x7) + 1));
+#else
+ assert(false);
+#endif
+}
+
+
+static __dpct_inline__ float
+vec_dot_iq4_nl_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+ const block_iq4_nl * bq = (const block_iq4_nl *) vbq;
+
+ const uint16_t * q4 = (const uint16_t *)bq->qs + 2*iqs;
+ const int32_t * q8 = (const int32_t *)bq8_1->qs + iqs;
+
+ const uint8_t * values = (const uint8_t *)kvalues_iq4nl;
+
+ int v1, v2;
+ int sumi1 = 0, sumi2 = 0;
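+    // expand packed 4-bit indices into int8 codebook values (low nibbles -> v1, high nibbles -> v2)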
+ for (int l = 0; l < VDR_Q4_0_Q8_1_MMVQ; ++l) {
+ const uint32_t aux = q4[2*l] | (q4[2*l+1] << 16);
+ get_int_from_table_16(aux, values, v1, v2);
+ sumi1 = dpct::dp4a(v1, q8[l + 0], sumi1);
+ sumi2 = dpct::dp4a(v2, q8[l + 4], sumi2);
+ }
+
+ const float d = (float)bq->d * bq8_1->ds[0];
+ return d * (sumi1 + sumi2);
+}
+
+
+static __dpct_inline__ float
+vec_dot_iq4_xs_q8_1(const void *__restrict__ vbq,
+ const block_q8_1 *__restrict__ bq8_1, const int &iqs) {
+
+#if QK_K == 256
+ const block_iq4_xs * bq4 = (const block_iq4_xs *) vbq;
+ const uint8_t * values = (const uint8_t *)kvalues_iq4nl;
+
+ // iqs is 0...7
+ const int ib32 = iqs;
+ const int32_t * q8 = (const int *)bq8_1[ib32].qs;
+ const uint32_t * q4 = (const uint32_t *)bq4->qs + 4*ib32;
+ const int8_t ls = ((bq4->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((bq4->scales_h >> 2*ib32) & 3) << 4);
+ const float d = (float)bq4->d * (ls - 32) * bq8_1[ib32].ds[0];
+ int v1, v2;
+ int sumi1 = 0, sumi2 = 0;
+ for (int j = 0; j < 4; ++j) {
+ get_int_from_table_16(q4[j], values, v1, v2);
+ sumi1 = dpct::dp4a(v1, q8[j + 0], sumi1);
+ sumi2 = dpct::dp4a(v2, q8[j + 4], sumi2);
+ }
+ return d * (sumi1 + sumi2);
+#else
+ assert(false);
+#endif
+}
+
+#endif // GGML_SYCL_VECDOTQ_HPP
diff --git a/llama.cpp/ggml/src/ggml-sycl/wkv.cpp b/llama.cpp/ggml/src/ggml-sycl/wkv.cpp
new file mode 100644
index 0000000..b56e0c2
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/wkv.cpp
@@ -0,0 +1,293 @@
+#include <sycl/sycl.hpp>
+#include "wkv.hpp"
+
+constexpr int WKV_BLOCK_SIZE = 64;
+
+// WKV6 kernel: one work-group per (batch, head) pair, one work-item per channel of the head
+template <int block_size>
+static void rwkv_wkv6_f32_kernel(
+ const int B, const int T, const int C, const int H,
+ const float* k, const float* v, const float* r,
+ const float* tf, const float* td, const float* s,
+ float* dst, const sycl::nd_item<3>& item_ct1, float* shared_mem) {
+
+ const int tid = item_ct1.get_local_id(2);
+ const int bid = item_ct1.get_group(2);
+
+ const int head_size = block_size;
+ const int batch_i = bid / H;
+ const int head_i = bid % H;
+ const int state_size = C * head_size;
+ const int n_seq_tokens = T / B;
+
+ // Set up shared memory pointers
+ float* _k = shared_mem;
+ float* _r = _k + head_size;
+ float* _tf = _r + head_size;
+ float* _td = _tf + head_size;
+
+ // Local state array
+ float state[block_size];
+
+ // Load initial state
+ #pragma unroll
+ for (int i = 0; i < head_size; i++) {
+ state[i] = s[batch_i * state_size + head_i * head_size * head_size + i * head_size + tid];
+ }
+
+ // Sync threads before shared memory operations
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ // Load time-mixing parameters
+ _tf[tid] = tf[head_i * head_size + tid];
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ // Main sequence processing loop
+ for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid;
+ t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid;
+ t += C) {
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ // Load current timestep data to shared memory
+ _k[tid] = k[t];
+ _r[tid] = r[t];
+ _td[tid] = td[t];
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ const float _v = v[t];
+ float y = 0;
+
+ // Process in chunks of 4 for better vectorization
+ sycl::float4 k4, r4, tf4, td4, s4;
+ #pragma unroll
+ for (int j = 0; j < head_size; j += 4) {
+ // Load data in vec4 chunks
+ k4 = sycl::float4(_k[j], _k[j+1], _k[j+2], _k[j+3]);
+ r4 = sycl::float4(_r[j], _r[j+1], _r[j+2], _r[j+3]);
+ tf4 = sycl::float4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]);
+ td4 = sycl::float4(_td[j], _td[j+1], _td[j+2], _td[j+3]);
+ s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]);
+
+ // Compute key-value product
+ sycl::float4 kv4 = k4 * _v;
+
+ // Accumulate weighted sum
+ y += sycl::dot(r4, tf4 * kv4 + s4);
+
+ // Update state
+ s4 = s4 * td4 + kv4;
+
+ // Store updated state
+ state[j] = s4.x();
+ state[j+1] = s4.y();
+ state[j+2] = s4.z();
+ state[j+3] = s4.w();
+ }
+
+ dst[t] = y;
+ }
+
+ // Save final state
+ #pragma unroll
+ for (int i = 0; i < head_size; i++) {
+ dst[T * C + batch_i * state_size + head_i * head_size * head_size + i * head_size + tid] = state[i];
+ }
+}
+
+template <int block_size>
+static void rwkv_wkv7_f32_kernel(
+ const int B, const int T, const int C, const int H,
+ const float* r, const float* w, const float* k, const float* v,
+ const float* a, const float* b, const float* s,
+ float* dst, const sycl::nd_item<3>& item_ct1, float* shared_mem) {
+
+ const int tid = item_ct1.get_local_id(2);
+ const int bid = item_ct1.get_group(2);
+
+ const int head_size = block_size;
+ const int batch_i = bid / H;
+ const int head_i = bid % H;
+ const int state_size = C * head_size;
+ const int n_seq_tokens = T / B;
+
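+    // Set up shared memory pointers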
+ float* _r = shared_mem;
+ float* _w = _r + head_size;
+ float* _k = _w + head_size;
+ float* _a = _k + head_size;
+ float* _b = _a + head_size;
+
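+    // Per-thread slice of the head state, kept in registers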
+ float state[block_size];
+
+ #pragma unroll
+ for (int i = 0; i < head_size; i++) {
+ state[i] = s[batch_i * state_size + head_i * head_size * head_size + tid * head_size + i];
+ }
+
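+    // Main sequence processing loop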
+ for (int t = batch_i * n_seq_tokens * C + head_i * head_size + tid;
+ t < (batch_i + 1) * n_seq_tokens * C + head_i * head_size + tid;
+ t += C) {
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ _r[tid] = r[t];
+ _w[tid] = w[t];
+ _k[tid] = k[t];
+ _a[tid] = a[t];
+ _b[tid] = b[t];
+
+ item_ct1.barrier(sycl::access::fence_space::local_space);
+
+ const float _v = v[t];
+ float y = 0, sa = 0;
+ sycl::float4 a4, s4;
+
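+        // First pass: accumulate sa = dot(a, state) for the state update below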
+ #pragma unroll
+ for (int j = 0; j < head_size; j += 4) {
+ a4 = sycl::float4(_a[j], _a[j+1], _a[j+2], _a[j+3]);
+ s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]);
+ sa += sycl::dot(a4, s4);
+ }
+
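+        // Second pass: state = state * w + k*v + sa*b, then accumulate y += dot(r, state)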
+ sycl::float4 r4, w4, k4, b4;
+ #pragma unroll
+ for (int j = 0; j < head_size; j += 4) {
+ r4 = sycl::float4(_r[j], _r[j+1], _r[j+2], _r[j+3]);
+ w4 = sycl::float4(_w[j], _w[j+1], _w[j+2], _w[j+3]);
+ k4 = sycl::float4(_k[j], _k[j+1], _k[j+2], _k[j+3]);
+ b4 = sycl::float4(_b[j], _b[j+1], _b[j+2], _b[j+3]);
+ s4 = sycl::float4(state[j], state[j+1], state[j+2], state[j+3]);
+
+ sycl::float4 kv4 = k4 * _v;
+
+ s4 = s4 * w4 + kv4 + sa * b4;
+ y += sycl::dot(r4, s4);
+
+ state[j] = s4.x();
+ state[j+1] = s4.y();
+ state[j+2] = s4.z();
+ state[j+3] = s4.w();
+ }
+
+ dst[t] = y;
+ }
+
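+    // Save final state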
+ #pragma unroll
+ for (int i = 0; i < head_size; i++) {
+ dst[T * C + batch_i * state_size + head_i * head_size * head_size + tid * head_size + i] = state[i];
+ }
+}
+
+void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/6);
+ const float* k_d = (const float*)dst->src[0]->data;
+ const float* v_d = (const float*)dst->src[1]->data;
+ const float* r_d = (const float*)dst->src[2]->data;
+ const float* tf_d = (const float*)dst->src[3]->data;
+ const float* td_d = (const float*)dst->src[4]->data;
+ const float* s_d = (const float*)dst->src[5]->data;
+ float* dst_d = (float*)dst->data;
+
+ const int64_t B = dst->src[5]->ne[1];
+ const int64_t T = dst->src[0]->ne[2];
+ const int64_t C = dst->ne[0];
+ const int64_t H = dst->src[0]->ne[1];
+
+ GGML_ASSERT(dst->src[5]->type == GGML_TYPE_F32);
+ GGML_ASSERT(C % H == 0);
+    GGML_ASSERT(C / H == WKV_BLOCK_SIZE || C / H == WKV_BLOCK_SIZE * 2); // the SYCL kernels support RWKV6 head sizes of 64 and 128
+
+ dpct::queue_ptr stream = ctx.stream();
+
+ // Calculate execution configuration
+    const size_t shared_mem_size = C / H * 4; // elements (floats) for the k, r, tf, td tiles; local_accessor counts elements, not bytes
+ sycl::range<3> block_dims(1, 1, C / H);
+ sycl::range<3> grid_dims(1, 1, B * H);
+
+ // Submit kernel
+ if (C / H == WKV_BLOCK_SIZE) {
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> shared_mem_acc(shared_mem_size, cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ rwkv_wkv6_f32_kernel<WKV_BLOCK_SIZE>(
+ B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d,
+ item_ct1, (float*)shared_mem_acc.get_multi_ptr<sycl::access::decorated::no>().get()
+ );
+ });
+ });
+ } else {
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> shared_mem_acc(shared_mem_size, cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ rwkv_wkv6_f32_kernel<WKV_BLOCK_SIZE * 2>(
+ B, T, C, H, k_d, v_d, r_d, tf_d, td_d, s_d, dst_d,
+ item_ct1, (float*)shared_mem_acc.get_multi_ptr<sycl::access::decorated::no>().get()
+ );
+ });
+ });
+ }
+}
+
+void ggml_sycl_op_rwkv_wkv7(ggml_backend_sycl_context& ctx, ggml_tensor* dst) {
+ scope_op_debug_print scope_dbg_print(__func__, dst, /*num_src=*/7);
+ const float* r_d = (const float*)dst->src[0]->data;
+ const float* w_d = (const float*)dst->src[1]->data;
+ const float* k_d = (const float*)dst->src[2]->data;
+ const float* v_d = (const float*)dst->src[3]->data;
+ const float* a_d = (const float*)dst->src[4]->data;
+ const float* b_d = (const float*)dst->src[5]->data;
+ const float* s_d = (const float*)dst->src[6]->data;
+ float* dst_d = (float*)dst->data;
+
+ const int64_t B = dst->src[6]->ne[1];
+ const int64_t T = dst->src[0]->ne[2];
+ const int64_t C = dst->ne[0];
+ const int64_t H = dst->src[0]->ne[1];
+
+ GGML_ASSERT(dst->src[6]->type == GGML_TYPE_F32);
+ GGML_ASSERT(C % H == 0);
+ GGML_ASSERT(C / H == WKV_BLOCK_SIZE || C / H == WKV_BLOCK_SIZE * 2);
+
+ dpct::queue_ptr stream = ctx.stream();
+
+ // Calculate execution configuration
+    const size_t shared_mem_size = C / H * 5; // elements (floats) for the r, w, k, a, b tiles
+ sycl::range<3> block_dims(1, 1, C / H);
+ sycl::range<3> grid_dims(1, 1, B * H);
+
+ // Submit kernel
+ if (C / H == WKV_BLOCK_SIZE) {
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> shared_mem_acc(shared_mem_size, cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ rwkv_wkv7_f32_kernel<WKV_BLOCK_SIZE>(
+ B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d,
+ item_ct1, (float*)shared_mem_acc.get_multi_ptr<sycl::access::decorated::no>().get()
+ );
+ });
+ });
+ } else {
+ stream->submit([&](sycl::handler& cgh) {
+ sycl::local_accessor<float, 1> shared_mem_acc(shared_mem_size, cgh);
+
+ cgh.parallel_for(
+ sycl::nd_range<3>(grid_dims * block_dims, block_dims),
+ [=](sycl::nd_item<3> item_ct1) {
+ rwkv_wkv7_f32_kernel<WKV_BLOCK_SIZE * 2>(
+ B, T, C, H, r_d, w_d, k_d, v_d, a_d, b_d, s_d, dst_d,
+ item_ct1, (float*)shared_mem_acc.get_multi_ptr<sycl::access::decorated::no>().get()
+ );
+ });
+ });
+ }
+}
diff --git a/llama.cpp/ggml/src/ggml-sycl/wkv.hpp b/llama.cpp/ggml/src/ggml-sycl/wkv.hpp
new file mode 100644
index 0000000..9f34a10
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-sycl/wkv.hpp
@@ -0,0 +1,10 @@
+#ifndef GGML_SYCL_WKV_HPP
+#define GGML_SYCL_WKV_HPP
+
+#include "common.hpp"
+
+void ggml_sycl_op_rwkv_wkv6(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+void ggml_sycl_op_rwkv_wkv7(ggml_backend_sycl_context & ctx, ggml_tensor * dst);
+
+#endif // GGML_SYCL_WKV_HPP