Diffstat (limited to 'llama.cpp/ggml/src/ggml-metal')
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/CMakeLists.txt            124
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-common.cpp     446
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-common.h        52
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-context.h       41
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-context.m      702
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-device.cpp    1875
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-device.h       290
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-device.m      1748
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h        1051
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.cpp       4222
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.h           93
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal.cpp            937
-rw-r--r--   llama.cpp/ggml/src/ggml-metal/ggml-metal.metal         9798
13 files changed, 21379 insertions, 0 deletions
diff --git a/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt b/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt
new file mode 100644
index 0000000..42054d8
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/CMakeLists.txt
@@ -0,0 +1,124 @@
+find_library(FOUNDATION_LIBRARY Foundation REQUIRED)
+find_library(METAL_FRAMEWORK Metal REQUIRED)
+find_library(METALKIT_FRAMEWORK MetalKit REQUIRED)
+
+message(STATUS "Metal framework found")
+
+ggml_add_backend_library(ggml-metal
+ ggml-metal.cpp
+ ggml-metal-device.m
+ ggml-metal-device.cpp
+ ggml-metal-common.cpp
+ ggml-metal-context.m
+ ggml-metal-ops.cpp
+ )
+
+target_link_libraries(ggml-metal PRIVATE
+ ${FOUNDATION_LIBRARY}
+ ${METAL_FRAMEWORK}
+ ${METALKIT_FRAMEWORK}
+ )
+
+if (GGML_METAL_NDEBUG)
+ add_compile_definitions(GGML_METAL_NDEBUG)
+endif()
+
+set(METALLIB_COMMON "${CMAKE_CURRENT_SOURCE_DIR}/../ggml-common.h")
+if (GGML_METAL_EMBED_LIBRARY)
+ enable_language(ASM)
+
+ add_compile_definitions(GGML_METAL_EMBED_LIBRARY)
+
+ set(METALLIB_SOURCE "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal.metal")
+ set(METALLIB_IMPL "${CMAKE_CURRENT_SOURCE_DIR}/ggml-metal-impl.h")
+
+ file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/autogenerated")
+
+ # merge ggml-common.h and ggml-metal.metal into a single file
+ set(METALLIB_EMBED_ASM "${CMAKE_CURRENT_BINARY_DIR}/autogenerated/ggml-metal-embed.s")
+ set(METALLIB_SOURCE_EMBED "${CMAKE_CURRENT_BINARY_DIR}/autogenerated/ggml-metal-embed.metal")
+ set(METALLIB_SOURCE_EMBED_TMP "${CMAKE_CURRENT_BINARY_DIR}/autogenerated/ggml-metal-embed.metal.tmp")
+
+ add_custom_command(
+ OUTPUT "${METALLIB_EMBED_ASM}"
+ COMMAND echo "Embedding Metal library"
+ COMMAND sed -e "/__embed_ggml-common.h__/r ${METALLIB_COMMON}" -e "/__embed_ggml-common.h__/d" < "${METALLIB_SOURCE}" > "${METALLIB_SOURCE_EMBED_TMP}"
+ COMMAND sed -e "/\#include \"ggml-metal-impl.h\"/r ${METALLIB_IMPL}" -e "/\#include \"ggml-metal-impl.h\"/d" < "${METALLIB_SOURCE_EMBED_TMP}" > "${METALLIB_SOURCE_EMBED}"
+ COMMAND echo ".section __DATA,__ggml_metallib" > "${METALLIB_EMBED_ASM}"
+ COMMAND echo ".globl _ggml_metallib_start" >> "${METALLIB_EMBED_ASM}"
+ COMMAND echo "_ggml_metallib_start:" >> "${METALLIB_EMBED_ASM}"
+ COMMAND echo .incbin "\"${METALLIB_SOURCE_EMBED}\"" >> "${METALLIB_EMBED_ASM}"
+ COMMAND echo ".globl _ggml_metallib_end" >> "${METALLIB_EMBED_ASM}"
+ COMMAND echo "_ggml_metallib_end:" >> "${METALLIB_EMBED_ASM}"
+ DEPENDS ../ggml-common.h ggml-metal.metal ggml-metal-impl.h
+ COMMENT "Generate assembly for embedded Metal library"
+ VERBATIM
+ )
+
+ target_sources(ggml-metal PRIVATE "${METALLIB_EMBED_ASM}")
+else()
+ # copy metal files to bin directory
+ configure_file(../ggml-common.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h COPYONLY)
+ configure_file(ggml-metal.metal ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal COPYONLY)
+ configure_file(ggml-metal-impl.h ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal-impl.h COPYONLY)
+
+ if (GGML_METAL_SHADER_DEBUG)
+ # custom command to do the following:
+ # xcrun -sdk macosx metal -fno-fast-math -c ggml-metal.metal -o ggml-metal.air
+ # xcrun -sdk macosx metallib ggml-metal.air -o default.metallib
+ #
+ # note: this is the only way I found to disable fast-math in Metal. it's ugly, but at least it works
+ # disabling fast math is needed in order to pass tests/test-backend-ops
+ # note: adding -fno-inline fixes the tests when using MTL_SHADER_VALIDATION=1
+ # note: unfortunately, we have to call it default.metallib instead of ggml.metallib
+ # ref: https://github.com/ggml-org/whisper.cpp/issues/1720
+ # note: adding -g causes segmentation fault during compile
+ #set(XC_FLAGS -fno-fast-math -fno-inline -g)
+ set(XC_FLAGS -fno-fast-math -fno-inline)
+ else()
+ set(XC_FLAGS -O3)
+ endif()
+
+ # Append macOS metal versioning flags
+ if (GGML_METAL_MACOSX_VERSION_MIN)
+ message(STATUS "Adding -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN} flag to metal compilation")
+ list (APPEND XC_FLAGS -mmacosx-version-min=${GGML_METAL_MACOSX_VERSION_MIN})
+ endif()
+
+ if (GGML_METAL_STD)
+ message(STATUS "Adding -std=${GGML_METAL_STD} flag to metal compilation")
+ list (APPEND XC_FLAGS -std=${GGML_METAL_STD})
+ endif()
+
+ add_custom_command(
+ OUTPUT ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+ COMMAND xcrun -sdk macosx metal ${XC_FLAGS} -c ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal -o - |
+ xcrun -sdk macosx metallib - -o ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+ COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-common.h
+ COMMAND rm -f ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/ggml-metal.metal
+ DEPENDS ggml-metal.metal ${METALLIB_COMMON}
+ COMMENT "Compiling Metal kernels"
+ )
+
+ # FIXME: only add to the ggml-metal target?
+ add_custom_target(
+ ggml-metal-lib ALL
+ DEPENDS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+ )
+endif() # GGML_METAL_EMBED_LIBRARY
+
+if (NOT GGML_METAL_EMBED_LIBRARY)
+ install(
+ FILES src/ggml-metal/ggml-metal.metal
+ PERMISSIONS
+ OWNER_READ
+ OWNER_WRITE
+ GROUP_READ
+ WORLD_READ
+ DESTINATION ${CMAKE_INSTALL_BINDIR})
+
+ install(
+ FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/default.metallib
+ DESTINATION ${CMAKE_INSTALL_BINDIR}
+ )
+endif()
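
For reference, the `.incbin` assembly generated above exposes the embedded Metal source between the `_ggml_metallib_start` and `_ggml_metallib_end` labels. Below is a minimal sketch of how C++ code can locate that blob; the actual loading path lives in the backend sources further down and may differ. On Mach-O the leading underscore of the assembly labels is added by the toolchain, so the C identifiers drop it.

```cpp
// Sketch only: referencing the data emitted into the __DATA,__ggml_metallib section.
// The symbol names come from the generated ggml-metal-embed.s above.
extern "C" const char ggml_metallib_start[];
extern "C" const char ggml_metallib_end[];

static const char * embedded_metallib_data(void) { return ggml_metallib_start; }
static size_t       embedded_metallib_size(void) {
    return (size_t) (ggml_metallib_end - ggml_metallib_start);
}
```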
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-common.cpp b/llama.cpp/ggml/src/ggml-metal/ggml-metal-common.cpp
new file mode 100644
index 0000000..95627d3
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-common.cpp
@@ -0,0 +1,446 @@
+#include "ggml-metal-common.h"
+
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include <vector>
+
+// represents a memory range (i.e. an interval from a starting address p0 to an ending address p1 in a given buffer pb)
+// the type indicates whether it is a source range (i.e. ops read data from it) or a destination range (i.e. ops write data to it)
+struct ggml_mem_range {
+ uint64_t pb; // buffer id
+
+ uint64_t p0; // begin
+ uint64_t p1; // end
+
+ ggml_mem_range_type pt;
+};
+
+struct ggml_mem_ranges {
+ std::vector<ggml_mem_range> ranges;
+
+ int debug = 0;
+};
+
+ggml_mem_ranges_t ggml_mem_ranges_init(int debug) {
+ auto * res = new ggml_mem_ranges;
+
+ res->ranges.reserve(256);
+ res->debug = debug;
+
+ return res;
+}
+
+void ggml_mem_ranges_free(ggml_mem_ranges_t mrs) {
+ delete mrs;
+}
+
+void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs) {
+ mrs->ranges.clear();
+}
+
+static bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, ggml_mem_range mr) {
+ mrs->ranges.push_back(mr);
+
+ return true;
+}
+
+static ggml_mem_range ggml_mem_range_from_tensor(const ggml_tensor * tensor, ggml_mem_range_type pt) {
+ // always use the base tensor
+ tensor = tensor->view_src ? tensor->view_src : tensor;
+
+ GGML_ASSERT(!tensor->view_src);
+
+ ggml_mem_range mr;
+
+ if (tensor->buffer) {
+ // when the tensor is allocated, use the actual memory address range in the buffer
+ //
+ // take the actual allocated size with ggml_backend_buft_get_alloc_size()
+ // this can be larger than the tensor size if the buffer type allocates extra memory
+ // ref: https://github.com/ggml-org/llama.cpp/pull/15966
+ mr = {
+ /*.pb =*/ (uint64_t) tensor->buffer,
+ /*.p0 =*/ (uint64_t) tensor->data,
+ /*.p1 =*/ (uint64_t) tensor->data + ggml_backend_buft_get_alloc_size(tensor->buffer->buft, tensor),
+ /*.pt =*/ pt,
+ };
+ } else {
+        // otherwise, the pointer address is used as a unique id of the memory ranges
+ // that the tensor will be using when it is allocated
+ mr = {
+ /*.pb =*/ (uint64_t) tensor,
+ /*.p0 =*/ 0, //
+ /*.p1 =*/ 1024, // [0, 1024) is a dummy range, not used
+ /*.pt =*/ pt,
+ };
+ };
+
+ return mr;
+}
+
+static ggml_mem_range ggml_mem_range_from_tensor_src(const ggml_tensor * tensor) {
+ return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_SRC);
+}
+
+static ggml_mem_range ggml_mem_range_from_tensor_dst(const ggml_tensor * tensor) {
+ return ggml_mem_range_from_tensor(tensor, MEM_RANGE_TYPE_DST);
+}
+
+static bool ggml_mem_ranges_add_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
+ GGML_ASSERT(tensor);
+
+ ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor);
+
+ if (mrs->debug > 2) {
+ GGML_LOG_DEBUG("%s: add src range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1);
+ }
+
+ return ggml_mem_ranges_add(mrs, mr);
+}
+
+static bool ggml_mem_ranges_add_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
+ GGML_ASSERT(tensor);
+
+ ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor);
+
+ if (mrs->debug > 2) {
+ GGML_LOG_DEBUG("%s: add dst range buf=%lld, [%lld, %lld)\n", __func__, mr.pb, mr.p0, mr.p1);
+ }
+
+ return ggml_mem_ranges_add(mrs, mr);
+}
+
+bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (tensor->src[i]) {
+ ggml_mem_ranges_add_src(mrs, tensor->src[i]);
+ }
+ }
+
+ return ggml_mem_ranges_add_dst(mrs, tensor);
+}
+
+static bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, ggml_mem_range mr) {
+ for (size_t i = 0; i < mrs->ranges.size(); i++) {
+ const auto & cmp = mrs->ranges[i];
+
+ // two memory ranges cannot intersect if they are in different buffers
+ if (mr.pb != cmp.pb) {
+ continue;
+ }
+
+ // intersecting source ranges are allowed
+ if (mr.pt == MEM_RANGE_TYPE_SRC && cmp.pt == MEM_RANGE_TYPE_SRC) {
+ continue;
+ }
+
+ if (mr.p0 < cmp.p1 && mr.p1 >= cmp.p0) {
+ if (mrs->debug > 2) {
+ GGML_LOG_DEBUG("%s: the %s range buf=%lld, [%lld, %lld) overlaps with a previous %s range buf=%lld, [%lld, %lld)\n",
+ __func__,
+ mr.pt == MEM_RANGE_TYPE_SRC ? "src" : "dst",
+ mr.pb, mr.p0, mr.p1,
+ cmp.pt == MEM_RANGE_TYPE_SRC ? "src" : "dst",
+ cmp.pb, cmp.p0, cmp.p1);
+ }
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static bool ggml_mem_ranges_check_src(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
+ GGML_ASSERT(tensor);
+
+ ggml_mem_range mr = ggml_mem_range_from_tensor_src(tensor);
+
+ const bool res = ggml_mem_ranges_check(mrs, mr);
+
+ return res;
+}
+
+static bool ggml_mem_ranges_check_dst(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
+ GGML_ASSERT(tensor);
+
+ ggml_mem_range mr = ggml_mem_range_from_tensor_dst(tensor);
+
+ const bool res = ggml_mem_ranges_check(mrs, mr);
+
+ return res;
+}
+
+bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const ggml_tensor * tensor) {
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (tensor->src[i]) {
+ if (!ggml_mem_ranges_check_src(mrs, tensor->src[i])) {
+ return false;
+ }
+ }
+ }
+
+ return ggml_mem_ranges_check_dst(mrs, tensor);
+}
+
+struct node_info {
+ ggml_tensor * node;
+
+ std::vector<ggml_tensor *> fused;
+
+ ggml_op op() const {
+ return node->op;
+ }
+
+ const ggml_tensor * dst() const {
+ return fused.empty() ? node : fused.back();
+ }
+
+ bool is_empty() const {
+ return ggml_op_is_empty(node->op);
+ }
+
+ void add_fused(ggml_tensor * t) {
+ fused.push_back(t);
+ }
+};
+
+static std::vector<int> ggml_metal_graph_optimize_reorder(const std::vector<node_info> & nodes) {
+ // helper to add node src and dst ranges
+ const auto & h_add = [](ggml_mem_ranges_t mrs, const node_info & node) {
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (node.node->src[i]) {
+ if (!ggml_mem_ranges_add_src(mrs, node.node->src[i])) {
+ return false;
+ }
+ }
+ }
+
+ // keep track of the sources of the fused nodes as well
+ for (const auto * fused : node.fused) {
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (fused->src[i]) {
+ if (!ggml_mem_ranges_add_src(mrs, fused->src[i])) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return ggml_mem_ranges_add_dst(mrs, node.dst());
+ };
+
+ // helper to check if a node can run concurrently with the existing set of nodes
+ const auto & h_check = [](ggml_mem_ranges_t mrs, const node_info & node) {
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (node.node->src[i]) {
+ if (!ggml_mem_ranges_check_src(mrs, node.node->src[i])) {
+ return false;
+ }
+ }
+ }
+
+ for (const auto * fused : node.fused) {
+ for (int i = 0; i < GGML_MAX_SRC; i++) {
+ if (fused->src[i]) {
+ if (!ggml_mem_ranges_check_src(mrs, fused->src[i])) {
+ return false;
+ }
+ }
+ }
+ }
+
+ return ggml_mem_ranges_check_dst(mrs, node.dst());
+ };
+
+ // perform reorders only across these types of ops
+ // can be expanded when needed
+ const auto & h_safe = [](ggml_op op) {
+ switch (op) {
+ case GGML_OP_MUL_MAT:
+ case GGML_OP_MUL_MAT_ID:
+ case GGML_OP_ROPE:
+ case GGML_OP_NORM:
+ case GGML_OP_RMS_NORM:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_MUL:
+ case GGML_OP_ADD:
+ case GGML_OP_DIV:
+ case GGML_OP_GLU:
+ case GGML_OP_SCALE:
+ case GGML_OP_GET_ROWS:
+ case GGML_OP_CPY:
+ case GGML_OP_SET_ROWS:
+ return true;
+ default:
+ return ggml_op_is_empty(op);
+ }
+ };
+
+ const int n = nodes.size();
+
+ std::vector<int> res;
+ res.reserve(n);
+
+ std::vector<bool> used(n, false);
+
+ // the memory ranges for the set of currently concurrent nodes
+ ggml_mem_ranges_t mrs0 = ggml_mem_ranges_init(0);
+
+ // the memory ranges for the set of nodes that haven't been processed yet, when looking forward for a node to reorder
+ ggml_mem_ranges_t mrs1 = ggml_mem_ranges_init(0);
+
+ for (int i0 = 0; i0 < n; i0++) {
+ if (used[i0]) {
+ continue;
+ }
+
+ const auto & node0 = nodes[i0];
+
+        // the node is not concurrent with the existing concurrent set, so we have to "put a barrier" (i.e. reset mrs0)
+ // but before we do that, look forward for some other nodes that can be added to the concurrent set mrs0
+ //
+        // note: we can always add empty nodes to the concurrent set as they neither read nor write anything
+ if (!node0.is_empty() && !h_check(mrs0, node0)) {
+ // this will hold the set of memory ranges from the nodes that haven't been processed yet
+ // if a node is not concurrent with this set, we cannot reorder it
+ ggml_mem_ranges_reset(mrs1);
+
+ // initialize it with the current node
+ h_add(mrs1, node0);
+
+            // how many nodes to look forward when searching for a concurrent node
+ constexpr int N_FORWARD = 8;
+
+ for (int i1 = i0 + 1; i1 < i0 + N_FORWARD && i1 < n; i1++) {
+ if (used[i1]) {
+ continue;
+ }
+
+ const auto & node1 = nodes[i1];
+
+ // disallow reordering of certain ops
+ if (!h_safe(node1.op())) {
+ break;
+ }
+
+ const bool is_empty = node1.is_empty();
+
+ // to reorder a node and add it to the concurrent set, it has to be:
+ // + empty or concurrent with all nodes in the existing concurrent set (mrs0)
+ // + concurrent with all nodes prior to it that haven't been processed yet (mrs1)
+ if ((is_empty || h_check(mrs0, node1)) && h_check(mrs1, node1)) {
+ // add the node to the existing concurrent set (i.e. reorder it for early execution)
+ h_add(mrs0, node1);
+ res.push_back(i1);
+
+ // mark as used, so we skip re-processing it later
+ used[i1] = true;
+ } else {
+ // expand the set of nodes that haven't been processed yet
+ h_add(mrs1, node1);
+ }
+ }
+
+ // finalize the concurrent set and begin a new one
+ ggml_mem_ranges_reset(mrs0);
+ }
+
+ // expand the concurrent set with the current node
+ {
+ h_add(mrs0, node0);
+ res.push_back(i0);
+ }
+ }
+
+ ggml_mem_ranges_free(mrs0);
+ ggml_mem_ranges_free(mrs1);
+
+ return res;
+}
+
+void ggml_graph_optimize(ggml_cgraph * gf) {
+ constexpr int MAX_FUSE = 16;
+
+ const int n = gf->n_nodes;
+
+ enum ggml_op ops[MAX_FUSE];
+
+ std::vector<node_info> nodes;
+ nodes.reserve(gf->n_nodes);
+
+ // fuse nodes:
+ // we don't want to make reorders that break fusing, so we first pack all fusable tensors
+ // and perform the reorder over the fused nodes. after the reorder is done, we unfuse
+ for (int i = 0; i < n; i++) {
+ node_info node = {
+ /*.node =*/ gf->nodes[i],
+ /*.fused =*/ {},
+ };
+
+ // fuse only ops that start with these operations
+ // can be expanded when needed
+ if (node.op() == GGML_OP_ADD ||
+ node.op() == GGML_OP_NORM ||
+ node.op() == GGML_OP_RMS_NORM) {
+ ops[0] = node.op();
+
+ int f = i + 1;
+ while (f < n && f < i + MAX_FUSE) {
+ // conservatively allow fusing only these ops
+ // can be expanded when needed
+ if (gf->nodes[f]->op != GGML_OP_ADD &&
+ gf->nodes[f]->op != GGML_OP_MUL &&
+ gf->nodes[f]->op != GGML_OP_NORM &&
+ gf->nodes[f]->op != GGML_OP_RMS_NORM) {
+ break;
+ }
+ ops[f - i] = gf->nodes[f]->op;
+ f++;
+ }
+
+ f -= i;
+ for (; f > 1; f--) {
+ if (ggml_can_fuse(gf, i, ops, f)) {
+ break;
+ }
+ }
+
+ // add the fused tensors into the node info so we can unfuse them later
+ for (int k = 1; k < f; k++) {
+ ++i;
+
+ // the .dst() becomes the last fused tensor
+ node.add_fused(gf->nodes[i]);
+ }
+ }
+
+ nodes.push_back(std::move(node));
+ }
+
+#if 1
+ // reorder to improve concurrency
+ const auto order = ggml_metal_graph_optimize_reorder(nodes);
+#else
+ std::vector<int> order(nodes.size());
+ for (size_t i = 0; i < nodes.size(); i++) {
+ order[i] = i;
+ }
+#endif
+
+ // unfuse
+ {
+ int j = 0;
+ for (const auto i : order) {
+ const auto & node = nodes[i];
+
+ gf->nodes[j++] = node.node;
+
+ for (auto * fused : node.fused) {
+ gf->nodes[j++] = fused;
+ }
+ }
+ }
+}
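
The scheduling decision above boils down to an interval test on [p0, p1) ranges that live in the same buffer, with read/read overlaps exempted. Here is a standalone restatement of that predicate, a sketch mirroring ggml_mem_ranges_check; note that the `>=` also treats touching ranges as a conflict, which is conservative but safe.

```cpp
#include <cstdint>

// Sketch of the conflict rule used by ggml_mem_ranges_check above.
struct mem_range {
    uint64_t pb;     // buffer id
    uint64_t p0;     // begin
    uint64_t p1;     // end (exclusive)
    bool     is_src; // read-only range
};

static bool ranges_conflict(const mem_range & a, const mem_range & b) {
    if (a.pb != b.pb)         return false; // different buffers never conflict
    if (a.is_src && b.is_src) return false; // concurrent reads are allowed
    return a.p0 < b.p1 && a.p1 >= b.p0;     // same interval test as the original code
}
```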
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-common.h b/llama.cpp/ggml/src/ggml-metal/ggml-metal-common.h
new file mode 100644
index 0000000..3acbc6a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-common.h
@@ -0,0 +1,52 @@
+// helper functions for ggml-metal that are too difficult to implement in Objective-C
+
+#pragma once
+
+#include <stdbool.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ggml_tensor;
+struct ggml_cgraph;
+
+enum ggml_mem_range_type {
+ MEM_RANGE_TYPE_SRC = 0,
+ MEM_RANGE_TYPE_DST = 1,
+};
+
+// a helper object that can be used for reordering operations to improve concurrency
+//
+// the fundamental idea is that a set of tasks (either ggml ops, or something else) can run concurrently if they
+// don't write to memory that is being read by another task or written to by another task in the set
+//
+// with this structure, we can add tasks to the set, recording their memory constraints. we can also check if a new task
+// can be added to the set without violating the constraints (i.e. if it can be executed concurrently with the
+// tasks already in the set)
+//
+typedef struct ggml_mem_ranges * ggml_mem_ranges_t;
+
+ggml_mem_ranges_t ggml_mem_ranges_init(int debug);
+void ggml_mem_ranges_free(ggml_mem_ranges_t mrs);
+
+// remove all ranges from the set
+void ggml_mem_ranges_reset(ggml_mem_ranges_t mrs);
+
+// add src or dst ranges to track
+bool ggml_mem_ranges_add(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor);
+
+// return false if:
+// - new src range overlaps with any existing dst range
+// - new dst range overlaps with any existing range (src or dst)
+bool ggml_mem_ranges_check(ggml_mem_ranges_t mrs, const struct ggml_tensor * tensor);
+
+// reorder the nodes in the graph to improve concurrency, while respecting fusion
+//
+// note: this implementation is generic and not specific to metal
+// if it proves to work well, we can start using it for other backends in the future
+void ggml_graph_optimize(struct ggml_cgraph * gf);
+
+#ifdef __cplusplus
+}
+#endif
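
A short usage sketch of this API as described by the comments above: ops are admitted into the current concurrent set while ggml_mem_ranges_check passes, and the set is reset whenever a conflict forces a barrier. The tensor array is assumed to come from an already-built graph; this is illustrative only, not the actual backend code (the real driver is the reorder pass in ggml-metal-common.cpp).

```cpp
#include "ggml-metal-common.h"

// Illustrative only: group consecutive ops into concurrent sets separated by barriers.
static void group_concurrent_ops(struct ggml_tensor ** ops, int n_ops) {
    ggml_mem_ranges_t mrs = ggml_mem_ranges_init(/*debug =*/ 0);

    for (int i = 0; i < n_ops; ++i) {
        if (!ggml_mem_ranges_check(mrs, ops[i])) {
            // ops[i] reads or writes memory touched by the current set -> barrier
            ggml_mem_ranges_reset(mrs);
        }
        ggml_mem_ranges_add(mrs, ops[i]);
    }

    ggml_mem_ranges_free(mrs);
}
```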
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-context.h b/llama.cpp/ggml/src/ggml-metal/ggml-metal-context.h
new file mode 100644
index 0000000..abf4b06
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-context.h
@@ -0,0 +1,41 @@
+#pragma once
+
+#include "ggml-metal-device.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+//
+// backend context
+//
+
+typedef struct ggml_metal * ggml_metal_t;
+
+ggml_metal_t ggml_metal_init(ggml_metal_device_t dev);
+void ggml_metal_free(ggml_metal_t ctx);
+
+const char * ggml_metal_get_name(ggml_metal_t ctx);
+
+void ggml_metal_synchronize(ggml_metal_t ctx);
+
+void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+bool ggml_metal_cpy_tensor_async(ggml_metal_t ctx_src, ggml_metal_t ctx_dst, const struct ggml_tensor * src, struct ggml_tensor * dst);
+
+enum ggml_status ggml_metal_graph_compute (ggml_metal_t ctx, struct ggml_cgraph * gf);
+void ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf);
+
+void ggml_metal_event_record(ggml_metal_t ctx, ggml_metal_event_t ev);
+void ggml_metal_event_wait (ggml_metal_t ctx, ggml_metal_event_t ev);
+
+ggml_metal_event_t ggml_metal_get_ev_cpy(ggml_metal_t ctx);
+
+void ggml_metal_set_n_cb (ggml_metal_t ctx, int n_cb);
+void ggml_metal_set_abort_callback (ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data);
+bool ggml_metal_supports_family (ggml_metal_t ctx, int family);
+void ggml_metal_capture_next_compute(ggml_metal_t ctx);
+
+#ifdef __cplusplus
+}
+#endif
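
For orientation, a hedged sketch of the lifecycle implied by this header: a context is created from a device, a graph is optionally optimized and then computed, and synchronize waits on the last submitted command buffer. The device getter is assumed to be declared in ggml-metal-device.h (its definition appears in ggml-metal-device.cpp below), and the exact call order used by the ggml-backend glue in ggml-metal.cpp may differ.

```cpp
#include "ggml-metal-device.h"
#include "ggml-metal-context.h"

// Illustrative lifecycle sketch; error handling and the ggml-backend glue are omitted.
// gf is a compute graph built by the caller.
static enum ggml_status run_graph_on_metal(struct ggml_cgraph * gf) {
    ggml_metal_device_t dev = ggml_metal_device_get(0);
    ggml_metal_t        ctx = ggml_metal_init(dev);

    ggml_metal_set_n_cb(ctx, 1);             // extra encoder threads; 1 or 2 is recommended

    ggml_metal_graph_optimize(ctx, gf);      // reorder/fuse nodes to improve concurrency
    const enum ggml_status status = ggml_metal_graph_compute(ctx, gf);

    ggml_metal_synchronize(ctx);             // wait for the last submitted command buffer
    ggml_metal_free(ctx);

    return status;
}
```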
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-context.m b/llama.cpp/ggml/src/ggml-metal/ggml-metal-context.m
new file mode 100644
index 0000000..5d3a8ce
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-context.m
@@ -0,0 +1,702 @@
+#import "ggml-metal-context.h"
+
+#import "ggml-impl.h"
+#import "ggml-backend-impl.h"
+
+#import "ggml-metal-impl.h"
+#import "ggml-metal-common.h"
+#import "ggml-metal-ops.h"
+
+#import <Foundation/Foundation.h>
+
+#import <Metal/Metal.h>
+
+#undef MIN
+#undef MAX
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+#define MAX(a, b) ((a) > (b) ? (a) : (b))
+
+// max number of MTLCommandBuffer used to submit a graph for processing
+#define GGML_METAL_MAX_COMMAND_BUFFERS 8
+
+struct ggml_metal_command_buffer {
+ id<MTLCommandBuffer> obj;
+};
+
+struct ggml_metal {
+ char name[128];
+
+ ggml_metal_device_t dev;
+ ggml_metal_library_t lib;
+
+ ggml_metal_event_t ev_cpy; // for async copies
+
+ dispatch_queue_t d_queue;
+
+ // additional, inference-time compiled pipelines
+ ggml_metal_pipelines_t pipelines_ext;
+
+ bool use_fusion;
+ bool use_concurrency;
+ bool use_graph_optimize;
+
+ int debug_graph;
+ int debug_fusion;
+
+ // how many times a given op was fused
+ uint64_t fuse_cnt[GGML_OP_COUNT];
+
+ // capture state
+ bool capture_next_compute;
+ bool capture_started;
+
+ id<MTLCaptureScope> capture_scope;
+
+ // command buffer state
+ int n_cb; // number of extra threads used to submit the command buffers
+ int n_nodes_0; // number of nodes submitted by the main thread
+ int n_nodes_1; // remaining number of nodes submitted by the n_cb threads
+ int n_nodes_per_cb;
+
+ struct ggml_cgraph * gf;
+
+ // the callback given to the thread pool
+ void (^encode_async)(size_t ith);
+
+ // n_cb command buffers + 1 used by the main thread
+ struct ggml_metal_command_buffer cmd_bufs[GGML_METAL_MAX_COMMAND_BUFFERS + 1];
+
+ // extra command buffers for things like getting, setting and copying tensors
+ NSMutableArray * cmd_bufs_ext;
+
+ // the last command buffer queued into the Metal queue with operations relevant to the current Metal backend
+ id<MTLCommandBuffer> cmd_buf_last;
+
+ // abort ggml_metal_graph_compute if callback returns true
+ ggml_abort_callback abort_callback;
+ void * abort_callback_data;
+};
+
+ggml_metal_t ggml_metal_init(ggml_metal_device_t dev) {
+ GGML_LOG_INFO("%s: allocating\n", __func__);
+
+#if TARGET_OS_OSX && !GGML_METAL_NDEBUG
+ // Show all the Metal device instances in the system
+ NSArray * devices = MTLCopyAllDevices();
+ for (id<MTLDevice> device in devices) {
+ GGML_LOG_INFO("%s: found device: %s\n", __func__, [[device name] UTF8String]);
+ }
+ [devices release]; // since it was created by a *Copy* C method
+#endif
+
+ // init context
+ ggml_metal_t res = calloc(1, sizeof(struct ggml_metal));
+
+ id<MTLDevice> device = ggml_metal_device_get_obj(dev);
+
+ GGML_LOG_INFO("%s: picking default device: %s\n", __func__, [[device name] UTF8String]);
+
+ // TODO: would it be better to have one queue for the backend and one queue for the device?
+ // the graph encoders and async ops would use the backend queue while the sync ops would use the device queue?
+ //res->queue = [device newCommandQueue]; [TAG_QUEUE_PER_BACKEND]
+ id<MTLCommandQueue> queue = ggml_metal_device_get_queue(dev);
+ if (queue == nil) {
+ GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__);
+ return NULL;
+ }
+
+ res->dev = dev;
+ res->lib = ggml_metal_device_get_library(dev);
+ if (res->lib == NULL) {
+ GGML_LOG_WARN("%s: the device does not have a precompiled Metal library - this is unexpected\n", __func__);
+ GGML_LOG_WARN("%s: will try to compile it on the fly\n", __func__);
+
+ res->lib = ggml_metal_library_init(dev);
+ if (res->lib == NULL) {
+ GGML_LOG_ERROR("%s: error: failed to initialize the Metal library\n", __func__);
+
+ free(res);
+
+ return NULL;
+ }
+ }
+
+ res->ev_cpy = ggml_metal_device_event_init(dev);
+
+ const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev);
+
+ snprintf(res->name, sizeof(res->name), "%s", props_dev->name);
+
+ res->d_queue = dispatch_queue_create("ggml-metal", DISPATCH_QUEUE_CONCURRENT);
+
+ res->use_fusion = getenv("GGML_METAL_FUSION_DISABLE") == nil;
+ res->use_concurrency = getenv("GGML_METAL_CONCURRENCY_DISABLE") == nil;
+
+ {
+ const char * val = getenv("GGML_METAL_GRAPH_DEBUG");
+ res->debug_graph = val ? atoi(val) : 0;
+ }
+
+ {
+ const char * val = getenv("GGML_METAL_FUSION_DEBUG");
+ res->debug_fusion = val ? atoi(val) : 0;
+ }
+
+ res->use_graph_optimize = true;
+
+ if (getenv("GGML_METAL_GRAPH_OPTIMIZE_DISABLE") != NULL) {
+ res->use_graph_optimize = false;
+ }
+
+ memset(res->fuse_cnt, 0, sizeof(res->fuse_cnt));
+
+ GGML_LOG_INFO("%s: use fusion = %s\n", __func__, res->use_fusion ? "true" : "false");
+ GGML_LOG_INFO("%s: use concurrency = %s\n", __func__, res->use_concurrency ? "true" : "false");
+ GGML_LOG_INFO("%s: use graph optimize = %s\n", __func__, res->use_graph_optimize ? "true" : "false");
+
+ res->capture_next_compute = false;
+ res->capture_started = false;
+ res->capture_scope = nil;
+
+ res->gf = nil;
+ res->encode_async = nil;
+ for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
+ res->cmd_bufs[i].obj = nil;
+ }
+
+ res->cmd_bufs_ext = [[NSMutableArray alloc] init];
+
+ res->cmd_buf_last = nil;
+
+ res->pipelines_ext = ggml_metal_pipelines_init();
+
+ return res;
+}
+
+void ggml_metal_free(ggml_metal_t ctx) {
+ GGML_LOG_INFO("%s: deallocating\n", __func__);
+
+ for (int i = 0; i < GGML_METAL_MAX_COMMAND_BUFFERS; ++i) {
+ if (ctx->cmd_bufs[i].obj) {
+ [ctx->cmd_bufs[i].obj release];
+ }
+ }
+
+ for (int i = 0; i < (int) ctx->cmd_bufs_ext.count; ++i) {
+ if (ctx->cmd_bufs_ext[i]) {
+ [ctx->cmd_bufs_ext[i] release];
+ }
+ }
+
+ [ctx->cmd_bufs_ext removeAllObjects];
+ [ctx->cmd_bufs_ext release];
+
+ if (ctx->pipelines_ext) {
+ ggml_metal_pipelines_free(ctx->pipelines_ext);
+ ctx->pipelines_ext = nil;
+ }
+
+ if (ctx->debug_fusion > 0) {
+ GGML_LOG_DEBUG("%s: fusion stats:\n", __func__);
+ for (int i = 0; i < GGML_OP_COUNT; i++) {
+ if (ctx->fuse_cnt[i] == 0) {
+ continue;
+ }
+
+ // note: cannot use ggml_log here
+ GGML_LOG_DEBUG("%s: - %s: %" PRIu64 "\n", __func__, ggml_op_name((enum ggml_op) i), ctx->fuse_cnt[i]);
+ }
+ }
+
+ Block_release(ctx->encode_async);
+
+ //[ctx->queue release]; // [TAG_QUEUE_PER_BACKEND]
+
+ dispatch_release(ctx->d_queue);
+
+ ggml_metal_device_event_free(ctx->dev, ctx->ev_cpy);
+
+ free(ctx);
+}
+
+const char * ggml_metal_get_name(ggml_metal_t ctx) {
+ return ctx->name;
+}
+
+void ggml_metal_synchronize(ggml_metal_t ctx) {
+ // wait for any backend operations to finish
+ if (ctx->cmd_buf_last) {
+ [ctx->cmd_buf_last waitUntilCompleted];
+ ctx->cmd_buf_last = nil;
+ }
+
+ // check status of all command buffers
+ {
+ const int n_cb = ctx->n_cb;
+
+ for (int cb_idx = 0; cb_idx <= n_cb; ++cb_idx) {
+ id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[cb_idx].obj;
+ if (!cmd_buf) {
+ continue;
+ }
+
+ MTLCommandBufferStatus status = [cmd_buf status];
+ if (status != MTLCommandBufferStatusCompleted) {
+ GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, cb_idx, (int) status);
+ if (status == MTLCommandBufferStatusError) {
+ GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
+ }
+ GGML_ABORT("fatal error");
+ }
+ }
+ }
+
+ // release any completed extra command buffers
+ if (ctx->cmd_bufs_ext.count > 0) {
+ for (size_t i = 0; i < ctx->cmd_bufs_ext.count; ++i) {
+ id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs_ext[i];
+
+ MTLCommandBufferStatus status = [cmd_buf status];
+ if (status != MTLCommandBufferStatusCompleted) {
+ GGML_LOG_ERROR("%s: error: command buffer %d failed with status %d\n", __func__, (int) i, (int) status);
+ if (status == MTLCommandBufferStatusError) {
+ GGML_LOG_ERROR("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
+ }
+ GGML_ABORT("fatal error");
+ }
+
+ [cmd_buf release];
+ }
+
+ [ctx->cmd_bufs_ext removeAllObjects];
+ }
+}
+
+static struct ggml_metal_buffer_id ggml_metal_get_buffer_id(const struct ggml_tensor * t) {
+ if (!t) {
+ return (struct ggml_metal_buffer_id) { nil, 0 };
+ }
+
+ ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;
+
+ return ggml_metal_buffer_get_id(buffer->context, t);
+}
+
+void ggml_metal_set_tensor_async(ggml_metal_t ctx, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ @autoreleasepool {
+ // wrap the source data into a Metal buffer
+ id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
+ id<MTLBuffer> buf_src = [device newBufferWithBytes:data
+ length:size
+ options:MTLResourceStorageModeShared];
+
+ GGML_ASSERT(buf_src);
+
+ struct ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(tensor);
+ if (bid_dst.metal == nil) {
+ GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
+ }
+
+ bid_dst.offs += offset;
+
+ // queue the copy operation into the queue of the Metal context
+ // this will be queued at the end, after any currently ongoing GPU operations
+ id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
+ id<MTLCommandBuffer> cmd_buf = [queue commandBuffer];
+ id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+ [encoder copyFromBuffer:buf_src
+ sourceOffset:0
+ toBuffer:bid_dst.metal
+ destinationOffset:bid_dst.offs
+ size:size];
+
+ [encoder endEncoding];
+ [cmd_buf commit];
+ [buf_src release];
+
+ // do not wait here for completion
+ //[cmd_buf waitUntilCompleted];
+
+ // instead, remember a reference to the command buffer and wait for it later if needed
+ [ctx->cmd_bufs_ext addObject:cmd_buf];
+ ctx->cmd_buf_last = cmd_buf;
+
+ [cmd_buf retain];
+ }
+}
+
+void ggml_metal_get_tensor_async(ggml_metal_t ctx, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ @autoreleasepool {
+ id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
+ id<MTLBuffer> buf_dst = [device newBufferWithBytesNoCopy:data
+ length:size
+ options:MTLResourceStorageModeShared
+ deallocator:nil];
+
+ GGML_ASSERT(buf_dst);
+
+ struct ggml_metal_buffer_id bid_src = ggml_metal_get_buffer_id(tensor);
+ if (bid_src.metal == nil) {
+ GGML_ABORT("%s: failed to find buffer for tensor '%s'\n", __func__, tensor->name);
+ }
+
+ bid_src.offs += offset;
+
+ // queue the copy operation into the queue of the Metal context
+ // this will be queued at the end, after any currently ongoing GPU operations
+ id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
+ id<MTLCommandBuffer> cmd_buf = [queue commandBuffer];
+ id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+ [encoder copyFromBuffer:bid_src.metal
+ sourceOffset:bid_src.offs
+ toBuffer:buf_dst
+ destinationOffset:0
+ size:size];
+
+ [encoder endEncoding];
+ [cmd_buf commit];
+ [buf_dst release];
+
+ // do not wait here for completion
+ //[cmd_buf waitUntilCompleted];
+
+ // instead, remember a reference to the command buffer and wait for it later if needed
+ [ctx->cmd_bufs_ext addObject:cmd_buf];
+ ctx->cmd_buf_last = cmd_buf;
+
+ [cmd_buf retain];
+ }
+}
+
+bool ggml_metal_cpy_tensor_async(ggml_metal_t ctx_src, ggml_metal_t ctx_dst, const struct ggml_tensor * src, struct ggml_tensor * dst) {
+ @autoreleasepool {
+ struct ggml_metal_buffer_id bid_src = ggml_metal_get_buffer_id(src);
+ struct ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(dst);
+
+ if (bid_src.metal == nil || bid_dst.metal == nil) {
+ return false;
+ }
+
+ // queue the copy operation into the Metal context
+ // this will be queued at the end, after any currently ongoing GPU operations
+ id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx_src->dev);
+ id<MTLCommandBuffer> cmd_buf = [queue commandBuffer];
+ id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+ [encoder copyFromBuffer:bid_src.metal
+ sourceOffset:bid_src.offs
+ toBuffer:bid_dst.metal
+ destinationOffset:bid_dst.offs
+ size:ggml_nbytes(src)];
+
+ [encoder endEncoding];
+
+ ggml_metal_event_t ev_cpy = ggml_metal_get_ev_cpy(ctx_src);
+ ggml_metal_event_encode_signal(ev_cpy, cmd_buf);
+
+ [cmd_buf commit];
+
+ // do not wait here for completion
+ //[cmd_buf waitUntilCompleted];
+
+ // instead, remember a reference to the command buffer and wait for it later if needed
+ [ctx_src->cmd_bufs_ext addObject:cmd_buf];
+ ctx_src->cmd_buf_last = cmd_buf;
+
+ [cmd_buf retain];
+
+ ggml_metal_event_wait(ctx_dst, ev_cpy);
+
+ return true;
+ }
+}
+
+enum ggml_status ggml_metal_graph_compute(ggml_metal_t ctx, struct ggml_cgraph * gf) {
+ // number of nodes encoded by the main thread (empirically determined)
+ const int n_main = MAX(64, 0.1*gf->n_nodes);
+
+ // number of threads in addition to the main thread
+ const int n_cb = ctx->n_cb;
+
+ // keep the memory wired
+ ggml_metal_device_rsets_keep_alive(ctx->dev);
+
+ // submit the ggml compute graph to the GPU by creating command buffers and encoding the ops in them
+ // the first n_nodes_0 are encoded and submitted for processing directly by the calling thread
+ // while these nodes are processing, we start n_cb threads to enqueue the rest of the nodes
+    // each thread creates its own command buffer and enqueues the ops in parallel
+ //
+    // tests on M1 Pro and M2 Ultra using LLaMA models show that optimal values for n_cb are 1 or 2
+
+ @autoreleasepool {
+ ctx->gf = gf;
+
+ ctx->n_nodes_0 = MIN(n_main, gf->n_nodes);
+ ctx->n_nodes_1 = gf->n_nodes - ctx->n_nodes_0;
+
+ ctx->n_nodes_per_cb = (ctx->n_nodes_1 + ctx->n_cb - 1) / ctx->n_cb;
+
+ const bool use_capture = ctx->capture_next_compute;
+ if (use_capture) {
+ ctx->capture_next_compute = false;
+
+ // make sure all previous computations have finished before starting the capture
+ if (ctx->cmd_buf_last) {
+ [ctx->cmd_buf_last waitUntilCompleted];
+ ctx->cmd_buf_last = nil;
+ }
+
+ if (!ctx->capture_started) {
+ // create capture scope
+ id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
+ ctx->capture_scope = [[MTLCaptureManager sharedCaptureManager] newCaptureScopeWithDevice:device];
+
+ MTLCaptureDescriptor * descriptor = [MTLCaptureDescriptor new];
+ descriptor.captureObject = ctx->capture_scope;
+ descriptor.destination = MTLCaptureDestinationGPUTraceDocument;
+ descriptor.outputURL = [NSURL fileURLWithPath:[NSString stringWithFormat:@"/tmp/perf-metal.gputrace"]];
+
+ NSError * error = nil;
+ if (![[MTLCaptureManager sharedCaptureManager] startCaptureWithDescriptor:descriptor error:&error]) {
+ GGML_LOG_ERROR("%s: error: unable to start capture '%s'\n", __func__, [[error localizedDescription] UTF8String]);
+ } else {
+ [ctx->capture_scope beginScope];
+ ctx->capture_started = true;
+ }
+ }
+ }
+
+ // short-hand
+ id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
+
+ // the main thread commits the first few commands immediately
+ // cmd_buf[n_cb]
+ {
+ id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
+ [cmd_buf retain];
+
+ if (ctx->cmd_bufs[n_cb].obj) {
+ [ctx->cmd_bufs[n_cb].obj release];
+ }
+ ctx->cmd_bufs[n_cb].obj = cmd_buf;
+
+ [cmd_buf enqueue];
+
+ ctx->encode_async(n_cb);
+ }
+
+ // remember the command buffer for the next iteration
+ ctx->cmd_buf_last = ctx->cmd_bufs[n_cb].obj;
+
+ // prepare the rest of the command buffers asynchronously (optional)
+ // cmd_buf[0.. n_cb)
+ for (int cb_idx = 0; cb_idx < n_cb; ++cb_idx) {
+ id<MTLCommandBuffer> cmd_buf = [queue commandBufferWithUnretainedReferences];
+ [cmd_buf retain];
+
+ if (ctx->cmd_bufs[cb_idx].obj) {
+ [ctx->cmd_bufs[cb_idx].obj release];
+ }
+ ctx->cmd_bufs[cb_idx].obj = cmd_buf;
+
+ // always enqueue the first two command buffers
+ // enqueue all of the command buffers if we don't need to abort
+ if (cb_idx < 2 || ctx->abort_callback == NULL) {
+ [cmd_buf enqueue];
+
+ // update the pointer to the last queued command buffer
+ // this is needed to implement synchronize()
+ ctx->cmd_buf_last = cmd_buf;
+ }
+ }
+
+ dispatch_apply(n_cb, ctx->d_queue, ctx->encode_async);
+
+ // for debugging: block until graph is computed
+ //[ctx->cmd_buf_last waitUntilCompleted];
+
+ // enter here only when capturing in order to wait for all computation to finish
+ // otherwise, we leave the graph to compute asynchronously
+ if (!use_capture && ctx->capture_started) {
+ // wait for completion and check status of each command buffer
+ // needed to detect if the device ran out-of-memory for example (#1881)
+ {
+ id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[n_cb].obj;
+ [cmd_buf waitUntilCompleted];
+
+ MTLCommandBufferStatus status = [cmd_buf status];
+ if (status != MTLCommandBufferStatusCompleted) {
+ GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, n_cb, status);
+ if (status == MTLCommandBufferStatusError) {
+ GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
+ }
+
+ return GGML_STATUS_FAILED;
+ }
+ }
+
+ for (int i = 0; i < n_cb; ++i) {
+ id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[i].obj;
+ [cmd_buf waitUntilCompleted];
+
+ MTLCommandBufferStatus status = [cmd_buf status];
+ if (status != MTLCommandBufferStatusCompleted) {
+ GGML_LOG_INFO("%s: command buffer %d failed with status %lu\n", __func__, i, status);
+ if (status == MTLCommandBufferStatusError) {
+ GGML_LOG_INFO("error: %s\n", [[cmd_buf error].localizedDescription UTF8String]);
+ }
+
+ return GGML_STATUS_FAILED;
+ }
+
+ id<MTLCommandBuffer> next_buffer = (i + 1 < n_cb ? ctx->cmd_bufs[i + 1].obj : nil);
+ if (!next_buffer) {
+ continue;
+ }
+
+ const bool next_queued = ([next_buffer status] != MTLCommandBufferStatusNotEnqueued);
+ if (next_queued) {
+ continue;
+ }
+
+ if (ctx->abort_callback && ctx->abort_callback(ctx->abort_callback_data)) {
+ GGML_LOG_INFO("%s: command buffer %d aborted", __func__, i);
+ return GGML_STATUS_ABORTED;
+ }
+
+ [next_buffer commit];
+ }
+
+ [ctx->capture_scope endScope];
+ [[MTLCaptureManager sharedCaptureManager] stopCapture];
+ }
+ }
+
+ return GGML_STATUS_SUCCESS;
+}
+
+void ggml_metal_graph_optimize(ggml_metal_t ctx, struct ggml_cgraph * gf) {
+ //const int64_t t_start = ggml_time_us();
+
+ if (ctx->use_graph_optimize) {
+ ggml_graph_optimize(gf);
+ }
+
+ //printf("%s: graph optimize took %.3f ms\n", __func__, (ggml_time_us() - t_start) / 1000.0);
+}
+
+void ggml_metal_event_record(ggml_metal_t ctx, ggml_metal_event_t ev) {
+ @autoreleasepool {
+ id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
+ id<MTLCommandBuffer> cmd_buf = [queue commandBuffer];
+
+ ggml_metal_event_encode_signal(ev, cmd_buf);
+
+ [cmd_buf commit];
+
+ [ctx->cmd_bufs_ext addObject:cmd_buf];
+ ctx->cmd_buf_last = cmd_buf;
+
+ [cmd_buf retain];
+ }
+}
+
+void ggml_metal_event_wait(ggml_metal_t ctx, ggml_metal_event_t ev) {
+ @autoreleasepool {
+ id<MTLCommandQueue> queue = ggml_metal_device_get_queue(ctx->dev);
+ id<MTLCommandBuffer> cmd_buf = [queue commandBuffer];
+
+ ggml_metal_event_encode_wait(ev, cmd_buf);
+
+ [cmd_buf commit];
+
+ [ctx->cmd_bufs_ext addObject:cmd_buf];
+ ctx->cmd_buf_last = cmd_buf;
+
+ [cmd_buf retain];
+ }
+}
+
+ggml_metal_event_t ggml_metal_get_ev_cpy(ggml_metal_t ctx) {
+ return ctx->ev_cpy;
+}
+
+void ggml_metal_set_n_cb(ggml_metal_t ctx, int n_cb) {
+ if (ctx->n_cb != n_cb) {
+ ctx->n_cb = MIN(n_cb, GGML_METAL_MAX_COMMAND_BUFFERS);
+
+ if (ctx->n_cb > 2) {
+ GGML_LOG_WARN("%s: n_cb = %d, using n_cb > 2 is not recommended and can degrade the performance in some cases\n", __func__, n_cb);
+ }
+ }
+
+ if (ctx->encode_async) {
+ Block_release(ctx->encode_async);
+ }
+
+ ctx->encode_async = Block_copy(^(size_t iter) {
+ const int cb_idx = iter;
+ const int n_cb_l = ctx->n_cb;
+
+ const int n_nodes_0 = ctx->n_nodes_0;
+ const int n_nodes_1 = ctx->n_nodes_1;
+
+ const int n_nodes_per_cb = ctx->n_nodes_per_cb;
+
+ int idx_start = 0;
+ int idx_end = n_nodes_0;
+
+ if (cb_idx < n_cb_l) {
+ idx_start = n_nodes_0 + ( (cb_idx + 0) * n_nodes_per_cb);
+ idx_end = n_nodes_0 + (MIN((cb_idx == n_cb_l - 1) ? n_nodes_1 : (cb_idx + 1) * n_nodes_per_cb, n_nodes_1));
+ }
+
+ id<MTLCommandBuffer> cmd_buf = ctx->cmd_bufs[cb_idx].obj;
+
+ ggml_metal_op_t ctx_op = ggml_metal_op_init(
+ ctx->dev,
+ cmd_buf,
+ ctx->gf,
+ idx_start,
+ idx_end,
+ ctx->use_fusion,
+ ctx->use_concurrency,
+ ctx->capture_next_compute,
+ ctx->debug_graph,
+ ctx->debug_fusion);
+
+ for (int idx = 0; idx < ggml_metal_op_n_nodes(ctx_op); ++idx) {
+ const int res = ggml_metal_op_encode(ctx_op, idx);
+ if (res == 0) {
+ break;
+ }
+
+ idx += res - 1;
+ }
+
+ ggml_metal_op_free(ctx_op);
+
+ if (cb_idx < 2 || ctx->abort_callback == NULL) {
+ [cmd_buf commit];
+ }
+ });
+}
+
+void ggml_metal_set_abort_callback(ggml_metal_t ctx, ggml_abort_callback abort_callback, void * user_data) {
+ ctx->abort_callback = abort_callback;
+ ctx->abort_callback_data = user_data;
+}
+
+bool ggml_metal_supports_family(ggml_metal_t ctx, int family) {
+ GGML_ASSERT(ctx->dev != nil);
+
+ id<MTLDevice> device = ggml_metal_device_get_obj(ctx->dev);
+
+ return [device supportsFamily:(MTLGPUFamilyApple1 + family - 1)];
+}
+
+void ggml_metal_capture_next_compute(ggml_metal_t ctx) {
+ ctx->capture_next_compute = true;
+}
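
To make the node partitioning in the encode_async block above easier to follow, here is the same index arithmetic restated as a standalone helper, a sketch using the same variable names: the command buffer at index n_cb gets the first n_nodes_0 nodes and is encoded by the main thread, while each of the n_cb worker buffers gets a chunk of n_nodes_per_cb nodes out of the remaining n_nodes_1.

```cpp
#include <algorithm>

// Sketch restating the idx_start/idx_end computation from the encode_async block.
struct node_range { int start; int end; };

static node_range cb_node_range(int cb_idx, int n_cb, int n_nodes_0, int n_nodes_1, int n_nodes_per_cb) {
    if (cb_idx >= n_cb) {
        // the extra command buffer (index n_cb) is encoded directly by the main thread
        return { 0, n_nodes_0 };
    }

    // worker cb_idx encodes nodes [start, end), clamped to the end of the remaining nodes
    const int start = n_nodes_0 + cb_idx * n_nodes_per_cb;
    const int end   = n_nodes_0 + std::min((cb_idx + 1) * n_nodes_per_cb, n_nodes_1);

    return { start, end };
}
```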
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.cpp b/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.cpp
new file mode 100644
index 0000000..517559d
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.cpp
@@ -0,0 +1,1875 @@
+#include "ggml-metal-device.h"
+
+#include "ggml-metal-impl.h"
+
+#include "ggml-impl.h"
+
+#include <cassert>
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+struct ggml_metal_device_deleter {
+ void operator()(ggml_metal_device_t ctx) {
+ ggml_metal_device_free(ctx);
+ }
+};
+
+typedef std::unique_ptr<ggml_metal_device, ggml_metal_device_deleter> ggml_metal_device_ptr;
+
+ggml_metal_device_t ggml_metal_device_get(int device) {
+ static std::vector<ggml_metal_device_ptr> devs;
+
+ devs.emplace_back(ggml_metal_device_init(device));
+
+ return devs.back().get();
+}
+
+struct ggml_metal_pipelines {
+ std::unordered_map<std::string, ggml_metal_pipeline_t> data;
+};
+
+ggml_metal_pipelines_t ggml_metal_pipelines_init(void) {
+ ggml_metal_pipelines_t res = new ggml_metal_pipelines();
+
+ return res;
+}
+
+void ggml_metal_pipelines_free(ggml_metal_pipelines_t ppls) {
+ if (!ppls) {
+ return;
+ }
+
+ for (auto it = ppls->data.begin(); it != ppls->data.end(); ++it) {
+ ggml_metal_pipeline_free(it->second);
+ }
+
+ delete ppls;
+}
+
+void ggml_metal_pipelines_add(ggml_metal_pipelines_t ppls, const char * name, ggml_metal_pipeline_t pipeline) {
+ ppls->data[name] = pipeline;
+}
+
+ggml_metal_pipeline_t ggml_metal_pipelines_get(ggml_metal_pipelines_t ppls, const char * name) {
+ if (ppls->data.find(name) == ppls->data.end()) {
+ return nullptr;
+ }
+
+ return ppls->data[name];
+}
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_base(ggml_metal_library_t lib, ggml_op op) {
+ char base[256];
+ char name[256];
+
+ const char * op_str = "undefined";
+ switch (op) {
+ case GGML_OP_ADD_ID: op_str = "add_id"; break;
+ case GGML_OP_CONCAT: op_str = "concat"; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_%s", op_str);
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cpy(ggml_metal_library_t lib, ggml_type tsrc, ggml_type tdst) {
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_cpy_%s_%s", ggml_type_name(tsrc), ggml_type_name(tdst));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pool_1d(ggml_metal_library_t lib, const ggml_tensor * op, ggml_op_pool op_pool) {
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32 && op->src[0]->type == op->type);
+
+ const char * pool_str = "undefined";
+ switch (op_pool) {
+ case GGML_OP_POOL_AVG: pool_str = "avg"; break;
+ case GGML_OP_POOL_MAX: pool_str = "max"; break;
+ default: GGML_ASSERT(false && "not implemented");
+ };
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, sizeof(base), "kernel_pool_1d_%s_%s", pool_str, ggml_type_name(op->src[0]->type));
+ snprintf(name, sizeof(name), "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pool_2d(ggml_metal_library_t lib, const ggml_tensor * op, ggml_op_pool op_pool) {
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32 && op->src[0]->type == op->type);
+
+ const char * pool_str = "undefined";
+ switch (op_pool) {
+ case GGML_OP_POOL_AVG: pool_str = "avg"; break;
+ case GGML_OP_POOL_MAX: pool_str = "max"; break;
+ default: GGML_ASSERT(false && "not implemented");
+ };
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_pool_2d_%s_%s", pool_str, ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_get_rows(ggml_metal_library_t lib, ggml_type tsrc) {
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_get_rows_%s", ggml_type_name(tsrc));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_set_rows(ggml_metal_library_t lib, ggml_type tidx, ggml_type tdst) {
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_set_rows_%s_%s", ggml_type_name(tdst), ggml_type_name(tidx));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_diag(ggml_metal_library_t lib, const ggml_tensor * op) {
+ char base[256];
+ char name[256];
+
+ const int n = op->src[0]->ne[0];
+
+ snprintf(base, 256, "kernel_diag_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s_n=%d", base, n);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.nsg = 1;
+ res.smem = 0;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_repeat(ggml_metal_library_t lib, ggml_type tsrc) {
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_repeat_%s", ggml_type_name(tsrc));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_unary(ggml_metal_library_t lib, const ggml_tensor * op) {
+ char base[256];
+ char name[256];
+
+ int op_num = -1;
+
+ switch (op->op) {
+ case GGML_OP_SCALE: op_num = OP_UNARY_NUM_SCALE; break;
+ case GGML_OP_FILL: op_num = OP_UNARY_NUM_FILL; break;
+ case GGML_OP_CLAMP: op_num = OP_UNARY_NUM_CLAMP; break;
+ case GGML_OP_SQR: op_num = OP_UNARY_NUM_SQR; break;
+ case GGML_OP_SQRT: op_num = OP_UNARY_NUM_SQRT; break;
+ case GGML_OP_SIN: op_num = OP_UNARY_NUM_SIN; break;
+ case GGML_OP_COS: op_num = OP_UNARY_NUM_COS; break;
+ case GGML_OP_LOG: op_num = OP_UNARY_NUM_LOG; break;
+ case GGML_OP_LEAKY_RELU: op_num = OP_UNARY_NUM_LEAKY_RELU; break;
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(op)) {
+ case GGML_UNARY_OP_TANH: op_num = OP_UNARY_NUM_TANH; break;
+ case GGML_UNARY_OP_RELU: op_num = OP_UNARY_NUM_RELU; break;
+ case GGML_UNARY_OP_SIGMOID: op_num = OP_UNARY_NUM_SIGMOID; break;
+ case GGML_UNARY_OP_GELU: op_num = OP_UNARY_NUM_GELU; break;
+ case GGML_UNARY_OP_GELU_ERF: op_num = OP_UNARY_NUM_GELU_ERF; break;
+ case GGML_UNARY_OP_GELU_QUICK: op_num = OP_UNARY_NUM_GELU_QUICK; break;
+ case GGML_UNARY_OP_SILU: op_num = OP_UNARY_NUM_SILU; break;
+ case GGML_UNARY_OP_ELU: op_num = OP_UNARY_NUM_ELU; break;
+ case GGML_UNARY_OP_NEG: op_num = OP_UNARY_NUM_NEG; break;
+ case GGML_UNARY_OP_ABS: op_num = OP_UNARY_NUM_ABS; break;
+ case GGML_UNARY_OP_SGN: op_num = OP_UNARY_NUM_SGN; break;
+ case GGML_UNARY_OP_STEP: op_num = OP_UNARY_NUM_STEP; break;
+ case GGML_UNARY_OP_HARDSWISH: op_num = OP_UNARY_NUM_HARDSWISH; break;
+ case GGML_UNARY_OP_HARDSIGMOID: op_num = OP_UNARY_NUM_HARDSIGMOID; break;
+ case GGML_UNARY_OP_EXP: op_num = OP_UNARY_NUM_EXP; break;
+ case GGML_UNARY_OP_SOFTPLUS: op_num = OP_UNARY_NUM_SOFTPLUS; break;
+ case GGML_UNARY_OP_EXPM1: op_num = OP_UNARY_NUM_EXPM1; break;
+ default: GGML_ABORT("fatal error");
+ } break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ const char * t0_str = ggml_type_name(op->src[0]->type);
+ const char * t_str = ggml_type_name(op->type);
+
+ const bool is_c4 = op->src[0]->ne[0] % 4 == 0;
+ const bool is_cnt = ggml_is_contiguous(op->src[0]) && ggml_nelements(op) < 32768;
+
+ snprintf(base, 256, "kernel_unary_%s_%s%s", t0_str, t_str, is_c4 ? "_4" : "");
+ snprintf(name, 256, "%s_op=%d_cnt=%d", base, op_num, is_cnt);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, op_num, FC_UNARY + 0);
+ ggml_metal_cv_set_bool (cv, is_cnt, FC_UNARY + 1);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ res.c4 = is_c4;
+ res.cnt = is_cnt;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_glu(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(ggml_is_contiguous_1(op->src[0]));
+
+ char base[256];
+ char name[256];
+
+ const char * op_str = "undefined";
+ switch (op->op) {
+ case GGML_OP_GLU:
+ switch (ggml_get_glu_op(op)) {
+ case GGML_GLU_OP_REGLU: op_str = "reglu"; break;
+ case GGML_GLU_OP_GEGLU: op_str = "geglu"; break;
+ case GGML_GLU_OP_SWIGLU: op_str = "swiglu"; break;
+ case GGML_GLU_OP_SWIGLU_OAI: op_str = "swiglu_oai"; break;
+ case GGML_GLU_OP_GEGLU_ERF: op_str = "geglu_erf"; break;
+ case GGML_GLU_OP_GEGLU_QUICK: op_str = "geglu_quick"; break;
+ default: GGML_ABORT("fatal error");
+ } break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_%s_%s", op_str, ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_SUM);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_op_sum_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum_rows(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type));
+
+ char base[256];
+ char name[256];
+
+ const char * op_str = "undefined";
+ switch (op->op) {
+ case GGML_OP_SUM_ROWS:
+ op_str = "sum_rows"; break;
+ case GGML_OP_MEAN:
+ op_str = "mean"; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_%s_%s", op_str, ggml_type_name(op->src[0]->type));
+
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.smem = 32*sizeof(float);
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_blk(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(op->op == GGML_OP_CUMSUM);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_cumsum_blk_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_add(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(op->op == GGML_OP_CUMSUM);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_cumsum_add_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_tri(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(op->op == GGML_OP_TRI);
+ GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type));
+
+ char base[256];
+ char name[256];
+
+ const char * op_str = "tri";
+ const int ttype = op->op_params[0];
+
+ snprintf(base, 256, "kernel_%s_%s_%d", op_str, ggml_type_name(op->src[0]->type), ttype);
+
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_soft_max(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(!op->src[1] || op->src[1]->type == GGML_TYPE_F16 || op->src[1]->type == GGML_TYPE_F32);
+
+ char base[256];
+ char name[256];
+
+ const char * suffix = "";
+
+ if (op->src[0]->ne[0] % 4 == 0) {
+ suffix = "_4";
+ }
+
+ const ggml_type tsrc1 = op->src[1] ? op->src[1]->type : GGML_TYPE_F32;
+
+ snprintf(base, 256, "kernel_soft_max_%s%s", ggml_type_name(tsrc1), suffix);
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.smem = 32*sizeof(float);
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+
+ char base[256];
+ char name[256];
+
+ const char * suffix = "";
+
+ if (op->src[1]->ne[0] % 4 == 0) {
+ suffix = "_4";
+ }
+
+ snprintf(base, 256, "kernel_ssm_conv_%s_%s%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type), suffix);
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv_batched(ggml_metal_library_t lib, const ggml_tensor * op, int ssm_conv_bs) {
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+
+ char base[256];
+ char name[256];
+
+ const char * suffix = "";
+ if (op->src[1]->ne[0] % 4 == 0) {
+ suffix = "_4";
+ }
+
+ snprintf(base, 256, "kernel_ssm_conv_%s_%s_batched%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type), suffix);
+ snprintf(name, 256, "%s_ssm_conv_bs=%d", base, ssm_conv_bs);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, ssm_conv_bs, FC_SSM_CONV + 0);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_scan(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+
+ char base[256];
+ char name[256];
+
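+    // enough simdgroups (32 threads each) to cover the ne00 elements of the inner dimension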
+ const int nsg = (ne00 + 31)/32;
+
+ snprintf(base, 256, "kernel_ssm_scan_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s_nsg=%d", base, nsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ // Shared memory layout:
+ // - sgptg * NW floats for partial sums (nsg * 32)
+ // - sgptg floats for shared_x_dt (nsg)
+ // - sgptg floats for shared_dA (nsg)
+ // Total: nsg * (32 + 2) floats
+ res.smem = (32 + 2)*sizeof(float)*nsg;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rwkv(ggml_metal_library_t lib, const ggml_tensor * op) {
+ char base[256];
+ char name[256];
+
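+    // C: total number of channels, H: number of heads (the kernels require a head size of 64)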
+ const int64_t C = op->ne[0];
+ const int64_t H = op->src[0]->ne[1];
+
+ switch (op->op) {
+ case GGML_OP_RWKV_WKV6:
+ {
+ GGML_ASSERT(op->src[5]->type == GGML_TYPE_F32);
+ GGML_ASSERT(C % H == 0);
+ GGML_ASSERT(C / H == 64);
+
+ snprintf(base, 256, "kernel_rwkv_wkv6_%s", ggml_type_name(op->src[0]->type));
+ } break;
+ case GGML_OP_RWKV_WKV7:
+ {
+ GGML_ASSERT(op->src[6]->type == GGML_TYPE_F32);
+ GGML_ASSERT(C % H == 0);
+ GGML_ASSERT(C / H == 64);
+
+ snprintf(base, 256, "kernel_rwkv_wkv7_%s", ggml_type_name(op->src[0]->type));
+ } break;
+ default:
+ GGML_ABORT("fatal error");
+ }
+
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_solve_tri(ggml_metal_library_t lib, const ggml_tensor * op) {
+ char base[256];
+ char name[256];
+
+ const int nsg = 8;
+ const int n = op->src[1]->ne[1];
+ const int k = op->src[1]->ne[0];
+
+ snprintf(base, 256, "kernel_solve_tri_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s_nsg=%d_n=%d_k=%d", base, nsg, n, k);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, nsg, FC_SOLVE_TRI + 0);
+ ggml_metal_cv_set_int16(cv, n, FC_SOLVE_TRI + 1);
+ ggml_metal_cv_set_int16(cv, k, FC_SOLVE_TRI + 2);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
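+    // smem: nsg rows of n floats (n padded up to a multiple of 32), rounded up to a 16-byte multiple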
+ res.nsg = nsg;
+ res.smem = GGML_PAD(GGML_PAD(n, 32)*nsg*sizeof(float), 16);
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_ext(ggml_metal_library_t lib, ggml_type tsrc0, ggml_type tsrc1, int nsg, int nxpsg, int r1ptg) {
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_mul_mv_ext_%s_%s_r1_%d", ggml_type_name(tsrc0), ggml_type_name(tsrc1), r1ptg);
+ snprintf(name, 256, "%s_nsg=%d_nxpsg=%d", base, nsg, nxpsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, nsg, FC_MUL_MV + 0);
+ ggml_metal_cv_set_int16(cv, nxpsg, FC_MUL_MV + 1);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm(ggml_metal_library_t lib, const ggml_tensor * op) {
+ char base[256];
+ char name[256];
+
+ const ggml_type tsrc0 = op->src[0]->type;
+ const ggml_type tsrc1 = op->src[1]->type;
+
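+    // bc_inp/bc_out: the input/output dims are not multiples of the kernel block sizes, so the kernel must bounds-check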
+ const bool bc_inp = op->src[0]->ne[0] % 32 != 0;
+ const bool bc_out = op->ne[0] % 64 != 0 || op->ne[1] % 32 != 0;
+
+ snprintf(base, 256, "kernel_mul_mm_%s_%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1));
+ snprintf(name, 256, "%s_bci=%d_bco=%d", base, bc_inp, bc_out);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_bool(cv, bc_inp, FC_MUL_MM + 0);
+ ggml_metal_cv_set_bool(cv, bc_out, FC_MUL_MM + 1);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+    // when the output size is not a multiple of 64x32, extra smem is needed to prevent out-of-bounds writes
+ res.smem = bc_out ? 8192 : 4096 + 2048;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+
+ char base[256];
+ char name[256];
+
+ int nsg = 0; // number of simdgroups
+ int nr0 = 0; // number of src0 rows per simdgroup
+ int nr1 = 1; // number of src1 rows per threadgroup
+
+ size_t smem = 0; // shared memory
+
+ const ggml_type tsrc0 = op->src[0]->type;
+ const ggml_type tsrc1 = op->src[1]->type;
+
+ const char * suffix = "";
+
+ // use custom matrix x vector kernel
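+    // for quantized types, the N_SG_*/N_R0_* constants give the per-type simdgroups per threadgroup and src0 rows per simdgroup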
+ switch (tsrc0) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
+ {
+ if (ne00 < 32) {
+ nsg = 1;
+ nr0 = 32;
+ nr1 = 1;
+ suffix = "_short";
+ } else {
+ nsg = std::min(4, (ne00 + 127) / 128);
+ nr0 = 2;
+ nr1 = 1;
+ smem = 32*sizeof(float)*nr0;
+ suffix = ne00 % 4 == 0 ? "_4" : "";
+ }
+ } break;
+ case GGML_TYPE_Q4_0:
+ {
+ nsg = N_SG_Q4_0;
+ nr0 = N_R0_Q4_0;
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ nsg = N_SG_Q4_1;
+ nr0 = N_R0_Q4_1;
+ } break;
+ case GGML_TYPE_Q5_0:
+ {
+ nsg = N_SG_Q5_0;
+ nr0 = N_R0_Q5_0;
+ } break;
+ case GGML_TYPE_Q5_1:
+ {
+ nsg = N_SG_Q5_1;
+ nr0 = N_R0_Q5_1;
+ } break;
+ case GGML_TYPE_Q8_0:
+ {
+ nsg = N_SG_Q8_0;
+ nr0 = N_R0_Q8_0;
+ smem = 32*sizeof(float)*N_R0_Q8_0;
+ } break;
+ case GGML_TYPE_MXFP4:
+ {
+ nsg = N_SG_MXFP4;
+ nr0 = N_R0_MXFP4;
+ smem = 32*sizeof(float);
+ } break;
+ case GGML_TYPE_Q2_K:
+ {
+ nsg = N_SG_Q2_K;
+ nr0 = N_R0_Q2_K;
+ } break;
+ case GGML_TYPE_Q3_K:
+ {
+ nsg = N_SG_Q3_K;
+ nr0 = N_R0_Q3_K;
+ } break;
+ case GGML_TYPE_Q4_K:
+ {
+ nsg = N_SG_Q4_K;
+ nr0 = N_R0_Q4_K;
+ } break;
+ case GGML_TYPE_Q5_K:
+ {
+ nsg = N_SG_Q5_K;
+ nr0 = N_R0_Q5_K;
+ } break;
+ case GGML_TYPE_Q6_K:
+ {
+ nsg = N_SG_Q6_K;
+ nr0 = N_R0_Q6_K;
+ } break;
+ case GGML_TYPE_IQ2_XXS:
+ {
+ nsg = N_SG_IQ2_XXS;
+ nr0 = N_R0_IQ2_XXS;
+ smem = 256*8+128;
+ } break;
+ case GGML_TYPE_IQ2_XS:
+ {
+ nsg = N_SG_IQ2_XS;
+ nr0 = N_R0_IQ2_XS;
+ smem = 512*8+128;
+ } break;
+ case GGML_TYPE_IQ3_XXS:
+ {
+ nsg = N_SG_IQ3_XXS;
+ nr0 = N_R0_IQ3_XXS;
+ smem = 256*4+128;
+ } break;
+ case GGML_TYPE_IQ3_S:
+ {
+ nsg = N_SG_IQ3_S;
+ nr0 = N_R0_IQ3_S;
+ smem = 512*4;
+ } break;
+ case GGML_TYPE_IQ2_S:
+ {
+ nsg = N_SG_IQ2_S;
+ nr0 = N_R0_IQ2_S;
+ } break;
+ case GGML_TYPE_IQ1_S:
+ {
+ nsg = N_SG_IQ1_S;
+ nr0 = N_R0_IQ1_S;
+ } break;
+ case GGML_TYPE_IQ1_M:
+ {
+ nsg = N_SG_IQ1_M;
+ nr0 = N_R0_IQ1_M;
+ } break;
+ case GGML_TYPE_IQ4_NL:
+ {
+ nsg = N_SG_IQ4_NL;
+ nr0 = N_R0_IQ4_NL;
+ smem = 32*sizeof(float);
+ } break;
+ case GGML_TYPE_IQ4_XS:
+ {
+ nsg = N_SG_IQ4_XS;
+ nr0 = N_R0_IQ4_XS;
+ smem = 32*sizeof(float);
+ } break;
+ default:
+ {
+ GGML_LOG_ERROR("Asserting on type %d\n", (int) tsrc0);
+ GGML_ABORT("not implemented");
+ }
+ };
+
+ snprintf(base, 256, "kernel_mul_mv_%s_%s%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1), suffix);
+ snprintf(name, 256, "%s_nsg=%d", base, nsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, nsg, FC_MUL_MV + 0);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ res.nr0 = nr0;
+ res.nr1 = nr1;
+ res.nsg = nsg;
+ res.smem = smem;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id_map0(ggml_metal_library_t lib, int ne02, int ne20) {
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_mul_mm_id_map0_ne20_%d", ne20);
+ snprintf(name, 256, "%s_ne02=%d", base, ne02);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.smem = (size_t) ne02*ne20*sizeof(uint16_t);
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id(ggml_metal_library_t lib, const ggml_tensor * op) {
+ char base[256];
+ char name[256];
+
+ const ggml_type tsrc0 = op->src[0]->type;
+ const ggml_type tsrc1 = op->src[1]->type;
+
+ const bool bc_inp = op->src[0]->ne[0] % 32 != 0;
+
+ snprintf(base, 256, "kernel_mul_mm_id_%s_%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1));
+ snprintf(name, 256, "%s_bci=%d", base, bc_inp);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_bool(cv, bc_inp, FC_MUL_MM + 0);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ res.smem = 8192;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_id(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+
+ char base[256];
+ char name[256];
+
+ int nsg = 0; // number of simdgroups
+ int nr0 = 0; // number of src0 rows per simdgroup
+ int nr1 = 1; // number of src1 rows per threadgroup
+
+ size_t smem = 0; // shared memory
+
+ const ggml_type tsrc0 = op->src[0]->type;
+ const ggml_type tsrc1 = op->src[1]->type;
+
+ const char * suffix = "";
+
+ // use custom matrix x vector kernel
+ switch (tsrc0) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
+ {
+ nsg = std::min(4, (ne00 + 127) / 128);
+ nr0 = 2;
+ nr1 = 1;
+ smem = 32*sizeof(float)*nr0;
+ suffix = ne00 % 4 == 0 ? "_4" : "";
+ } break;
+ case GGML_TYPE_Q4_0:
+ {
+ nsg = N_SG_Q4_0;
+ nr0 = N_R0_Q4_0;
+ } break;
+ case GGML_TYPE_Q4_1:
+ {
+ nsg = N_SG_Q4_1;
+ nr0 = N_R0_Q4_1;
+ } break;
+ case GGML_TYPE_Q5_0:
+ {
+ nsg = N_SG_Q5_0;
+ nr0 = N_R0_Q5_0;
+ } break;
+ case GGML_TYPE_Q5_1:
+ {
+ nsg = N_SG_Q5_1;
+ nr0 = N_R0_Q5_1;
+ } break;
+ case GGML_TYPE_Q8_0:
+ {
+ nsg = N_SG_Q8_0;
+ nr0 = N_R0_Q8_0;
+ smem = 32*sizeof(float)*N_R0_Q8_0;
+ } break;
+ case GGML_TYPE_MXFP4:
+ {
+ nsg = N_SG_MXFP4;
+ nr0 = N_R0_MXFP4;
+ smem = 32*sizeof(float);
+ } break;
+ case GGML_TYPE_Q2_K:
+ {
+ nsg = N_SG_Q2_K;
+ nr0 = N_R0_Q2_K;
+ } break;
+ case GGML_TYPE_Q3_K:
+ {
+ nsg = N_SG_Q3_K;
+ nr0 = N_R0_Q3_K;
+ } break;
+ case GGML_TYPE_Q4_K:
+ {
+ nsg = N_SG_Q4_K;
+ nr0 = N_R0_Q4_K;
+ } break;
+ case GGML_TYPE_Q5_K:
+ {
+ nsg = N_SG_Q5_K;
+ nr0 = N_R0_Q5_K;
+ } break;
+ case GGML_TYPE_Q6_K:
+ {
+ nsg = N_SG_Q6_K;
+ nr0 = N_R0_Q6_K;
+ } break;
+ case GGML_TYPE_IQ2_XXS:
+ {
+ nsg = N_SG_IQ2_XXS;
+ nr0 = N_R0_IQ2_XXS;
+ smem = 256*8+128;
+ } break;
+ case GGML_TYPE_IQ2_XS:
+ {
+ nsg = N_SG_IQ2_XS;
+ nr0 = N_R0_IQ2_XS;
+ smem = 512*8+128;
+ } break;
+ case GGML_TYPE_IQ3_XXS:
+ {
+ nsg = N_SG_IQ3_XXS;
+ nr0 = N_R0_IQ3_XXS;
+ smem = 256*4+128;
+ } break;
+ case GGML_TYPE_IQ3_S:
+ {
+ nsg = N_SG_IQ3_S;
+ nr0 = N_R0_IQ3_S;
+ smem = 512*4;
+ } break;
+ case GGML_TYPE_IQ2_S:
+ {
+ nsg = N_SG_IQ2_S;
+ nr0 = N_R0_IQ2_S;
+ } break;
+ case GGML_TYPE_IQ1_S:
+ {
+ nsg = N_SG_IQ1_S;
+ nr0 = N_R0_IQ1_S;
+ } break;
+ case GGML_TYPE_IQ1_M:
+ {
+ nsg = N_SG_IQ1_M;
+ nr0 = N_R0_IQ1_M;
+ } break;
+ case GGML_TYPE_IQ4_NL:
+ {
+ nsg = N_SG_IQ4_NL;
+ nr0 = N_R0_IQ4_NL;
+ smem = 32*sizeof(float);
+ } break;
+ case GGML_TYPE_IQ4_XS:
+ {
+ nsg = N_SG_IQ4_XS;
+ nr0 = N_R0_IQ4_XS;
+ smem = 32*sizeof(float);
+ } break;
+ default:
+ {
+            GGML_LOG_ERROR("Asserting on type %d\n", (int) tsrc0);
+ GGML_ABORT("not implemented");
+ }
+ };
+
+ snprintf(base, 256, "kernel_mul_mv_id_%s_%s%s", ggml_type_name(tsrc0), ggml_type_name(tsrc1), suffix);
+ snprintf(name, 256, "%s_nsg=%d", base, nsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, nsg, FC_MUL_MV + 0);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ res.nr0 = nr0;
+ res.nr1 = nr1;
+ res.nsg = nsg;
+ res.smem = smem;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argmax(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(ggml_is_contiguous_1(op->src[0]));
+ GGML_ASSERT(op->src[0]->nb[0] == ggml_type_size(op->src[0]->type));
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_argmax_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.smem = 32*(sizeof(float) + sizeof(int32_t));
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_ARGSORT);
+
+ char base[256];
+ char name[256];
+
+ ggml_sort_order order = (ggml_sort_order) op->op_params[0];
+
+ const char * order_str = "undefined";
+ switch (order) {
+ case GGML_SORT_ORDER_ASC: order_str = "asc"; break;
+ case GGML_SORT_ORDER_DESC: order_str = "desc"; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_argsort_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str);
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort_merge(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_ARGSORT);
+
+ char base[256];
+ char name[256];
+
+ ggml_sort_order order = (ggml_sort_order) op->op_params[0];
+
+ const char * order_str = "undefined";
+ switch (order) {
+ case GGML_SORT_ORDER_ASC: order_str = "asc"; break;
+ case GGML_SORT_ORDER_DESC: order_str = "desc"; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_argsort_merge_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str);
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+// note: reuse the argsort kernel for top_k
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_TOP_K);
+
+ char base[256];
+ char name[256];
+
+    // note: the top_k kernel always sorts in descending order
+ ggml_sort_order order = GGML_SORT_ORDER_DESC;
+
+ const char * order_str = "undefined";
+ switch (order) {
+ case GGML_SORT_ORDER_ASC: order_str = "asc"; break;
+ case GGML_SORT_ORDER_DESC: order_str = "desc"; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_argsort_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str);
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k_merge(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_TOP_K);
+
+ char base[256];
+ char name[256];
+
+ ggml_sort_order order = GGML_SORT_ORDER_DESC;
+
+ const char * order_str = "undefined";
+ switch (order) {
+ case GGML_SORT_ORDER_ASC: order_str = "asc"; break;
+ case GGML_SORT_ORDER_DESC: order_str = "desc"; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_argsort_merge_%s_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->type), order_str);
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_pad(
+ ggml_metal_library_t lib,
+ const struct ggml_tensor * op,
+ bool has_mask,
+ int32_t ncpsg) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+ GGML_UNUSED(op);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_%s",
+ "flash_attn_ext_pad");
+
+ snprintf(name, 256, "%s_mask=%d_ncpsg=%d",
+ base,
+ has_mask,
+ ncpsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT_PAD + 0);
+ //ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT_PAD + 1);
+ //ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT_PAD + 2);
+ //ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT_PAD + 3);
+
+ //ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT_PAD + 20);
+ //ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT_PAD + 21);
+ //ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT_PAD + 22);
+ //ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_PAD + 23);
+ //ggml_metal_cv_set_int32(cv, nqptg, FC_FLASH_ATTN_EXT_PAD + 24);
+ ggml_metal_cv_set_int32(cv, ncpsg, FC_FLASH_ATTN_EXT_PAD + 25);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_blk(
+ ggml_metal_library_t lib,
+ const struct ggml_tensor * op,
+ int32_t nqptg,
+ int32_t ncpsg) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+ GGML_UNUSED(op);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_%s",
+ "flash_attn_ext_blk");
+
+ snprintf(name, 256, "%s_nqptg=%d_ncpsg=%d",
+ base,
+ nqptg,
+ ncpsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ //ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT_BLK + 0);
+ //ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT_BLK + 1);
+ //ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT_BLK + 2);
+ //ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT_BLK + 3);
+
+ //ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT_BLK + 20);
+ //ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT_BLK + 21);
+ //ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT_BLK + 22);
+ //ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_BLK + 23);
+ ggml_metal_cv_set_int32(cv, nqptg, FC_FLASH_ATTN_EXT_BLK + 24);
+ ggml_metal_cv_set_int32(cv, ncpsg, FC_FLASH_ATTN_EXT_BLK + 25);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext(
+ ggml_metal_library_t lib,
+ const ggml_tensor * op,
+ bool has_mask,
+ bool has_sinks,
+ bool has_bias,
+ bool has_scap,
+ bool has_kvpad,
+ int32_t nsg) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+ char base[256];
+ char name[256];
+
+ const int32_t dk = (int32_t) op->src[1]->ne[0];
+ const int32_t dv = (int32_t) op->src[2]->ne[0];
+
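+    // ns10/ns20: row strides of K and V in units of nb[0] (elements, or blocks for quantized types)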
+ const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0];
+ const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0];
+
+ // do bounds checks for the mask?
+ const bool bc_mask = op->src[3] && (op->src[3]->ne[1] % 8 != 0);
+
+ snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d",
+ "flash_attn_ext",
+ ggml_type_name(op->src[1]->type),
+ dk,
+ dv);
+
+ snprintf(name, 256, "%s_mask=%d_sinks=%d_bias=%d_scap=%d_kvpad=%d_bcm=%d_ns10=%d_ns20=%d_nsg=%d",
+ base,
+ has_mask,
+ has_sinks,
+ has_bias,
+ has_scap,
+ has_kvpad,
+ bc_mask,
+ ns10,
+ ns20,
+ nsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT + 0);
+ ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT + 1);
+ ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT + 2);
+ ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT + 3);
+ ggml_metal_cv_set_bool(cv, has_kvpad, FC_FLASH_ATTN_EXT + 4);
+
+ ggml_metal_cv_set_bool(cv, bc_mask, FC_FLASH_ATTN_EXT + 10);
+
+ ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT + 20);
+ ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT + 21);
+ ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT + 22);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec(
+ ggml_metal_library_t lib,
+ const ggml_tensor * op,
+ bool has_mask,
+ bool has_sinks,
+ bool has_bias,
+ bool has_scap,
+ bool has_kvpad,
+ int32_t nsg,
+ int32_t nwg) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+ char base[256];
+ char name[256];
+
+ const int32_t dk = (int32_t) op->src[1]->ne[0];
+ const int32_t dv = (int32_t) op->src[2]->ne[0];
+
+ const int32_t ns10 = op->src[1]->nb[1]/op->src[1]->nb[0];
+ const int32_t ns20 = op->src[2]->nb[1]/op->src[2]->nb[0];
+
+ snprintf(base, 256, "kernel_%s_%s_dk%d_dv%d",
+ "flash_attn_ext_vec",
+ ggml_type_name(op->src[1]->type),
+ dk,
+ dv);
+
+ snprintf(name, 256, "%s_mask=%d_sink=%d_bias=%d_scap=%d_kvpad=%d_ns10=%d_ns20=%d_nsg=%d_nwg=%d",
+ base,
+ has_mask,
+ has_sinks,
+ has_bias,
+ has_scap,
+ has_kvpad,
+ ns10,
+ ns20,
+ nsg, nwg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_bool(cv, has_mask, FC_FLASH_ATTN_EXT_VEC + 0);
+ ggml_metal_cv_set_bool(cv, has_sinks, FC_FLASH_ATTN_EXT_VEC + 1);
+ ggml_metal_cv_set_bool(cv, has_bias, FC_FLASH_ATTN_EXT_VEC + 2);
+ ggml_metal_cv_set_bool(cv, has_scap, FC_FLASH_ATTN_EXT_VEC + 3);
+ ggml_metal_cv_set_bool(cv, has_kvpad, FC_FLASH_ATTN_EXT_VEC + 4);
+
+ ggml_metal_cv_set_int32(cv, ns10, FC_FLASH_ATTN_EXT_VEC + 20);
+ ggml_metal_cv_set_int32(cv, ns20, FC_FLASH_ATTN_EXT_VEC + 21);
+ ggml_metal_cv_set_int32(cv, nsg, FC_FLASH_ATTN_EXT_VEC + 22);
+ ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_VEC + 23);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(
+ ggml_metal_library_t lib,
+ const ggml_tensor * op,
+ int32_t dv,
+ int32_t nwg) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_flash_attn_ext_vec_reduce");
+ snprintf(name, 256, "%s_dv=%d_nwg=%d", base, dv, nwg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int32(cv, dv, FC_FLASH_ATTN_EXT_VEC_REDUCE + 0);
+ ggml_metal_cv_set_int32(cv, nwg, FC_FLASH_ATTN_EXT_VEC_REDUCE + 1);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+
+ GGML_UNUSED(op);
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin(ggml_metal_library_t lib, const ggml_tensor * op, int32_t n_fuse) {
+ char base[256];
+ char name[256];
+
+ int op_num = -1;
+
+ switch (op->op) {
+ case GGML_OP_ADD: op_num = 0; break;
+ case GGML_OP_SUB: op_num = 1; break;
+ case GGML_OP_MUL: op_num = 2; break;
+ case GGML_OP_DIV: op_num = 3; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ const char * t0_str = ggml_type_name(op->src[0]->type);
+ const char * t1_str = ggml_type_name(op->src[1]->type);
+ const char * t_str = ggml_type_name(op->type);
+
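+    // is_c4: both input row sizes are divisible by 4, enabling the "_4" kernel variant
+    // is_rb: src0/src1 are contiguous, src1 is a single row, and the output is small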
+ const bool is_c4 = (op->src[0]->ne[0] % 4 == 0) && (op->src[1]->ne[0] % 4 == 0);
+
+ const bool is_rb = ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]) && (ggml_nrows(op->src[1]) == 1) && ggml_nelements(op) < 65536;
+
+ snprintf(base, 256, "kernel_bin_fuse_%s_%s_%s%s", t0_str, t1_str, t_str, is_c4 ? "_4" : "");
+ snprintf(name, 256, "%s_op=%d_nf=%d_rb=%d", base, op_num, n_fuse, is_rb);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, op_num, FC_BIN + 0);
+ ggml_metal_cv_set_int16(cv, n_fuse, FC_BIN + 1);
+ ggml_metal_cv_set_bool (cv, is_rb, FC_BIN + 2);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ res.c4 = is_c4;
+ res.cnt = is_rb;
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin_one(ggml_metal_library_t lib, ggml_op op) {
+ char base[256];
+ char name[256];
+
+ int op_num = -1;
+
+ switch (op) {
+ case GGML_OP_ADD: op_num = 0; break;
+ case GGML_OP_SUB: op_num = 1; break;
+ case GGML_OP_MUL: op_num = 2; break;
+ case GGML_OP_DIV: op_num = 3; break;
+ default: GGML_ABORT("fatal error");
+ };
+
+ snprintf(base, 256, "kernel_bin_fuse_%s_%s_%s", "f32", "f32", "f32");
+ snprintf(name, 256, "%s_op=%d_nf=%d", base, op_num, 1);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, op_num, FC_BIN + 0);
+ ggml_metal_cv_set_int16(cv, 1, FC_BIN + 1);
+ ggml_metal_cv_set_bool (cv, false, FC_BIN + 2);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_l2_norm(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_L2_NORM);
+
+ char base[256];
+ char name[256];
+
+ const bool is_c4 = op->src[0]->ne[0] % 4 == 0;
+
+ const char * t0_str = ggml_type_name(op->src[0]->type);
+ const char * t_str = ggml_type_name(op->type);
+
+ snprintf(base, 256, "kernel_l2_norm_%s_%s%s", t0_str, t_str, is_c4 ? "_4" : "");
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.c4 = is_c4;
+ res.smem = 32*sizeof(float);
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_group_norm(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_GROUP_NORM);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_group_norm_f32");
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.smem = 32*sizeof(float);
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_norm(ggml_metal_library_t lib, const ggml_tensor * op, int n_fuse) {
+ assert(op->op == GGML_OP_NORM || op->op == GGML_OP_RMS_NORM);
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+ char base[256];
+ char name[256];
+
+ const char * suffix = "";
+ if (op->ne[0] % 4 == 0) {
+ suffix = "_4";
+ }
+
+ switch (op->op) {
+ case GGML_OP_NORM:
+ switch (n_fuse) {
+ case 1: snprintf(base, 256, "kernel_norm_f32%s", suffix); break;
+ case 2: snprintf(base, 256, "kernel_norm_mul_f32%s", suffix); break;
+ case 3: snprintf(base, 256, "kernel_norm_mul_add_f32%s", suffix); break;
+ default: GGML_ABORT("fatal error");
+ } break;
+ case GGML_OP_RMS_NORM:
+ switch (n_fuse) {
+ case 1: snprintf(base, 256, "kernel_rms_norm_f32%s", suffix); break;
+ case 2: snprintf(base, 256, "kernel_rms_norm_mul_f32%s", suffix); break;
+ case 3: snprintf(base, 256, "kernel_rms_norm_mul_add_f32%s", suffix); break;
+ default: GGML_ABORT("fatal error");
+ } break;
+ default: GGML_ABORT("fatal error");
+ }
+
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ res.smem = 32*sizeof(float);
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rope(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_ROPE);
+
+ char base[256];
+ char name[256];
+
+ const int mode = ((const int32_t *) op->op_params)[2];
+
+ const bool is_neox = mode & GGML_ROPE_TYPE_NEOX;
+ const bool is_mrope = mode & GGML_ROPE_TYPE_MROPE;
+ const bool is_imrope = mode == GGML_ROPE_TYPE_IMROPE;
+ const bool is_vision = mode == GGML_ROPE_TYPE_VISION;
+
+ if (is_neox) {
+ snprintf(base, 256, "kernel_rope_neox_%s", ggml_type_name(op->src[0]->type));
+ } else if ((is_mrope || is_imrope) && !is_vision) {
+ GGML_ASSERT(op->src[1]->ne[0]*4 >= op->src[0]->ne[2]); // need at least 4 pos per token
+ snprintf(base, 256, "kernel_rope_multi_%s", ggml_type_name(op->src[0]->type));
+ } else if (is_vision) {
+ GGML_ASSERT(op->src[1]->ne[0]*4 >= op->src[0]->ne[2]); // need at least 4 pos per token
+ snprintf(base, 256, "kernel_rope_vision_%s", ggml_type_name(op->src[0]->type));
+ } else {
+ snprintf(base, 256, "kernel_rope_norm_%s", ggml_type_name(op->src[0]->type));
+ }
+
+ snprintf(name, 256, "%s_imrope=%d", base, is_imrope ? 1 : 0);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_bool(cv, is_imrope, FC_ROPE + 0);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_im2col(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_IM2COL);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_F32);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_im2col_%s", ggml_type_name(op->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_1d(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_CONV_TRANSPOSE_1D);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->type == GGML_TYPE_F32);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_conv_transpose_1d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_2d(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_CONV_TRANSPOSE_2D);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->type == GGML_TYPE_F32);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_conv_transpose_2d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_2d(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_CONV_2D);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->type == GGML_TYPE_F32);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_conv_2d_%s_%s", ggml_type_name(op->src[0]->type), ggml_type_name(op->src[1]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_upscale(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_UPSCALE);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_upscale_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_PAD);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_pad_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+    if (!res.pipeline) {
+        res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+    }
+
+    return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad_reflect_1d(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_PAD_REFLECT_1D);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_pad_reflect_1d_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_arange(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_ARANGE);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_arange_%s", ggml_type_name(op->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_timestep_embedding(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_TIMESTEP_EMBEDDING);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_timestep_embedding_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_adamw(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_OPT_STEP_ADAMW);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_opt_step_adamw_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_sgd(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_OPT_STEP_SGD);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_opt_step_sgd_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_memset(ggml_metal_library_t lib, const ggml_tensor * op) {
+ GGML_ASSERT(op->type == GGML_TYPE_I64);
+
+ char base[256];
+ char name[256];
+
+ snprintf(base, 256, "kernel_memset_%s", ggml_type_name(op->type));
+ snprintf(name, 256, "%s", base);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ res = ggml_metal_library_compile_pipeline(lib, base, name, nullptr);
+ }
+
+ return res;
+}
+
+ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_count_equal(ggml_metal_library_t lib, const ggml_tensor * op) {
+ assert(op->op == GGML_OP_COUNT_EQUAL);
+
+ GGML_TENSOR_LOCALS(int64_t, ne0, op->src[0], ne);
+
+ GGML_ASSERT(op->src[0]->type == op->src[1]->type);
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_I32);
+ GGML_ASSERT(op->type == GGML_TYPE_I64);
+
+    // note: the kernel only supports i32 output because Metal's atomic add supports only atomic_int
+ GGML_ASSERT(ggml_nelements(op->src[0]) < (1LL << 31));
+
+ char base[256];
+ char name[256];
+
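+    // use enough simdgroups (32 threads each) to cover the row length ne00, capped at 32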
+ int nsg = 1;
+ while (32*nsg < ne00 && nsg < 32) {
+ nsg *= 2;
+ }
+
+ snprintf(base, 256, "kernel_count_equal_%s", ggml_type_name(op->src[0]->type));
+ snprintf(name, 256, "%s_nsg=%d", base, nsg);
+
+ ggml_metal_pipeline_with_params res = ggml_metal_library_get_pipeline(lib, name);
+ if (!res.pipeline) {
+ ggml_metal_cv_t cv = ggml_metal_cv_init();
+
+ ggml_metal_cv_set_int16(cv, nsg, FC_COUNT_EQUAL + 0);
+
+ res = ggml_metal_library_compile_pipeline(lib, base, name, cv);
+
+ ggml_metal_cv_free(cv);
+ }
+
+ res.smem = 32 * sizeof(int32_t);
+ res.nsg = nsg;
+
+ return res;
+}
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.h b/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.h
new file mode 100644
index 0000000..93d7f6a
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.h
@@ -0,0 +1,290 @@
+#pragma once
+
+#include "ggml.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct ggml_metal_buffer_id {
+ void * metal; // id<MTLBuffer>
+ size_t offs;
+};
+
+typedef struct ggml_metal_device * ggml_metal_device_t;
+
+//
+// MTLFunctionConstantValues wrapper
+//
+
+typedef struct ggml_metal_cv * ggml_metal_cv_t;
+
+ggml_metal_cv_t ggml_metal_cv_init(void);
+void ggml_metal_cv_free(ggml_metal_cv_t cv);
+
+void ggml_metal_cv_set_int16(ggml_metal_cv_t cv, int16_t value, int32_t idx);
+void ggml_metal_cv_set_int32(ggml_metal_cv_t cv, int32_t value, int32_t idx);
+void ggml_metal_cv_set_bool (ggml_metal_cv_t cv, bool value, int32_t idx);
+
+//
+// MTLComputePipelineState wrapper
+//
+
+typedef struct ggml_metal_pipeline * ggml_metal_pipeline_t;
+
+ggml_metal_pipeline_t ggml_metal_pipeline_init(void);
+void ggml_metal_pipeline_free(ggml_metal_pipeline_t pipeline);
+
+// a collection of pipelines
+typedef struct ggml_metal_pipelines * ggml_metal_pipelines_t;
+
+ggml_metal_pipelines_t ggml_metal_pipelines_init(void);
+void ggml_metal_pipelines_free(ggml_metal_pipelines_t ppls);
+
+void ggml_metal_pipelines_add(ggml_metal_pipelines_t ppls, const char * name, ggml_metal_pipeline_t pipeline);
+ggml_metal_pipeline_t ggml_metal_pipelines_get(ggml_metal_pipelines_t ppls, const char * name);
+
+struct ggml_metal_pipeline_with_params {
+ ggml_metal_pipeline_t pipeline;
+
+    int nsg;     // number of simdgroups per threadgroup
+
+    int nr0;     // number of src0 rows processed per simdgroup
+    int nr1;     // number of src1 rows processed per threadgroup
+
+    size_t smem; // threadgroup (shared) memory required by the kernel, in bytes
+
+    bool c4;     // use the 4-element-chunk ("_4") kernel variant
+    bool cnt;    // use the contiguous single-row broadcast fast path
+};
+
+int ggml_metal_pipeline_max_theads_per_threadgroup(struct ggml_metal_pipeline_with_params pipeline);
+
+//
+// MTLCommandBuffer wrapper
+//
+
+typedef void * ggml_metal_cmd_buf_t;
+
+//
+// MTLComputeCommandEncoder wrapper
+//
+
+typedef struct ggml_metal_encoder * ggml_metal_encoder_t;
+
+ggml_metal_encoder_t ggml_metal_encoder_init(ggml_metal_cmd_buf_t cmd_buf_raw, bool concurrent);
+void ggml_metal_encoder_free(ggml_metal_encoder_t encoder);
+
+void ggml_metal_encoder_debug_group_push(ggml_metal_encoder_t encoder, const char * name);
+void ggml_metal_encoder_debug_group_pop (ggml_metal_encoder_t encoder);
+
+void ggml_metal_encoder_set_pipeline(ggml_metal_encoder_t encoder, struct ggml_metal_pipeline_with_params pipeline);
+
+void ggml_metal_encoder_set_bytes (ggml_metal_encoder_t encoder, void * data, size_t size, int idx);
+void ggml_metal_encoder_set_buffer(ggml_metal_encoder_t encoder, struct ggml_metal_buffer_id buffer, int idx);
+
+void ggml_metal_encoder_set_threadgroup_memory_size(ggml_metal_encoder_t encoder, size_t size, int idx);
+
+void ggml_metal_encoder_dispatch_threadgroups(ggml_metal_encoder_t encoder, int tg0, int tg1, int tg2, int tptg0, int tptg1, int tptg2);
+
+void ggml_metal_encoder_memory_barrier(ggml_metal_encoder_t encoder);
+
+void ggml_metal_encoder_end_encoding(ggml_metal_encoder_t encoder);
+
+//
+// MTLLibrary wrapper
+//
+
+typedef struct ggml_metal_library * ggml_metal_library_t;
+
+ggml_metal_library_t ggml_metal_library_init (ggml_metal_device_t dev);
+ggml_metal_library_t ggml_metal_library_init_from_source(ggml_metal_device_t dev, const char * source, bool verbose);
+
+void ggml_metal_library_free(ggml_metal_library_t lib);
+
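+// look up a previously compiled pipeline by name; the result has a null pipeline if it has not been compiled yet
+// compile the Metal function "base" (optionally specialized with function constants) and cache the result under "name"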
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline (ggml_metal_library_t lib, const char * name);
+struct ggml_metal_pipeline_with_params ggml_metal_library_compile_pipeline(ggml_metal_library_t lib, const char * base, const char * name, ggml_metal_cv_t cv);
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_base (ggml_metal_library_t lib, enum ggml_op op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cpy (ggml_metal_library_t lib, enum ggml_type tsrc, enum ggml_type tdst);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pool_1d (ggml_metal_library_t lib, const struct ggml_tensor * op, enum ggml_op_pool op_pool);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pool_2d (ggml_metal_library_t lib, const struct ggml_tensor * op, enum ggml_op_pool op_pool);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_get_rows (ggml_metal_library_t lib, enum ggml_type tsrc);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_set_rows (ggml_metal_library_t lib, enum ggml_type tidx, enum ggml_type tdst);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_diag (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_repeat (ggml_metal_library_t lib, enum ggml_type tsrc);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_unary (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_glu (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_sum_rows (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_blk (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_cumsum_add (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_tri (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_soft_max (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_conv_batched (ggml_metal_library_t lib, const struct ggml_tensor * op, int ssm_conv_bs);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_ssm_scan (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rwkv (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_solve_tri (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_ext (ggml_metal_library_t lib, enum ggml_type tsrc0, enum ggml_type tsrc1, int nsg, int nxpsg, int r1ptg);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id_map0 (ggml_metal_library_t lib, int ne02, int ne20);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mm_id (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_mul_mv_id (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argmax (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_argsort_merge (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_top_k_merge (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin (ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t n_fuse );
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_bin_one (ggml_metal_library_t lib, enum ggml_op op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_l2_norm (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_group_norm (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_norm (ggml_metal_library_t lib, const struct ggml_tensor * op, int32_t n_fuse);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_rope (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_im2col (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_1d (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_transpose_2d (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_conv_2d (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_upscale (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_pad_reflect_1d (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_arange (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_timestep_embedding(ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_adamw (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_opt_step_sgd (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_memset (ggml_metal_library_t lib, const struct ggml_tensor * op);
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_count_equal (ggml_metal_library_t lib, const struct ggml_tensor * op);
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_pad(
+ ggml_metal_library_t lib,
+ const struct ggml_tensor * op,
+ bool has_mask,
+ int32_t ncpsg);
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_blk(
+ ggml_metal_library_t lib,
+ const struct ggml_tensor * op,
+ int32_t nqptg,
+ int32_t ncpsg);
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext(
+ ggml_metal_library_t lib,
+ const struct ggml_tensor * op,
+ bool has_mask,
+ bool has_sinks,
+ bool has_bias,
+ bool has_scap,
+ bool has_kvpad,
+ int32_t nsg);
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec(
+ ggml_metal_library_t lib,
+ const struct ggml_tensor * op,
+ bool has_mask,
+ bool has_sinks,
+ bool has_bias,
+ bool has_scap,
+ bool has_kvpad,
+ int32_t nsg,
+ int32_t nwg);
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(
+ ggml_metal_library_t lib,
+ const struct ggml_tensor * op,
+ int32_t dv,
+ int32_t nwg);
+
+// MTLResidencySet wrapper
+
+typedef void * ggml_metal_rset_t;
+
+// a collection of residency sets (non-owning)
+typedef struct ggml_metal_rsets * ggml_metal_rsets_t;
+
+ggml_metal_rsets_t ggml_metal_rsets_init(void);
+void ggml_metal_rsets_free(ggml_metal_rsets_t rsets);
+
+//
+// device
+//
+
+struct ggml_metal_device_props {
+ int device;
+ char name[128];
+ char desc[128];
+
+ size_t max_buffer_size;
+ size_t max_working_set_size;
+ size_t max_theadgroup_memory_size;
+
+ bool has_simdgroup_reduction;
+ bool has_simdgroup_mm;
+ bool has_unified_memory;
+ bool has_bfloat;
+ bool has_tensor;
+ bool use_residency_sets;
+ bool use_shared_buffers;
+
+ bool supports_gpu_family_apple7;
+
+ int op_offload_min_batch_size;
+};
+
+typedef struct ggml_metal_event * ggml_metal_event_t;
+
+void ggml_metal_event_encode_signal(ggml_metal_event_t ev, ggml_metal_cmd_buf_t cmd_buf);
+void ggml_metal_event_encode_wait (ggml_metal_event_t ev, ggml_metal_cmd_buf_t cmd_buf);
+
+ggml_metal_device_t ggml_metal_device_init(int device);
+void ggml_metal_device_free(ggml_metal_device_t dev);
+
+ggml_metal_device_t ggml_metal_device_get(int device);
+
+void * ggml_metal_device_get_obj (ggml_metal_device_t dev); // id<MTLDevice>
+void * ggml_metal_device_get_queue(ggml_metal_device_t dev); // id<MTLCommandQueue>
+
+ggml_metal_library_t ggml_metal_device_get_library(ggml_metal_device_t dev);
+
+void ggml_metal_device_rsets_add(ggml_metal_device_t dev, ggml_metal_rset_t rset);
+void ggml_metal_device_rsets_rm (ggml_metal_device_t dev, ggml_metal_rset_t rset);
+
+void ggml_metal_device_rsets_keep_alive(ggml_metal_device_t dev);
+
+ggml_metal_event_t ggml_metal_device_event_init(ggml_metal_device_t dev);
+void ggml_metal_device_event_free(ggml_metal_device_t dev, ggml_metal_event_t ev);
+void ggml_metal_device_event_synchronize(ggml_metal_device_t dev, ggml_metal_event_t ev);
+
+void ggml_metal_device_get_memory(ggml_metal_device_t dev, size_t * free, size_t * total);
+bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_tensor * op);
+
+const struct ggml_metal_device_props * ggml_metal_device_get_props(ggml_metal_device_t dev);
+
+//
+// device buffers
+//
+
+typedef struct ggml_metal_buffer * ggml_metal_buffer_t;
+
+ggml_metal_buffer_t ggml_metal_buffer_init(ggml_metal_device_t dev, size_t size, bool shared);
+ggml_metal_buffer_t ggml_metal_buffer_map (ggml_metal_device_t dev, void * ptr, size_t size, size_t max_tensor_size);
+
+void ggml_metal_buffer_free (ggml_metal_buffer_t buf);
+void * ggml_metal_buffer_get_base (ggml_metal_buffer_t buf);
+bool ggml_metal_buffer_is_shared(ggml_metal_buffer_t buf);
+
+void ggml_metal_buffer_memset_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size);
+void ggml_metal_buffer_set_tensor (ggml_metal_buffer_t buf, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size);
+void ggml_metal_buffer_get_tensor (ggml_metal_buffer_t buf, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size);
+void ggml_metal_buffer_clear (ggml_metal_buffer_t buf, uint8_t value);
+
+// finds the Metal buffer that contains the tensor data on the GPU device
+// the assumption is that there is a 1-to-1 mapping between the host and device memory buffers, so we can find the
+// Metal buffer based on the host memory pointer
+//
+struct ggml_metal_buffer_id ggml_metal_buffer_get_id(ggml_metal_buffer_t buf, const struct ggml_tensor * t);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.m b/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.m
new file mode 100644
index 0000000..4ea0bfb
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-device.m
@@ -0,0 +1,1748 @@
+#import "ggml-metal-device.h"
+
+#import "ggml-impl.h"
+
+#include <Foundation/Foundation.h>
+
+#include <Metal/Metal.h>
+
+#include <stdatomic.h>
+
+#ifndef TARGET_OS_VISION
+#define TARGET_OS_VISION 0
+#endif
+
+// create residency sets only on macOS >= 15.0
+#if !TARGET_CPU_X86_64 && TARGET_OS_OSX && __MAC_OS_X_VERSION_MAX_ALLOWED >= 150000 || \
+ TARGET_OS_IOS && __IPHONE_OS_VERSION_MAX_ALLOWED >= 180000 || \
+ TARGET_OS_TV && __TV_OS_VERSION_MAX_ALLOWED >= 180000 || \
+ TARGET_OS_VISION && __VISION_OS_VERSION_MAX_ALLOWED >= 200000
+#define GGML_METAL_HAS_RESIDENCY_SETS 1
+#endif
+
+// overload of MTLGPUFamilyMetalX (not available in some environments)
+static const NSInteger MTLGPUFamilyMetal3_GGML = 5001;
+static const NSInteger MTLGPUFamilyMetal4_GGML = 5002;
+
+#if !GGML_METAL_EMBED_LIBRARY
+// Here to assist with NSBundle Path Hack
+@interface GGMLMetalClass : NSObject
+@end
+@implementation GGMLMetalClass
+@end
+#endif
+
+//
+// MTLFunctionConstantValues wrapper
+//
+
+struct ggml_metal_cv {
+ MTLFunctionConstantValues * obj;
+};
+
+ggml_metal_cv_t ggml_metal_cv_init(void) {
+ ggml_metal_cv_t res = calloc(1, sizeof(struct ggml_metal_cv));
+
+ res->obj = [[MTLFunctionConstantValues alloc] init];
+
+ return res;
+}
+
+void ggml_metal_cv_free(ggml_metal_cv_t cv) {
+ [cv->obj release];
+ free(cv);
+}
+
+void ggml_metal_cv_set_int16(ggml_metal_cv_t cv, int16_t value, int32_t idx) {
+ [cv->obj setConstantValue:&value type:MTLDataTypeShort atIndex:idx];
+}
+
+void ggml_metal_cv_set_int32(ggml_metal_cv_t cv, int32_t value, int32_t idx) {
+ [cv->obj setConstantValue:&value type:MTLDataTypeInt atIndex:idx];
+}
+
+void ggml_metal_cv_set_bool(ggml_metal_cv_t cv, bool value, int32_t idx) {
+ [cv->obj setConstantValue:&value type:MTLDataTypeBool atIndex:idx];
+}
+
+//
+// MTLComputePipelineState wrapper
+//
+
+struct ggml_metal_pipeline {
+ id<MTLComputePipelineState> obj;
+};
+
+ggml_metal_pipeline_t ggml_metal_pipeline_init(void) {
+ ggml_metal_pipeline_t res = calloc(1, sizeof(struct ggml_metal_pipeline));
+
+ *res = (struct ggml_metal_pipeline) {
+ /*.obj =*/ nil,
+ };
+
+ return res;
+}
+
+void ggml_metal_pipeline_free(ggml_metal_pipeline_t pipeline) {
+ [pipeline->obj release];
+
+ free(pipeline);
+}
+
+int ggml_metal_pipeline_max_theads_per_threadgroup(struct ggml_metal_pipeline_with_params pipeline) {
+ return pipeline.pipeline->obj.maxTotalThreadsPerThreadgroup;
+}
+
+struct ggml_metal_library {
+ id<MTLLibrary> obj;
+ id<MTLDevice> device;
+
+ ggml_metal_pipelines_t pipelines; // cache of compiled pipelines
+
+ NSLock * lock;
+};
+
+ggml_metal_library_t ggml_metal_library_init(ggml_metal_device_t dev) {
+ id<MTLLibrary> library = nil;
+ id<MTLDevice> device = ggml_metal_device_get_obj(dev);
+
+ // load library
+ //
+ // - first check if the library is embedded
+ // - then check if the library is in the bundle
+ // - if not found, load the source and compile it
+ // - if that fails, return NULL
+ //
+ // TODO: move to a function
+ {
+ const int64_t t_start = ggml_time_us();
+
+ NSError * error = nil;
+ NSString * src = nil;
+
+#if GGML_METAL_EMBED_LIBRARY
+ GGML_LOG_INFO("%s: using embedded metal library\n", __func__);
+
+ extern const char ggml_metallib_start[];
+ extern const char ggml_metallib_end[];
+
+ src = [[NSString alloc] initWithBytes:ggml_metallib_start length:(ggml_metallib_end-ggml_metallib_start) encoding:NSUTF8StringEncoding];
+#else
+
+#ifdef SWIFT_PACKAGE
+ NSBundle * bundle = SWIFTPM_MODULE_BUNDLE;
+#else
+ NSBundle * bundle = [NSBundle bundleForClass:[GGMLMetalClass class]];
+#endif
+
+ NSString * path_lib = [bundle pathForResource:@"default" ofType:@"metallib"];
+ if (path_lib == nil) {
+        // Try to find the resource in the directory where the current binary is located.
+ NSString * bin_cur = [[NSProcessInfo processInfo] arguments][0];
+ NSString * bin_dir = [bin_cur stringByDeletingLastPathComponent];
+
+ NSString * path_lib_default = [NSString pathWithComponents:@[bin_dir, @"default.metallib"]];
+ if ([[NSFileManager defaultManager] isReadableFileAtPath:path_lib_default]) {
+ GGML_LOG_INFO("%s: found '%s'\n", __func__, [path_lib_default UTF8String]);
+
+ NSDictionary * atts = [[NSFileManager defaultManager] attributesOfItemAtPath:path_lib_default error:&error];
+ if (atts && atts[NSFileType] == NSFileTypeSymbolicLink) {
+ // Optionally, if this is a symlink, try to resolve it.
+ path_lib_default = [[NSFileManager defaultManager] destinationOfSymbolicLinkAtPath:path_lib_default error:&error];
+ if (path_lib_default && [path_lib_default length] > 0 && ![[path_lib_default substringToIndex:1] isEqualToString:@"/"]) {
+ // It is a relative path, adding the binary directory as directory prefix.
+ path_lib_default = [NSString pathWithComponents:@[bin_dir, path_lib_default]];
+ }
+ if (!path_lib_default || ![[NSFileManager defaultManager] isReadableFileAtPath:path_lib_default]) {
+ // Link to the resource could not be resolved.
+ path_lib_default = nil;
+ } else {
+ GGML_LOG_INFO("%s: symlink resolved '%s'\n", __func__, [path_lib_default UTF8String]);
+ }
+ }
+ } else {
+ // The resource couldn't be found in the binary's directory.
+ path_lib_default = nil;
+ }
+
+ path_lib = path_lib_default;
+ }
+
+ if (path_lib != nil) {
+ // pre-compiled library found
+ NSURL * libURL = [NSURL fileURLWithPath:path_lib];
+ GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_lib UTF8String]);
+
+ library = [device newLibraryWithURL:libURL error:&error];
+ if (error) {
+ GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+ return nil;
+ }
+ } else {
+ GGML_LOG_INFO("%s: default.metallib not found, loading from source\n", __func__);
+
+ NSString * path_source;
+ NSString * path_resource = [[NSProcessInfo processInfo].environment objectForKey:@"GGML_METAL_PATH_RESOURCES"];
+
+ GGML_LOG_INFO("%s: GGML_METAL_PATH_RESOURCES = %s\n", __func__, path_resource ? [path_resource UTF8String] : "nil");
+
+ if (path_resource) {
+ path_source = [path_resource stringByAppendingPathComponent:@"ggml-metal.metal"];
+ } else {
+ path_source = [bundle pathForResource:@"ggml-metal" ofType:@"metal"];
+ }
+
+ if (path_source == nil) {
+ GGML_LOG_WARN("%s: error: could not use bundle path to find ggml-metal.metal, falling back to trying cwd\n", __func__);
+ path_source = @"ggml-metal.metal";
+ }
+
+ GGML_LOG_INFO("%s: loading '%s'\n", __func__, [path_source UTF8String]);
+
+ src = [NSString stringWithContentsOfFile:path_source encoding:NSUTF8StringEncoding error:&error];
+ if (error) {
+ GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+ return nil;
+ }
+ }
+#endif
+
+ if (!library) {
+ @autoreleasepool {
+ // dictionary of preprocessor macros
+ NSMutableDictionary * prep = [NSMutableDictionary dictionary];
+
+ if (ggml_metal_device_get_props(dev)->has_bfloat) {
+ [prep setObject:@"1" forKey:@"GGML_METAL_HAS_BF16"];
+ }
+
+ if (ggml_metal_device_get_props(dev)->has_tensor) {
+ [prep setObject:@"1" forKey:@"GGML_METAL_HAS_TENSOR"];
+ }
+
+#if GGML_METAL_EMBED_LIBRARY
+ [prep setObject:@"1" forKey:@"GGML_METAL_EMBED_LIBRARY"];
+#endif
+
+ MTLCompileOptions * options = [MTLCompileOptions new];
+ options.preprocessorMacros = prep;
+
+ //[options setFastMathEnabled:false];
+
+ library = [device newLibraryWithSource:src options:options error:&error];
+ if (error) {
+ GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+ return nil;
+ }
+
+#if !__has_feature(objc_arc)
+ [options release];
+#endif
+ }
+ }
+
+#if GGML_METAL_EMBED_LIBRARY
+ [src release];
+#endif // GGML_METAL_EMBED_LIBRARY
+
+ GGML_LOG_INFO("%s: loaded in %.3f sec\n", __func__, (ggml_time_us() - t_start) / 1e6);
+ }
+
+ ggml_metal_library_t res = calloc(1, sizeof(struct ggml_metal_library));
+
+ res->obj = library;
+ res->device = device;
+ res->pipelines = ggml_metal_pipelines_init();
+ res->lock = [NSLock new];
+
+ return res;
+}
+
+ggml_metal_library_t ggml_metal_library_init_from_source(ggml_metal_device_t dev, const char * source, bool verbose) {
+ if (source == NULL) {
+ GGML_LOG_ERROR("%s: source is NULL\n", __func__);
+ return NULL;
+ }
+
+ id<MTLDevice> device = ggml_metal_device_get_obj(dev);
+ id<MTLLibrary> library = nil;
+ NSError * error = nil;
+
+ const int64_t t_start = ggml_time_us();
+
+ NSString * src = [[NSString alloc] initWithBytes:source
+ length:strlen(source)
+ encoding:NSUTF8StringEncoding];
+ if (!src) {
+ GGML_LOG_ERROR("%s: failed to create NSString from source\n", __func__);
+ return NULL;
+ }
+
+ @autoreleasepool {
+ NSMutableDictionary * prep = [NSMutableDictionary dictionary];
+
+ MTLCompileOptions * options = [MTLCompileOptions new];
+ options.preprocessorMacros = prep;
+
+ library = [device newLibraryWithSource:src options:options error:&error];
+ if (error) {
+ if (verbose) {
+ GGML_LOG_ERROR("%s: error compiling source: %s\n", __func__, [[error description] UTF8String]);
+ } else {
+ GGML_LOG_ERROR("%s: error compiling source\n", __func__);
+ }
+ library = nil;
+ }
+
+ [options release];
+ }
+
+ [src release];
+
+ if (!library) {
+ if (verbose) {
+ GGML_LOG_ERROR("%s: failed to create Metal library from source\n", __func__);
+ }
+
+ return NULL;
+ }
+
+ if (verbose) {
+ GGML_LOG_INFO("%s: compiled in %.3f sec\n", __func__, (ggml_time_us() - t_start) / 1e6);
+ }
+
+ ggml_metal_library_t res = calloc(1, sizeof(struct ggml_metal_library));
+ if (!res) {
+ GGML_LOG_ERROR("%s: calloc failed\n", __func__);
+ return NULL;
+ }
+
+ res->obj = library;
+ res->device = device;
+ res->pipelines = ggml_metal_pipelines_init();
+ res->lock = [NSLock new];
+
+ return res;
+}
+
+void ggml_metal_library_free(ggml_metal_library_t lib) {
+ if (!lib) {
+ return;
+ }
+
+ if (lib->obj) {
+ [lib->obj release];
+ }
+
+ ggml_metal_pipelines_free(lib->pipelines);
+
+ [lib->lock release];
+
+ free(lib);
+}
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_get_pipeline(ggml_metal_library_t lib, const char * name) {
+ [lib->lock lock];
+
+ struct ggml_metal_pipeline_with_params res = {
+ /*.pipeline =*/ nil,
+ /*.nsg =*/ 0,
+ /*.nr0 =*/ 0,
+ /*.nr1 =*/ 0,
+ /*.smem =*/ 0,
+ /*.c4 =*/ false,
+ /*.cnt =*/ false,
+ };
+
+ res.pipeline = ggml_metal_pipelines_get(lib->pipelines, name);
+
+ [lib->lock unlock];
+
+ return res;
+}
+
+struct ggml_metal_pipeline_with_params ggml_metal_library_compile_pipeline(ggml_metal_library_t lib, const char * base, const char * name, ggml_metal_cv_t cv) {
+ struct ggml_metal_pipeline_with_params res = {
+ /*.pipeline =*/ nil,
+ /*.nsg =*/ 0,
+ /*.nr0 =*/ 0,
+ /*.nr1 =*/ 0,
+ /*.smem =*/ 0,
+ /*.c4 =*/ false,
+ /*.cnt =*/ false,
+ };
+
+ [lib->lock lock];
+
+ res.pipeline = ggml_metal_pipelines_get(lib->pipelines, name);
+ if (res.pipeline) {
+ [lib->lock unlock];
+
+ return res;
+ }
+
+ @autoreleasepool {
+ NSError * error = nil;
+
+ NSString * base_func = [NSString stringWithUTF8String:base];
+
+ GGML_LOG_DEBUG("%s: compiling pipeline: base = '%s', name = '%s'\n", __func__, base, name);
+
+ id<MTLFunction> mtl_function;
+ if (!cv) {
+ mtl_function = [lib->obj newFunctionWithName:base_func];
+ } else {
+ mtl_function = [lib->obj newFunctionWithName:base_func constantValues:cv->obj error:&error];
+ }
+ if (!mtl_function) {
+ [lib->lock unlock];
+
+ GGML_LOG_ERROR("%s: failed to compile pipeline: base = '%s', name = '%s'\n", __func__, base, name);
+ if (error) {
+ GGML_LOG_ERROR("%s: %s\n", __func__, [[error description] UTF8String]);
+ }
+
+ return res;
+ }
+
+ id<MTLComputePipelineState> obj = [lib->device newComputePipelineStateWithFunction:mtl_function error:&error];
+
+ [mtl_function release];
+
+ if (!obj) {
+ [lib->lock unlock];
+
+ GGML_LOG_ERROR("%s: failed to create pipeline state: base = '%s', name = '%s'\n", __func__, base, name);
+ if (error) {
+ GGML_LOG_ERROR("%s: %s\n", __func__, [[error description] UTF8String]);
+ }
+
+ return res;
+ }
+
+ GGML_LOG_DEBUG("%s: loaded %-40s %16p | th_max = %4d | th_width = %4d\n", __func__, name,
+ (void *) obj,
+ (int) obj.maxTotalThreadsPerThreadgroup,
+ (int) obj.threadExecutionWidth);
+
+ if (obj.maxTotalThreadsPerThreadgroup == 0 || obj.threadExecutionWidth == 0) {
+ [obj release];
+
+ [lib->lock unlock];
+
+ GGML_LOG_ERROR("%s: incompatible pipeline %s\n", __func__, name);
+
+ return res;
+ }
+
+ res.pipeline = ggml_metal_pipeline_init();
+ res.pipeline->obj = obj;
+
+ ggml_metal_pipelines_add(lib->pipelines, name, res.pipeline);
+ }
+
+ [lib->lock unlock];
+
+ return res;
+}
+
+//
+// MTLComputeCommandEncoder wrapper
+//
+
+struct ggml_metal_encoder {
+ id<MTLComputeCommandEncoder> obj;
+};
+
+ggml_metal_encoder_t ggml_metal_encoder_init(ggml_metal_cmd_buf_t cmd_buf_raw, bool concurrent) {
+ ggml_metal_encoder_t res = calloc(1, sizeof(struct ggml_metal_encoder));
+
+ id<MTLCommandBuffer> cmd_buf = (id<MTLCommandBuffer>) cmd_buf_raw;
+
+ if (concurrent) {
+ res->obj = [cmd_buf computeCommandEncoderWithDispatchType: MTLDispatchTypeConcurrent];
+ } else {
+ res->obj = [cmd_buf computeCommandEncoder];
+ }
+
+ [res->obj retain];
+
+ return res;
+}
+
+void ggml_metal_encoder_free(ggml_metal_encoder_t encoder) {
+ [encoder->obj release];
+ free(encoder);
+}
+
+void ggml_metal_encoder_debug_group_push(ggml_metal_encoder_t encoder, const char * name) {
+ [encoder->obj pushDebugGroup:[NSString stringWithCString:name encoding:NSUTF8StringEncoding]];
+}
+
+void ggml_metal_encoder_debug_group_pop (ggml_metal_encoder_t encoder) {
+ [encoder->obj popDebugGroup];
+}
+
+void ggml_metal_encoder_set_pipeline(ggml_metal_encoder_t encoder, struct ggml_metal_pipeline_with_params pipeline) {
+ [encoder->obj setComputePipelineState:pipeline.pipeline->obj];
+}
+
+void ggml_metal_encoder_set_bytes(ggml_metal_encoder_t encoder, void * data, size_t size, int idx) {
+ [encoder->obj setBytes:data length:size atIndex:idx];
+}
+
+void ggml_metal_encoder_set_buffer(ggml_metal_encoder_t encoder, struct ggml_metal_buffer_id buffer, int idx) {
+ [encoder->obj setBuffer:buffer.metal offset:buffer.offs atIndex:idx];
+}
+
+void ggml_metal_encoder_set_threadgroup_memory_size(ggml_metal_encoder_t encoder, size_t size, int idx) {
+ [encoder->obj setThreadgroupMemoryLength:size atIndex:idx];
+}
+
+void ggml_metal_encoder_dispatch_threadgroups(ggml_metal_encoder_t encoder, int tg0, int tg1, int tg2, int tptg0, int tptg1, int tptg2) {
+ [encoder->obj dispatchThreadgroups:MTLSizeMake(tg0, tg1, tg2) threadsPerThreadgroup:MTLSizeMake(tptg0, tptg1, tptg2)];
+}
+
+void ggml_metal_encoder_memory_barrier(ggml_metal_encoder_t encoder) {
+ [encoder->obj memoryBarrierWithScope:MTLBarrierScopeBuffers];
+}
+
+void ggml_metal_encoder_end_encoding(ggml_metal_encoder_t encoder) {
+ [encoder->obj endEncoding];
+}
+
+struct ggml_metal_device {
+ id<MTLDevice> mtl_device;
+
+ // a single global queue shared by all Metal backends
+    // technically not needed for devices with unified memory, but enables support for discrete GPUs
+ // ref: https://github.com/ggml-org/llama.cpp/pull/15906
+ id<MTLCommandQueue> mtl_queue;
+
+ ggml_metal_rsets_t rsets;
+
+ ggml_metal_library_t library;
+
+ struct ggml_metal_device_props props;
+
+ // virtual address for GPU memory allocations
+ atomic_uintptr_t addr_virt;
+};
+
+//
+// MTLResidenceSet wrapper
+//
+
+struct ggml_metal_rsets {
+ NSLock * lock;
+
+ NSMutableArray * data;
+
+ // number of seconds since the last graph computation
+    // keep the residency sets wired for that amount of time to avoid the memory being evicted by the OS
+ int keep_alive_s;
+
+ // background heartbeat thread to keep the residency sets alive
+ atomic_bool d_stop;
+ atomic_int d_loop;
+
+ dispatch_group_t d_group;
+};
+
+ggml_metal_rsets_t ggml_metal_rsets_init(void) {
+ ggml_metal_rsets_t res = calloc(1, sizeof(struct ggml_metal_rsets));
+
+ res->lock = [[NSLock alloc] init];
+ res->data = [[NSMutableArray alloc] init];
+
+ // by default keep the memory wired for 3 minutes
+ res->keep_alive_s = 3*60;
+
+ const char * GGML_METAL_RESIDENCY_KEEP_ALIVE_S = getenv("GGML_METAL_RESIDENCY_KEEP_ALIVE_S");
+ if (GGML_METAL_RESIDENCY_KEEP_ALIVE_S) {
+ res->keep_alive_s = atoi(GGML_METAL_RESIDENCY_KEEP_ALIVE_S);
+ }
+
+ if (res->keep_alive_s <= 0) {
+ res->keep_alive_s = 3*60;
+ }
+
+ GGML_LOG_INFO("%s: creating a residency set collection (keep_alive = %d s)\n", __func__, res->keep_alive_s);
+
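+    // d_loop counts heartbeat ticks (one every 0.5 s), so 2*keep_alive_s ticks correspond to roughly keep_alive_s seconds of keep-alive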
+ atomic_store_explicit(&res->d_stop, false, memory_order_relaxed);
+ atomic_store_explicit(&res->d_loop, 2*res->keep_alive_s, memory_order_relaxed);
+
+ res->d_group = dispatch_group_create();
+
+ // start a background thread that periodically requests residency for all the currently active sets in the collection
+ // the requests stop after a certain amount of time (keep_alive_s) of inactivity
+ dispatch_queue_t d_queue = dispatch_get_global_queue(QOS_CLASS_DEFAULT, 0);
+ dispatch_group_async(res->d_group, d_queue, ^{
+#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
+ if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) {
+ while (!atomic_load_explicit(&res->d_stop, memory_order_relaxed)) {
+ if (atomic_load_explicit(&res->d_loop, memory_order_relaxed) > 0) {
+ [res->lock lock];
+
+ for (int i = 0; i < (int) res->data.count; ++i) {
+ [res->data[i] requestResidency];
+ }
+
+ atomic_fetch_sub_explicit(&res->d_loop, 1, memory_order_relaxed);
+
+ [res->lock unlock];
+ }
+
+ // half a second
+ usleep(500 * 1000);
+ }
+ }
+#endif
+ });
+
+ return res;
+}
+
+void ggml_metal_rsets_free(ggml_metal_rsets_t rsets) {
+ if (rsets == NULL) {
+ return;
+ }
+
+ // note: if you hit this assert, most likely you haven't deallocated all Metal resources before exiting
+ GGML_ASSERT([rsets->data count] == 0);
+
+ atomic_store_explicit(&rsets->d_stop, true, memory_order_relaxed);
+
+ dispatch_group_wait(rsets->d_group, DISPATCH_TIME_FOREVER);
+ dispatch_release(rsets->d_group);
+
+ [rsets->data release];
+ [rsets->lock release];
+
+ free(rsets);
+}
+
+ggml_metal_device_t ggml_metal_device_init(int device) {
+ ggml_metal_device_t dev = calloc(1, sizeof(struct ggml_metal_device));
+
+ assert(dev != NULL);
+
+ if (dev->mtl_device == nil) {
+ dev->mtl_device = MTLCreateSystemDefaultDevice();
+
+ if (dev->mtl_device) {
+ dev->mtl_queue = [dev->mtl_device newCommandQueue];
+ if (dev->mtl_queue == nil) {
+ GGML_LOG_ERROR("%s: error: failed to create command queue\n", __func__);
+ }
+
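+            // base of a synthetic address space used to assign base addresses for private (non-shared) buffers,
+            // whose data is not accessible from the host - see ggml_metal_buffer_init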
+ dev->addr_virt = 0x000000400ULL;
+
+ dev->props.device = device;
+ dev->props.has_simdgroup_reduction = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7];
+ dev->props.has_simdgroup_reduction |= [dev->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML];
+
+ dev->props.has_simdgroup_mm = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7];
+ dev->props.has_unified_memory = dev->mtl_device.hasUnifiedMemory;
+
+ dev->props.has_bfloat = [dev->mtl_device supportsFamily:MTLGPUFamilyMetal3_GGML];
+ dev->props.has_bfloat |= [dev->mtl_device supportsFamily:MTLGPUFamilyApple6];
+ if (getenv("GGML_METAL_BF16_DISABLE") != NULL) {
+ dev->props.has_bfloat = false;
+ }
+
+ dev->props.has_tensor = [dev->mtl_device supportsFamily:MTLGPUFamilyMetal4_GGML];
+ if (getenv("GGML_METAL_TENSOR_DISABLE") != NULL) {
+ dev->props.has_tensor = false;
+ }
+
+            // note: the tensor API is disabled by default on older chips because it is not beneficial with the current implementation
+ // - M2 Ultra: ~5% slower
+ // - M4, M4 Max: no significant difference
+ //
+ // TODO: try to update the tensor API kernels to at least match the simdgroup performance
+ if (getenv("GGML_METAL_TENSOR_ENABLE") == NULL &&
+ ![[dev->mtl_device name] containsString:@"M5"] &&
+ ![[dev->mtl_device name] containsString:@"M6"] &&
+ ![[dev->mtl_device name] containsString:@"A19"] &&
+ ![[dev->mtl_device name] containsString:@"A20"]) {
+ GGML_LOG_WARN("%s: tensor API disabled for pre-M5 and pre-A19 devices\n", __func__);
+ dev->props.has_tensor = false;
+ }
+
+ // double-check that the tensor API compiles
+ if (dev->props.has_tensor) {
+ const char * src_tensor_f16 = "\n"
+ "#include <metal_stdlib> \n"
+ "#include <metal_tensor> \n"
+ "#include <MetalPerformancePrimitives/MetalPerformancePrimitives.h> \n"
+ " \n"
+ "using namespace metal; \n"
+ "using namespace mpp::tensor_ops; \n"
+ " \n"
+ "kernel void dummy_kernel( \n"
+ " tensor<device half, dextents<int32_t, 2>> A [[buffer(0)]], \n"
+ " tensor<device half, dextents<int32_t, 2>> B [[buffer(1)]], \n"
+ " device float * C [[buffer(2)]], \n"
+ " uint2 tgid [[threadgroup_position_in_grid]]) \n"
+ "{ \n"
+ " auto tA = A.slice(0, (int)tgid.y); \n"
+ " auto tB = B.slice((int)tgid.x, 0); \n"
+ " \n"
+ " matmul2d< \n"
+ " matmul2d_descriptor(8, 8, dynamic_extent), \n"
+ " execution_simdgroups<4>> mm; \n"
+ " \n"
+ " auto cT = mm.get_destination_cooperative_tensor<decltype(tA), decltype(tB), float>(); \n"
+ " \n"
+ " auto sA = tA.slice(0, 0); \n"
+ " auto sB = tB.slice(0, 0); \n"
+ " mm.run(sB, sA, cT); \n"
+ " \n"
+ " auto tC = tensor<device float, dextents<int32_t, 2>, tensor_inline>(C, dextents<int32_t, 2>(4, 4)); \n"
+ " \n"
+ " cT.store(tC); \n"
+ "}";
+
+ GGML_LOG_INFO("%s: testing tensor API for f16 support\n", __func__);
+ ggml_metal_library_t lib = ggml_metal_library_init_from_source(dev, src_tensor_f16, false);
+ if (lib == NULL) {
+ GGML_LOG_WARN("%s: - the tensor API is not supported in this environment - disabling\n", __func__);
+ dev->props.has_tensor = false;
+ } else {
+ struct ggml_metal_pipeline_with_params ppl = ggml_metal_library_compile_pipeline(lib, "dummy_kernel", "dummy_kernel", nil);
+ if (!ppl.pipeline) {
+ GGML_LOG_WARN("%s: - the tensor API is not supported in this environment - disabling\n", __func__);
+ dev->props.has_tensor = false;
+ }
+
+ ggml_metal_library_free(lib);
+ }
+ }
+
+ // try to compile a dummy kernel to determine if the tensor API is supported for bfloat
+ if (dev->props.has_tensor && dev->props.has_bfloat) {
+ const char * src_tensor_bf16 = "\n"
+ "#include <metal_stdlib> \n"
+ "#include <metal_tensor> \n"
+ "#include <MetalPerformancePrimitives/MetalPerformancePrimitives.h> \n"
+ " \n"
+ "using namespace metal; \n"
+ "using namespace mpp::tensor_ops; \n"
+ " \n"
+ "kernel void dummy_kernel( \n"
+ " tensor<device bfloat, dextents<int32_t, 2>> A [[buffer(0)]], \n"
+ " tensor<device bfloat, dextents<int32_t, 2>> B [[buffer(1)]], \n"
+ " device float * C [[buffer(2)]], \n"
+ " uint2 tgid [[threadgroup_position_in_grid]]) \n"
+ "{ \n"
+ " auto tA = A.slice(0, (int)tgid.y); \n"
+ " auto tB = B.slice((int)tgid.x, 0); \n"
+ " \n"
+ " matmul2d< \n"
+ " matmul2d_descriptor(8, 8, dynamic_extent), \n"
+ " execution_simdgroups<4>> mm; \n"
+ " \n"
+ " auto cT = mm.get_destination_cooperative_tensor<decltype(tA), decltype(tB), float>(); \n"
+ " \n"
+ " auto sA = tA.slice(0, 0); \n"
+ " auto sB = tB.slice(0, 0); \n"
+ " mm.run(sB, sA, cT); \n"
+ " \n"
+ " auto tC = tensor<device float, dextents<int32_t, 2>, tensor_inline>(C, dextents<int32_t, 2>(4, 4)); \n"
+ " \n"
+ " cT.store(tC); \n"
+ "}";
+
+ GGML_LOG_INFO("%s: testing tensor API for bfloat support\n", __func__);
+ ggml_metal_library_t lib = ggml_metal_library_init_from_source(dev, src_tensor_bf16, false);
+ if (lib == NULL) {
+ GGML_LOG_WARN("%s: - the tensor API does not support bfloat - disabling bfloat support\n", __func__);
+ dev->props.has_bfloat = false;
+ } else {
+ struct ggml_metal_pipeline_with_params ppl = ggml_metal_library_compile_pipeline(lib, "dummy_kernel", "dummy_kernel", nil);
+ if (!ppl.pipeline) {
+ GGML_LOG_WARN("%s: - the tensor API does not support bfloat - disabling bfloat support\n", __func__);
+ dev->props.has_bfloat = false;
+ }
+
+ ggml_metal_library_free(lib);
+ }
+ }
+
+ dev->props.use_residency_sets = true;
+#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
+ dev->props.use_residency_sets = getenv("GGML_METAL_NO_RESIDENCY") == nil;
+#endif
+
+ dev->props.use_shared_buffers = dev->props.has_unified_memory;
+#if TARGET_OS_OSX
+ // In case of eGPU, shared memory may be preferable.
+ dev->props.use_shared_buffers |= [dev->mtl_device location] == MTLDeviceLocationExternal;
+#endif
+ if (getenv("GGML_METAL_SHARED_BUFFERS_DISABLE") != NULL) {
+ dev->props.use_shared_buffers = false;
+ }
+ if (getenv("GGML_METAL_SHARED_BUFFERS_ENABLE") != NULL) {
+ dev->props.use_shared_buffers = true;
+ }
+
+ dev->props.supports_gpu_family_apple7 = [dev->mtl_device supportsFamily:MTLGPUFamilyApple7];
+
+ dev->props.op_offload_min_batch_size = getenv("GGML_OP_OFFLOAD_MIN_BATCH") ? atoi(getenv("GGML_OP_OFFLOAD_MIN_BATCH")) : 32;
+
+ dev->props.max_buffer_size = dev->mtl_device.maxBufferLength;
+ dev->props.max_theadgroup_memory_size = dev->mtl_device.maxThreadgroupMemoryLength;
+ if (@available(macOS 10.12, iOS 16.0, *)) {
+ dev->props.max_working_set_size = dev->mtl_device.recommendedMaxWorkingSetSize;
+ } else {
+ dev->props.max_working_set_size = dev->mtl_device.maxBufferLength;
+ }
+
+ snprintf(dev->props.name, sizeof(dev->props.name), "%s%d", "MTL", device);
+ snprintf(dev->props.desc, sizeof(dev->props.desc), "%s", [[dev->mtl_device name] UTF8String]);
+
+ dev->library = ggml_metal_library_init(dev);
+ if (!dev->library) {
+ GGML_LOG_ERROR("%s: error: failed to create library\n", __func__);
+ }
+
+ if (dev->props.use_residency_sets) {
+ dev->rsets = ggml_metal_rsets_init();
+ } else {
+ dev->rsets = nil;
+ }
+
+ // print MTL GPU family:
+ GGML_LOG_INFO("%s: GPU name: %s\n", __func__, dev->props.name);
+
+ // determine max supported GPU family
+ // https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+ // https://developer.apple.com/metal/Metal-Feature-Set-Tables.pdf
+ {
+ for (int i = MTLGPUFamilyApple1 + 20; i >= MTLGPUFamilyApple1; --i) {
+ if ([dev->mtl_device supportsFamily:i]) {
+ GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyApple%d (%d)\n", __func__, i - (int) MTLGPUFamilyApple1 + 1, i);
+ break;
+ }
+ }
+
+ for (int i = MTLGPUFamilyCommon1 + 5; i >= MTLGPUFamilyCommon1; --i) {
+ if ([dev->mtl_device supportsFamily:i]) {
+ GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyCommon%d (%d)\n", __func__, i - (int) MTLGPUFamilyCommon1 + 1, i);
+ break;
+ }
+ }
+
+ for (int i = MTLGPUFamilyMetal3_GGML + 5; i >= MTLGPUFamilyMetal3_GGML; --i) {
+ if ([dev->mtl_device supportsFamily:i]) {
+ GGML_LOG_INFO("%s: GPU family: MTLGPUFamilyMetal%d (%d)\n", __func__, i - (int) MTLGPUFamilyMetal3_GGML + 3, i);
+ break;
+ }
+ }
+ }
+
+ GGML_LOG_INFO("%s: simdgroup reduction = %s\n", __func__, dev->props.has_simdgroup_reduction ? "true" : "false");
+ GGML_LOG_INFO("%s: simdgroup matrix mul. = %s\n", __func__, dev->props.has_simdgroup_mm ? "true" : "false");
+ GGML_LOG_INFO("%s: has unified memory = %s\n", __func__, dev->props.has_unified_memory ? "true" : "false");
+ GGML_LOG_INFO("%s: has bfloat = %s\n", __func__, dev->props.has_bfloat ? "true" : "false");
+ GGML_LOG_INFO("%s: has tensor = %s\n", __func__, dev->props.has_tensor ? "true" : "false");
+ GGML_LOG_INFO("%s: use residency sets = %s\n", __func__, dev->props.use_residency_sets ? "true" : "false");
+ GGML_LOG_INFO("%s: use shared buffers = %s\n", __func__, dev->props.use_shared_buffers ? "true" : "false");
+
+#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
+ if (@available(macOS 10.12, iOS 16.0, *)) {
+ GGML_LOG_INFO("%s: recommendedMaxWorkingSetSize = %8.2f MB\n", __func__, dev->props.max_working_set_size / 1e6);
+ }
+#endif
+ }
+ }
+
+ return dev;
+}
+
+void ggml_metal_device_free(ggml_metal_device_t dev) {
+ assert(dev != NULL);
+
+ ggml_metal_rsets_free(dev->rsets);
+
+ ggml_metal_library_free(dev->library);
+ dev->library = NULL;
+
+ if (dev->mtl_queue) {
+ [dev->mtl_queue release];
+ dev->mtl_queue = nil;
+ }
+
+ if (dev->mtl_device) {
+ [dev->mtl_device release];
+ dev->mtl_device = nil;
+ }
+
+ free(dev);
+}
+
+void * ggml_metal_device_get_obj(ggml_metal_device_t dev) {
+ return dev->mtl_device;
+}
+
+void * ggml_metal_device_get_queue(ggml_metal_device_t dev) {
+ return dev->mtl_queue;
+}
+
+ggml_metal_library_t ggml_metal_device_get_library(ggml_metal_device_t dev) {
+ return dev->library;
+}
+
+void ggml_metal_device_rsets_add(ggml_metal_device_t dev, ggml_metal_rset_t rset) {
+ if (rset == nil) {
+ return;
+ }
+
+ GGML_ASSERT(dev->rsets);
+
+ [dev->rsets->lock lock];
+
+ [dev->rsets->data addObject:rset];
+
+ [dev->rsets->lock unlock];
+}
+
+void ggml_metal_device_rsets_rm(ggml_metal_device_t dev, ggml_metal_rset_t rset) {
+ if (rset == nil) {
+ return;
+ }
+
+ GGML_ASSERT(dev->rsets);
+
+ [dev->rsets->lock lock];
+
+ [dev->rsets->data removeObject:rset];
+
+ [dev->rsets->lock unlock];
+}
+
+void ggml_metal_device_rsets_keep_alive(ggml_metal_device_t dev) {
+ if (dev->rsets == NULL) {
+ return;
+ }
+
+ atomic_store_explicit(&dev->rsets->d_loop, 2*dev->rsets->keep_alive_s, memory_order_relaxed);
+}
+
+struct ggml_metal_event {
+ void * obj; // id<MTLEvent>
+
+ atomic_int value;
+};
+
+void ggml_metal_event_encode_signal(ggml_metal_event_t ev, ggml_metal_cmd_buf_t cmd_buf_raw) {
+ id<MTLEvent> event = (id<MTLEvent>)ev->obj;
+
+ id<MTLCommandBuffer> cmd_buf = (id<MTLCommandBuffer>) cmd_buf_raw;
+
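+    // each signal increments the event value by 1 - ggml_metal_event_encode_wait waits for the most recently signaled value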
+ [cmd_buf encodeSignalEvent:event value:atomic_fetch_add_explicit(&ev->value, 1, memory_order_relaxed) + 1];
+}
+
+void ggml_metal_event_encode_wait(ggml_metal_event_t ev, ggml_metal_cmd_buf_t cmd_buf_raw) {
+ id<MTLEvent> event = (id<MTLEvent>)ev->obj;
+
+ id<MTLCommandBuffer> cmd_buf = (id<MTLCommandBuffer>) cmd_buf_raw;
+
+ [cmd_buf encodeWaitForEvent:event value:atomic_load_explicit(&ev->value, memory_order_relaxed)];
+}
+
+ggml_metal_event_t ggml_metal_device_event_init(ggml_metal_device_t dev) {
+ id<MTLEvent> event = [dev->mtl_device newEvent];
+
+ ggml_metal_event_t ev = calloc(1, sizeof(struct ggml_metal_event));
+
+ ev->obj = (__bridge void *)event;
+ ev->value = 0;
+
+ return ev;
+}
+
+void ggml_metal_device_event_free(ggml_metal_device_t dev, ggml_metal_event_t ev) {
+ id<MTLEvent> event = ev->obj;
+ [event release];
+
+ free(ev);
+
+ GGML_UNUSED(dev);
+}
+
+void ggml_metal_device_event_synchronize(ggml_metal_device_t dev, ggml_metal_event_t ev) {
+ @autoreleasepool {
+ id<MTLEvent> event = ev->obj;
+
+ id<MTLCommandBuffer> cmd_buf = [dev->mtl_queue commandBuffer];
+ [cmd_buf encodeWaitForEvent:event value:atomic_load_explicit(&ev->value, memory_order_relaxed)];
+ [cmd_buf commit];
+ [cmd_buf waitUntilCompleted];
+ }
+}
+
+void ggml_metal_device_get_memory(ggml_metal_device_t dev, size_t * free, size_t * total) {
+ if (@available(macOS 10.12, iOS 16.0, *)) {
+ *total = dev->mtl_device.recommendedMaxWorkingSetSize;
+ *free = *total - dev->mtl_device.currentAllocatedSize;
+ } else {
+ *free = 0;
+ *total = 0;
+ }
+}
+
+bool ggml_metal_device_supports_op(ggml_metal_device_t dev, const struct ggml_tensor * op) {
+ const bool has_simdgroup_mm = dev->props.has_simdgroup_mm;
+ const bool has_simdgroup_reduction = dev->props.has_simdgroup_reduction;
+ const bool has_bfloat = dev->props.has_bfloat;
+
+ if (!has_bfloat) {
+ if (op->type == GGML_TYPE_BF16) {
+ return false;
+ }
+
+ for (size_t i = 0, n = 3; i < n; ++i) {
+ if (op->src[i] != NULL && op->src[i]->type == GGML_TYPE_BF16) {
+ return false;
+ }
+ }
+ }
+
+ switch (op->op) {
+ case GGML_OP_SCALE:
+ case GGML_OP_FILL:
+ case GGML_OP_CLAMP:
+ case GGML_OP_SQR:
+ case GGML_OP_SQRT:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_LOG:
+ return ggml_is_contiguous_rows(op->src[0]) && (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16);
+ case GGML_OP_UNARY:
+ switch (ggml_get_unary_op(op)) {
+ case GGML_UNARY_OP_TANH:
+ case GGML_UNARY_OP_RELU:
+ case GGML_UNARY_OP_SIGMOID:
+ case GGML_UNARY_OP_GELU:
+ case GGML_UNARY_OP_GELU_ERF:
+ case GGML_UNARY_OP_GELU_QUICK:
+ case GGML_UNARY_OP_SILU:
+ case GGML_UNARY_OP_ELU:
+ case GGML_UNARY_OP_NEG:
+ case GGML_UNARY_OP_ABS:
+ case GGML_UNARY_OP_SGN:
+ case GGML_UNARY_OP_STEP:
+ case GGML_UNARY_OP_HARDSWISH:
+ case GGML_UNARY_OP_HARDSIGMOID:
+ case GGML_UNARY_OP_EXP:
+ case GGML_UNARY_OP_SOFTPLUS:
+ case GGML_UNARY_OP_EXPM1:
+ return ggml_is_contiguous_rows(op->src[0]) && (op->src[0]->type == GGML_TYPE_F32 || op->src[0]->type == GGML_TYPE_F16);
+ default:
+ return false;
+ }
+ case GGML_OP_GLU:
+ switch (ggml_get_glu_op(op)) {
+ case GGML_GLU_OP_REGLU:
+ case GGML_GLU_OP_GEGLU:
+ case GGML_GLU_OP_SWIGLU:
+ case GGML_GLU_OP_SWIGLU_OAI:
+ case GGML_GLU_OP_GEGLU_ERF:
+ case GGML_GLU_OP_GEGLU_QUICK:
+ return ggml_is_contiguous_1(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
+ default:
+ return false;
+ }
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_PERMUTE:
+ case GGML_OP_CONCAT:
+ return true;
+ case GGML_OP_ADD:
+ case GGML_OP_SUB:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ case GGML_OP_ADD_ID:
+ return ggml_is_contiguous_rows(op->src[0]) && ggml_is_contiguous_rows(op->src[1]) && op->src[0]->type == GGML_TYPE_F32;
+ case GGML_OP_ACC:
+ case GGML_OP_REPEAT:
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ return true;
+ case GGML_OP_CONV_TRANSPOSE_2D:
+ return ggml_is_contiguous(op->src[0]) && ggml_is_contiguous(op->src[1]) &&
+ (op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32) &&
+ op->src[1]->type == GGML_TYPE_F32 &&
+ op->type == GGML_TYPE_F32;
+ case GGML_OP_SUM:
+ return has_simdgroup_reduction && ggml_is_contiguous(op->src[0]);
+ case GGML_OP_TRI:
+ return ggml_is_contiguous_rows(op->src[0]);
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_CUMSUM:
+ case GGML_OP_MEAN:
+ case GGML_OP_SOFT_MAX:
+ case GGML_OP_GROUP_NORM:
+ case GGML_OP_L2_NORM:
+ return has_simdgroup_reduction && ggml_is_contiguous_rows(op->src[0]);
+ case GGML_OP_COUNT_EQUAL:
+ return has_simdgroup_reduction &&
+ op->src[0]->type == GGML_TYPE_I32 &&
+ op->src[1]->type == GGML_TYPE_I32 &&
+ op->type == GGML_TYPE_I64;
+ case GGML_OP_ARGMAX:
+ return has_simdgroup_reduction;
+ case GGML_OP_NORM:
+ case GGML_OP_RMS_NORM:
+ return has_simdgroup_reduction && (ggml_is_contiguous_rows(op->src[0]));
+ case GGML_OP_ROPE:
+ return true;
+ case GGML_OP_IM2COL:
+ return ggml_is_contiguous(op->src[1]) && op->src[1]->type == GGML_TYPE_F32 && (op->type == GGML_TYPE_F16 || op->type == GGML_TYPE_F32);
+ case GGML_OP_CONV_2D:
+ return ggml_is_contiguous(op->src[0]) &&
+ op->src[1]->type == GGML_TYPE_F32 &&
+ op->type == GGML_TYPE_F32 &&
+ (op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32);
+ case GGML_OP_UPSCALE:
+ return op->src[0]->type == GGML_TYPE_F32 && op->op_params[0] == GGML_SCALE_MODE_NEAREST && !(op->op_params[0] & GGML_SCALE_FLAG_ANTIALIAS);
+ case GGML_OP_POOL_1D:
+ return ggml_is_contiguous(op->src[0]) && op->src[0]->type == GGML_TYPE_F32;
+ case GGML_OP_POOL_2D:
+ return op->src[0]->type == GGML_TYPE_F32;
+ case GGML_OP_PAD:
+ // TODO: add circular padding support for metal, see https://github.com/ggml-org/llama.cpp/pull/16985
+ if (ggml_get_op_params_i32(op, 8) != 0) {
+ return false;
+ }
+
+ return (ggml_get_op_params_i32(op, 0) == 0) && (ggml_get_op_params_i32(op, 2) == 0) &&
+ (ggml_get_op_params_i32(op, 4) == 0) && (ggml_get_op_params_i32(op, 6) == 0);
+ case GGML_OP_PAD_REFLECT_1D:
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ case GGML_OP_LEAKY_RELU:
+ return op->src[0]->type == GGML_TYPE_F32;
+ case GGML_OP_ARGSORT:
+ case GGML_OP_TOP_K:
+ case GGML_OP_ARANGE:
+ return true;
+ case GGML_OP_FLASH_ATTN_EXT:
+ // for new head sizes, add checks here
+ if (op->src[0]->ne[0] != 32 &&
+ op->src[0]->ne[0] != 40 &&
+ op->src[0]->ne[0] != 48 &&
+ op->src[0]->ne[0] != 64 &&
+ op->src[0]->ne[0] != 72 &&
+ op->src[0]->ne[0] != 80 &&
+ op->src[0]->ne[0] != 96 &&
+ op->src[0]->ne[0] != 112 &&
+ op->src[0]->ne[0] != 128 &&
+ op->src[0]->ne[0] != 192 &&
+ op->src[0]->ne[0] != 256 &&
+ op->src[0]->ne[0] != 576) {
+ return false;
+ }
+ if (op->src[1]->type != op->src[2]->type) {
+ return false;
+ }
+ return has_simdgroup_mm; // TODO: over-restricted for vec-kernels
+ case GGML_OP_SSM_CONV:
+ case GGML_OP_SSM_SCAN:
+ return has_simdgroup_reduction;
+ case GGML_OP_RWKV_WKV6:
+ case GGML_OP_RWKV_WKV7:
+ return true;
+ case GGML_OP_SOLVE_TRI:
+ case GGML_OP_MUL_MAT:
+ case GGML_OP_MUL_MAT_ID:
+ return has_simdgroup_reduction;
+ case GGML_OP_CPY:
+ case GGML_OP_DUP:
+ case GGML_OP_CONT:
+ {
+ switch (op->src[0]->type) {
+ case GGML_TYPE_F32:
+ switch (op->type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_IQ4_NL:
+ case GGML_TYPE_I32:
+ return true;
+ default:
+ return false;
+ }
+ case GGML_TYPE_F16:
+ switch (op->type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ return true;
+ default:
+ return false;
+ }
+ case GGML_TYPE_BF16:
+ switch (op->type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_BF16:
+ return true;
+ default:
+ return false;
+ }
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_Q8_0:
+ switch (op->type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ return true;
+ default:
+ return false;
+ }
+ case GGML_TYPE_I32:
+ return op->type == GGML_TYPE_F32 || op->type == GGML_TYPE_I32;
+ default:
+ return false;
+ };
+ }
+ case GGML_OP_GET_ROWS:
+ return true;
+ case GGML_OP_SET_ROWS:
+ {
+ if (op->src[0]->type != GGML_TYPE_F32) {
+ return false;
+ }
+
+ switch (op->type) {
+ case GGML_TYPE_F32:
+ case GGML_TYPE_F16:
+ case GGML_TYPE_BF16:
+ case GGML_TYPE_Q8_0:
+ case GGML_TYPE_Q4_0:
+ case GGML_TYPE_Q4_1:
+ case GGML_TYPE_Q5_0:
+ case GGML_TYPE_Q5_1:
+ case GGML_TYPE_IQ4_NL:
+ return true;
+ default:
+ return false;
+ };
+ }
+ case GGML_OP_DIAG:
+ return true;
+ case GGML_OP_OPT_STEP_ADAMW:
+ case GGML_OP_OPT_STEP_SGD:
+ return has_simdgroup_reduction;
+ default:
+ return false;
+ }
+}
+
+const struct ggml_metal_device_props * ggml_metal_device_get_props(ggml_metal_device_t dev) {
+ return &dev->props;
+}
+
+//
+// device buffers
+//
+
+// max memory buffers that can be mapped to the device
+#define GGML_METAL_MAX_BUFFERS 64
+
+struct ggml_metal_buffer_wrapper {
+ void * data;
+ size_t size;
+
+ id<MTLBuffer> metal;
+};
+
+struct ggml_metal_buffer {
+ void * all_data;
+ size_t all_size;
+
+ // if false, the Metal buffer data is allocated in private GPU memory and is not shared with the host
+ bool is_shared;
+ bool owned;
+
+ // multiple buffers are used only to avoid the maximum buffer size limitation when using mmap
+ int n_buffers;
+ struct ggml_metal_buffer_wrapper buffers[GGML_METAL_MAX_BUFFERS];
+
+ bool use_residency_sets;
+
+ // optional MTLResidencySet
+    // note: cannot explicitly use "id<MTLResidencySet>" here because it is not available on certain OSes
+ id rset;
+
+ // pointers to global device
+ ggml_metal_device_t dev;
+};
+
+static void ggml_metal_log_allocated_size(id<MTLDevice> device, size_t size_aligned) {
+#ifndef GGML_METAL_NDEBUG
+#if TARGET_OS_OSX || (TARGET_OS_IOS && __clang_major__ >= 15)
+ if (@available(macOS 10.12, iOS 16.0, *)) {
+ GGML_LOG_DEBUG("%s: allocated buffer, size = %8.2f MiB, (%8.2f / %8.2f)\n",
+ __func__,
+ size_aligned / 1024.0 / 1024.0,
+ device.currentAllocatedSize / 1024.0 / 1024.0,
+ device.recommendedMaxWorkingSetSize / 1024.0 / 1024.0);
+
+ if (device.currentAllocatedSize > device.recommendedMaxWorkingSetSize) {
+ GGML_LOG_WARN("%s: warning: current allocated size is greater than the recommended max working set size\n", __func__);
+ }
+ } else {
+ GGML_LOG_INFO("%s: allocated buffer, size = %8.2f MiB, (%8.2f)\n",
+ __func__,
+ size_aligned / 1024.0 / 1024.0,
+ device.currentAllocatedSize / 1024.0 / 1024.0);
+ }
+#endif
+#endif
+ GGML_UNUSED(device);
+ GGML_UNUSED(size_aligned);
+}
+
+// rset init
+static bool ggml_metal_buffer_rset_init(ggml_metal_buffer_t buf) {
+ buf->rset = nil;
+
+ if (!buf->use_residency_sets) {
+ return true;
+ }
+
+#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
+ if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) {
+ MTLResidencySetDescriptor * desc = [[MTLResidencySetDescriptor alloc] init];
+ desc.label = @"ggml_metal";
+ desc.initialCapacity = buf->n_buffers;
+
+ NSError * error;
+ buf->rset = [buf->dev->mtl_device newResidencySetWithDescriptor:desc error:&error];
+ if (error) {
+ GGML_LOG_ERROR("%s: error: %s\n", __func__, [[error description] UTF8String]);
+ [desc release];
+ return false;
+ }
+
+ [desc release];
+
+ for (int i = 0; i < buf->n_buffers; i++) {
+ [buf->rset addAllocation:buf->buffers[i].metal];
+ }
+
+ [buf->rset commit];
+ [buf->rset requestResidency];
+
+ return true;
+ }
+#endif
+
+ return true;
+}
+
+// rset free
+static void ggml_metal_buffer_rset_free(ggml_metal_buffer_t buf) {
+#if defined(GGML_METAL_HAS_RESIDENCY_SETS)
+ if (@available(macOS 15.0, iOS 18.0, tvOS 18.0, visionOS 2.0, *)) {
+ if (buf->rset) {
+ [buf->rset endResidency];
+ [buf->rset removeAllAllocations];
+ [buf->rset release];
+ }
+ }
+#else
+ GGML_UNUSED(buf);
+#endif
+}
+
+static void * ggml_metal_host_malloc(size_t n) {
+ void * data = NULL;
+
+#if TARGET_OS_OSX
+ kern_return_t err = vm_allocate((vm_map_t) mach_task_self(), (void *) &data, n, VM_FLAGS_ANYWHERE);
+ if (err != KERN_SUCCESS) {
+ GGML_LOG_ERROR("%s: error: vm_allocate failed\n", __func__);
+ return NULL;
+ }
+#else
+ const int result = posix_memalign((void **) &data, sysconf(_SC_PAGESIZE), n);
+ if (result != 0) {
+ GGML_LOG_ERROR("%s: error: posix_memalign failed\n", __func__);
+ return NULL;
+ }
+#endif
+
+ return data;
+}
+
+ggml_metal_buffer_t ggml_metal_buffer_init(ggml_metal_device_t dev, size_t size, bool shared) {
+ ggml_metal_buffer_t res = calloc(1, sizeof(struct ggml_metal_buffer));
+
+ res->dev = dev;
+
+ const size_t size_page = sysconf(_SC_PAGESIZE);
+
+ size_t size_aligned = size;
+ if ((size_aligned % size_page) != 0) {
+ size_aligned += (size_page - (size_aligned % size_page));
+ }
+
+ const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev);
+
+ shared = shared && props_dev->use_shared_buffers;
+
+ // allocate shared buffer if the device supports it and it is required by the buffer type
+ if (shared) {
+ res->all_data = ggml_metal_host_malloc(size_aligned);
+ res->is_shared = true;
+ } else {
+ // use virtual address
+ res->all_data = (void *) atomic_fetch_add_explicit(&dev->addr_virt, size_aligned, memory_order_relaxed);
+ res->is_shared = false;
+ }
+ res->all_size = size_aligned;
+
+ res->owned = true;
+
+ res->n_buffers = 1;
+
+ if (res->all_data != NULL) {
+ res->buffers[0].size = size;
+ res->buffers[0].metal = nil;
+
+ if (size_aligned > 0) {
+ if (props_dev->use_shared_buffers && shared) {
+ res->buffers[0].metal = [res->dev->mtl_device newBufferWithBytesNoCopy:res->all_data
+ length:size_aligned
+ options:MTLResourceStorageModeShared
+ deallocator:nil];
+ } else {
+ res->buffers[0].metal = [res->dev->mtl_device newBufferWithLength:size_aligned options:MTLResourceStorageModePrivate];
+ }
+ }
+
+ res->buffers[0].data = res->all_data;
+ }
+
+ if (size_aligned > 0 && (res->all_data == NULL || res->buffers[0].metal == nil)) {
+ GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
+ free(res);
+ return NULL;
+ }
+
+ res->use_residency_sets = props_dev->use_residency_sets;
+
+ if (!ggml_metal_buffer_rset_init(res)) {
+ GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__);
+ free(res);
+ return NULL;
+ }
+
+ ggml_metal_device_rsets_add(dev, res->rset);
+
+ //ggml_metal_log_allocated_size(device, size_aligned);
+
+ return res;
+}
+
+ggml_metal_buffer_t ggml_metal_buffer_map(ggml_metal_device_t dev, void * ptr, size_t size, size_t max_tensor_size) {
+ ggml_metal_buffer_t res = calloc(1, sizeof(struct ggml_metal_buffer));
+
+ res->dev = dev;
+
+ res->all_data = ptr;
+ res->all_size = size;
+
+ res->is_shared = true;
+ res->owned = false;
+
+ res->n_buffers = 0;
+
+ const size_t size_page = sysconf(_SC_PAGESIZE);
+
+ // page-align the data ptr
+ {
+ const uintptr_t offs = (uintptr_t) ptr % size_page;
+ ptr = (void *) ((char *) ptr - offs);
+ size += offs;
+ }
+
+ size_t size_aligned = size;
+ if ((size_aligned % size_page) != 0) {
+ size_aligned += (size_page - (size_aligned % size_page));
+ }
+
+ const struct ggml_metal_device_props * props_dev = ggml_metal_device_get_props(dev);
+
+ // the buffer fits into the max buffer size allowed by the device
+ if (size_aligned <= props_dev->max_buffer_size) {
+ res->buffers[res->n_buffers].data = ptr;
+ res->buffers[res->n_buffers].size = size;
+ res->buffers[res->n_buffers].metal = nil;
+
+ if (size_aligned > 0) {
+ res->buffers[res->n_buffers].metal = [res->dev->mtl_device newBufferWithBytesNoCopy:ptr length:size_aligned options:MTLResourceStorageModeShared deallocator:nil];
+
+ if (res->buffers[res->n_buffers].metal == nil) {
+ GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_aligned / 1024.0 / 1024.0);
+ free(res);
+ return NULL;
+ }
+ }
+
+ ggml_metal_log_allocated_size(res->dev->mtl_device, size_aligned);
+
+ ++res->n_buffers;
+ } else {
+ // this overlap between the views will guarantee that the tensor with the maximum size will fully fit into
+ // one of the views
+ const size_t size_ovlp = ((max_tensor_size + size_page - 1) / size_page + 1) * size_page; // round-up 2 pages just in case
+ const size_t size_step = props_dev->max_buffer_size - size_ovlp;
+ const size_t size_view = props_dev->max_buffer_size;
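+        // example (hypothetical sizes): with max_buffer_size = 16 GiB and max_tensor_size = 1 GiB, each view spans 16 GiB,
+        // but consecutive views start ~15 GiB apart, so any single tensor is fully contained in at least one view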
+
+ for (size_t i = 0; i < size; i += size_step) {
+ const size_t size_step_aligned = (i + size_view <= size) ? size_view : (size_aligned - i);
+
+ res->buffers[res->n_buffers].data = (void *) ((uint8_t *) ptr + i);
+ res->buffers[res->n_buffers].size = size_step_aligned;
+ res->buffers[res->n_buffers].metal = nil;
+
+ if (size_step_aligned > 0) {
+ res->buffers[res->n_buffers].metal = [res->dev->mtl_device newBufferWithBytesNoCopy:(void *) ((uint8_t *) ptr + i) length:size_step_aligned options:MTLResourceStorageModeShared deallocator:nil];
+
+ if (res->buffers[res->n_buffers].metal == nil) {
+ GGML_LOG_ERROR("%s: error: failed to allocate buffer, size = %8.2f MiB\n", __func__, size_step_aligned / 1024.0 / 1024.0);
+ free(res);
+ return NULL;
+ }
+ }
+
+ ggml_metal_log_allocated_size(res->dev->mtl_device, size_step_aligned);
+
+ if (i + size_step < size) {
+ GGML_LOG_INFO("\n");
+ }
+
+ ++res->n_buffers;
+ }
+ }
+
+ res->use_residency_sets = props_dev->use_residency_sets;
+
+ if (!ggml_metal_buffer_rset_init(res)) {
+ GGML_LOG_ERROR("%s: error: failed to initialize residency set\n", __func__);
+ free(res);
+ return NULL;
+ }
+
+ ggml_metal_device_rsets_add(dev, res->rset);
+
+ return res;
+}
+
+void ggml_metal_buffer_free(ggml_metal_buffer_t buf) {
+ ggml_metal_device_rsets_rm(buf->dev, buf->rset);
+
+ for (int i = 0; i < buf->n_buffers; i++) {
+ [buf->buffers[i].metal release];
+ }
+
+ ggml_metal_buffer_rset_free(buf);
+
+ if (buf->is_shared && buf->owned) {
+#if TARGET_OS_OSX
+ vm_deallocate((vm_map_t)mach_task_self(), (vm_address_t)buf->all_data, buf->all_size);
+#else
+ free(buf->all_data);
+#endif
+ }
+
+ free(buf);
+}
+
+void * ggml_metal_buffer_get_base(ggml_metal_buffer_t buf) {
+ return buf->all_data;
+}
+
+bool ggml_metal_buffer_is_shared(ggml_metal_buffer_t buf) {
+ return buf->is_shared;
+}
+
+void ggml_metal_buffer_memset_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+ if (buf->is_shared) {
+ memset((char *) tensor->data + offset, value, size);
+ return;
+ }
+
+ @autoreleasepool {
+ // dst
+ struct ggml_metal_buffer_id bid_dst = ggml_metal_buffer_get_id(buf, tensor);
+ bid_dst.offs += offset;
+
+ id<MTLCommandBuffer> cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences];
+
+ {
+ id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+ [encoder fillBuffer:bid_dst.metal
+                      range:NSMakeRange(bid_dst.offs, size)
+ value:value];
+
+ [encoder endEncoding];
+ }
+
+ [cmd_buf commit];
+ [cmd_buf waitUntilCompleted];
+ }
+}
+
+void ggml_metal_buffer_set_tensor(ggml_metal_buffer_t buf, struct ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ if (buf->is_shared) {
+ memcpy((char *) tensor->data + offset, data, size);
+ return;
+ }
+
+ @autoreleasepool {
+ // src
+ void * data_ptr = (void *)(uintptr_t) data; // "const cast" the src data
+ id<MTLBuffer> buf_src = [buf->dev->mtl_device newBufferWithBytesNoCopy:data_ptr
+ length:size
+ options:MTLResourceStorageModeShared
+ deallocator:nil];
+
+ GGML_ASSERT(buf_src);
+
+ // dst
+ struct ggml_metal_buffer_id bid_dst = ggml_metal_buffer_get_id(buf, tensor);
+ bid_dst.offs += offset;
+
+        // note: for experimentation purposes, we use a semaphore here to wait for the copy to complete
+        //       this is an alternative to waitUntilCompleted, which should be faster, but it doesn't seem to make much difference
+ dispatch_semaphore_t completion_semaphore = dispatch_semaphore_create(0);
+
+ id<MTLCommandBuffer> cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences];
+
+ {
+ id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+ [encoder copyFromBuffer:buf_src
+ sourceOffset:0
+ toBuffer:bid_dst.metal
+ destinationOffset:bid_dst.offs
+ size:size];
+
+ [encoder endEncoding];
+ }
+
+ [cmd_buf addCompletedHandler:^(id<MTLCommandBuffer> cb) {
+ // TODO: can check for errors here
+ GGML_UNUSED(cb);
+
+ dispatch_semaphore_signal(completion_semaphore);
+ }];
+
+ [cmd_buf commit];
+
+ dispatch_semaphore_wait(completion_semaphore, DISPATCH_TIME_FOREVER);
+ dispatch_release(completion_semaphore);
+
+ //[cmd_buf waitUntilCompleted];
+ }
+}
+
+void ggml_metal_buffer_get_tensor(ggml_metal_buffer_t buf, const struct ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ if (buf->is_shared) {
+ memcpy(data, (const char *) tensor->data + offset, size);
+ return;
+ }
+
+ @autoreleasepool {
+ // src
+ struct ggml_metal_buffer_id bid_src = ggml_metal_buffer_get_id(buf, tensor);
+ bid_src.offs += offset;
+
+ // dst
+ id<MTLBuffer> buf_dst = [buf->dev->mtl_device newBufferWithBytesNoCopy:data
+ length:size
+ options:MTLResourceStorageModeShared
+ deallocator:nil];
+
+ GGML_ASSERT(buf_dst);
+
+ id<MTLCommandBuffer> cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences];
+
+ {
+ id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+ [encoder copyFromBuffer:bid_src.metal
+ sourceOffset:bid_src.offs
+ toBuffer:buf_dst
+ destinationOffset:0
+ size:size];
+
+ [encoder endEncoding];
+ }
+
+ [cmd_buf commit];
+ [cmd_buf waitUntilCompleted];
+ }
+}
+
+void ggml_metal_buffer_clear(ggml_metal_buffer_t buf, uint8_t value) {
+ if (buf->is_shared) {
+ memset(buf->all_data, value, buf->all_size);
+ return;
+ }
+
+ @autoreleasepool {
+ id<MTLCommandBuffer> cmd_buf = [buf->dev->mtl_queue commandBufferWithUnretainedReferences];
+
+ {
+ id<MTLBlitCommandEncoder> encoder = [cmd_buf blitCommandEncoder];
+
+ [encoder fillBuffer:buf->buffers[0].metal
+ range:NSMakeRange(0, buf->buffers[0].size)
+ value:value];
+
+ [encoder endEncoding];
+ }
+
+ [cmd_buf commit];
+ [cmd_buf waitUntilCompleted];
+ }
+}
+
+struct ggml_metal_buffer_id ggml_metal_buffer_get_id(ggml_metal_buffer_t buf, const struct ggml_tensor * t) {
+ struct ggml_metal_buffer_id res = { nil, 0 };
+
+ const int64_t tsize = ggml_nbytes(t);
+
+ // find the view that contains the tensor fully
+ for (int i = 0; i < buf->n_buffers; ++i) {
+ const int64_t ioffs = (int64_t) t->data - (int64_t) buf->buffers[i].data;
+
+ //GGML_LOG_INFO("ioffs = %10ld, tsize = %10ld, sum = %10ld, buf->buffers[%d].size = %10ld\n", ioffs, tsize, ioffs + tsize, i, buf->buffers[i].size);
+ if (ioffs >= 0 && ioffs + tsize <= (int64_t) buf->buffers[i].size) {
+ res.metal = buf->buffers[i].metal;
+ res.offs = (size_t) ioffs;
+
+ //GGML_LOG_INFO("%s: tensor '%16s', offs = %8ld\n", __func__, t->name, *offs);
+
+ return res;
+ }
+ }
+
+ GGML_LOG_ERROR("%s: error: tensor '%s' buffer is nil\n", __func__, t->name);
+
+ return res;
+}
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h b/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h
new file mode 100644
index 0000000..952e1be
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-impl.h
@@ -0,0 +1,1051 @@
+#ifndef GGML_METAL_IMPL
+#define GGML_METAL_IMPL
+
+// kernel parameters for mat-vec threadgroups
+//
+// N_R0: number of src0 rows to process per simdgroup
+// N_SG: number of simdgroups per threadgroup
+//
+// TODO: for optimal performance, these should become a function of the device and work size
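+//
+// example: with N_R0_Q4_0 = 4 and N_SG_Q4_0 = 2, each threadgroup processes 4*2 = 8 rows of Q4_0 src0 data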
+
+#define N_R0_Q4_0 4
+#define N_SG_Q4_0 2
+
+#define N_R0_Q4_1 4
+#define N_SG_Q4_1 2
+
+#define N_R0_Q5_0 4
+#define N_SG_Q5_0 2
+
+#define N_R0_Q5_1 4
+#define N_SG_Q5_1 2
+
+#define N_R0_Q8_0 2
+#define N_SG_Q8_0 4
+
+#define N_R0_MXFP4 2
+#define N_SG_MXFP4 2
+
+#define N_R0_Q2_K 4
+#define N_SG_Q2_K 2
+
+#define N_R0_Q3_K 2
+#define N_SG_Q3_K 2
+
+#define N_R0_Q4_K 2
+#define N_SG_Q4_K 2
+
+#define N_R0_Q5_K 2
+#define N_SG_Q5_K 2
+
+#define N_R0_Q6_K 2
+#define N_SG_Q6_K 2
+
+#define N_R0_IQ1_S 4
+#define N_SG_IQ1_S 2
+
+#define N_R0_IQ1_M 4
+#define N_SG_IQ1_M 2
+
+#define N_R0_IQ2_XXS 4
+#define N_SG_IQ2_XXS 2
+
+#define N_R0_IQ2_XS 4
+#define N_SG_IQ2_XS 2
+
+#define N_R0_IQ2_S 4
+#define N_SG_IQ2_S 2
+
+#define N_R0_IQ3_XXS 4
+#define N_SG_IQ3_XXS 2
+
+#define N_R0_IQ3_S 4
+#define N_SG_IQ3_S 2
+
+#define N_R0_IQ4_NL 2
+#define N_SG_IQ4_NL 2
+
+#define N_R0_IQ4_XS 2
+#define N_SG_IQ4_XS 2
+
+// function constants offsets
+#define FC_FLASH_ATTN_EXT_PAD 100
+#define FC_FLASH_ATTN_EXT_BLK 200
+#define FC_FLASH_ATTN_EXT 300
+#define FC_FLASH_ATTN_EXT_VEC 400
+#define FC_FLASH_ATTN_EXT_VEC_REDUCE 500
+#define FC_MUL_MV 600
+#define FC_MUL_MM 700
+#define FC_ROPE 800
+#define FC_SSM_CONV 900
+#define FC_SOLVE_TRI 1000
+#define FC_COUNT_EQUAL 1100
+#define FC_UNARY 1200
+#define FC_BIN 1300
+
+// op-specific constants
+#define OP_FLASH_ATTN_EXT_NQPSG 8
+#define OP_FLASH_ATTN_EXT_NCPSG 64
+
+#define OP_FLASH_ATTN_EXT_VEC_NQPSG 1
+#define OP_FLASH_ATTN_EXT_VEC_NCPSG 32
+
+#define OP_UNARY_NUM_SCALE 10
+#define OP_UNARY_NUM_FILL 11
+#define OP_UNARY_NUM_CLAMP 12
+#define OP_UNARY_NUM_SQR 13
+#define OP_UNARY_NUM_SQRT 14
+#define OP_UNARY_NUM_SIN 15
+#define OP_UNARY_NUM_COS 16
+#define OP_UNARY_NUM_LOG 17
+#define OP_UNARY_NUM_LEAKY_RELU 18
+
+#define OP_UNARY_NUM_TANH 100
+#define OP_UNARY_NUM_RELU 101
+#define OP_UNARY_NUM_SIGMOID 102
+#define OP_UNARY_NUM_GELU 103
+#define OP_UNARY_NUM_GELU_ERF 104
+#define OP_UNARY_NUM_GELU_QUICK 105
+#define OP_UNARY_NUM_SILU 106
+#define OP_UNARY_NUM_ELU 107
+#define OP_UNARY_NUM_NEG 108
+#define OP_UNARY_NUM_ABS 109
+#define OP_UNARY_NUM_SGN 110
+#define OP_UNARY_NUM_STEP 111
+#define OP_UNARY_NUM_HARDSWISH 112
+#define OP_UNARY_NUM_HARDSIGMOID 113
+#define OP_UNARY_NUM_EXP 114
+#define OP_UNARY_NUM_SOFTPLUS 115
+#define OP_UNARY_NUM_EXPM1 116
+
+
+// kernel argument structs
+//
+// - element counters (e.g. ne00) typically use int32_t to reduce register usage
+//   however, be careful about int overflows when using them in the kernel implementation
+//
+// - strides (e.g. nb00) use uint64_t
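+//   (for a contiguous F32 tensor: nb00 = sizeof(float), nb01 = ne00*nb00, nb02 = ne01*nb01, nb03 = ne02*nb02)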
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne10;
+ int32_t ne11;
+ int32_t ne12;
+ int32_t ne13;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ int32_t dim;
+} ggml_metal_kargs_concat;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ float slope;
+ float scale;
+ float bias;
+ float val;
+ float min;
+ float max;
+} ggml_metal_kargs_unary;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne10;
+ int32_t ne11;
+ int32_t ne12;
+ int32_t ne13;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ uint64_t offs;
+ uint64_t o1[8];
+} ggml_metal_kargs_bin;
+
+typedef struct {
+ int64_t ne0;
+ int64_t ne1;
+ size_t nb01;
+ size_t nb02;
+ size_t nb11;
+ size_t nb21;
+} ggml_metal_kargs_add_id;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_repeat;
+
+typedef struct {
+ int64_t nk0;
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int64_t ne0;
+ int64_t ne1;
+ int64_t ne2;
+ int64_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_cpy;
+
+typedef struct {
+ int64_t ne10;
+ int64_t ne11;
+ int64_t ne12;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ uint64_t offs;
+ bool inplace;
+} ggml_metal_kargs_set;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ int32_t n_past;
+ int32_t n_dims;
+ int32_t n_ctx_orig;
+ float freq_base;
+ float freq_scale;
+ float ext_factor;
+ float attn_factor;
+ float beta_fast;
+ float beta_slow;
+ int32_t sect_0;
+ int32_t sect_1;
+ int32_t sect_2;
+ int32_t sect_3;
+ bool src2;
+} ggml_metal_kargs_rope;
+
+typedef struct {
+ int32_t ne11;
+ int32_t ne_12_2; // assume K and V are same shape
+ int32_t ne_12_3;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ uint64_t nb21;
+ uint64_t nb22;
+ uint64_t nb23;
+ int32_t ne31;
+ int32_t ne32;
+ int32_t ne33;
+ uint64_t nb31;
+ uint64_t nb32;
+ uint64_t nb33;
+} ggml_metal_kargs_flash_attn_ext_pad;
+
+typedef struct {
+ int32_t ne01;
+ int32_t ne30;
+ int32_t ne31;
+ int32_t ne32;
+ int32_t ne33;
+ uint64_t nb31;
+ uint64_t nb32;
+ uint64_t nb33;
+} ggml_metal_kargs_flash_attn_ext_blk;
+
+typedef struct {
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne11;
+ int32_t ne_12_2; // assume K and V are same shape
+ int32_t ne_12_3;
+ int32_t ns10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ns20;
+ uint64_t nb21;
+ uint64_t nb22;
+ uint64_t nb23;
+ int32_t ne31;
+ int32_t ne32;
+ int32_t ne33;
+ uint64_t nb31;
+ uint64_t nb32;
+ uint64_t nb33;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ float scale;
+ float max_bias;
+ float m0;
+ float m1;
+ int32_t n_head_log2;
+ float logit_softcap;
+} ggml_metal_kargs_flash_attn_ext;
+
+typedef struct {
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne11;
+ int32_t ne_12_2; // assume K and V are same shape
+ int32_t ne_12_3;
+ int32_t ns10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ns20;
+ uint64_t nb21;
+ uint64_t nb22;
+ uint64_t nb23;
+ int32_t ne31;
+ int32_t ne32;
+ int32_t ne33;
+ uint64_t nb31;
+ uint64_t nb32;
+ uint64_t nb33;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ float scale;
+ float max_bias;
+ float m0;
+ float m1;
+ int32_t n_head_log2;
+ float logit_softcap;
+} ggml_metal_kargs_flash_attn_ext_vec;
+
+typedef struct {
+ int32_t nrows;
+} ggml_metal_kargs_flash_attn_ext_vec_reduce;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne02;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne12;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ne0;
+ int32_t ne1;
+ int16_t r2;
+ int16_t r3;
+} ggml_metal_kargs_mul_mm;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne10;
+ int32_t ne11;
+ int32_t ne12;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t nr0;
+ int16_t r2;
+ int16_t r3;
+} ggml_metal_kargs_mul_mv;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne10;
+ int32_t ne11;
+ int32_t ne12;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ne0;
+ int32_t ne1;
+ int16_t r2;
+ int16_t r3;
+} ggml_metal_kargs_mul_mv_ext;
+
+typedef struct {
+ int32_t ne02;
+ int32_t ne10;
+ int32_t ne11; // n_expert_used (bcast)
+ uint64_t nb11;
+ uint64_t nb12;
+ int32_t ne21; // n_tokens
+ int32_t ne20; // n_expert_used
+ uint64_t nb21;
+} ggml_metal_kargs_mul_mm_id_map0;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne02;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne11;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ne20;
+ int32_t ne21;
+ int32_t ne0;
+ int32_t ne1;
+ int16_t r2;
+ int16_t r3;
+} ggml_metal_kargs_mul_mm_id;
+
+typedef struct {
+ int32_t nei0;
+ int32_t nei1;
+ uint64_t nbi1;
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ int32_t ne10;
+ int32_t ne11;
+ int32_t ne12;
+ int32_t ne13;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ int32_t ne0;
+ int32_t ne1;
+ uint64_t nb1;
+ int32_t nr0;
+} ggml_metal_kargs_mul_mv_id;
+
+// NORM
+// RMS_NORM
+typedef struct {
+ int32_t ne00;
+ int32_t ne00_t;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ float eps;
+ int32_t nef1[3];
+ int32_t nef2[3];
+ int32_t nef3[3];
+ uint64_t nbf1[3];
+ uint64_t nbf2[3];
+ uint64_t nbf3[3];
+} ggml_metal_kargs_norm;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ float eps;
+} ggml_metal_kargs_l2_norm;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ int32_t ngrp;
+ float eps;
+} ggml_metal_kargs_group_norm;
+
+typedef struct {
+ int32_t IC;
+ int32_t IL;
+ int32_t K;
+ int32_t s0;
+ uint64_t nb0;
+ uint64_t nb1;
+} ggml_metal_kargs_conv_transpose_1d;
+
+typedef struct {
+ int32_t IC;
+ int32_t IH;
+ int32_t IW;
+ int32_t KH;
+ int32_t KW;
+ int32_t OC;
+ int32_t s0;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+} ggml_metal_kargs_conv_transpose_2d;
+
+typedef struct {
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ int32_t IW;
+ int32_t IH;
+ int32_t KW;
+ int32_t KH;
+ int32_t IC;
+ int32_t OC;
+ int32_t OW;
+ int32_t OH;
+ int32_t N;
+ int32_t s0;
+ int32_t s1;
+ int32_t p0;
+ int32_t p1;
+ int32_t d0;
+ int32_t d1;
+} ggml_metal_kargs_conv_2d;
+
+typedef struct {
+ uint64_t ofs0;
+ uint64_t ofs1;
+ int32_t IW;
+ int32_t IH;
+ int32_t CHW;
+ int32_t s0;
+ int32_t s1;
+ int32_t p0;
+ int32_t p1;
+ int32_t d0;
+ int32_t d1;
+ int32_t N;
+ int32_t KH;
+ int32_t KW;
+ int32_t KHW; // KH * KW, pre-computed on CPU to save GPU resources
+} ggml_metal_kargs_im2col;
+
+typedef struct{
+ int32_t ne00;
+ uint64_t nb01;
+ int32_t ne10;
+ uint64_t nb11;
+ int32_t ne0;
+ uint64_t nb1;
+ int32_t i00;
+ int32_t i10;
+ float alpha;
+ float limit;
+} ggml_metal_kargs_glu;
+
+typedef struct {
+ uint64_t np;
+} ggml_metal_kargs_sum;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int64_t ne0;
+ int64_t ne1;
+ int64_t ne2;
+ int64_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_sum_rows;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int64_t net0;
+ int64_t net1;
+ int64_t net2;
+ int64_t net3;
+ uint64_t nbt0;
+ uint64_t nbt1;
+ uint64_t nbt2;
+ uint64_t nbt3;
+ bool outb;
+} ggml_metal_kargs_cumsum_blk;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int64_t net0;
+ int64_t net1;
+ int64_t net2;
+ int64_t net3;
+ uint64_t nbt0;
+ uint64_t nbt1;
+ uint64_t nbt2;
+ uint64_t nbt3;
+} ggml_metal_kargs_cumsum_add;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne11;
+ int32_t ne12;
+ int32_t ne13;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ float scale;
+ float max_bias;
+ float m0;
+ float m1;
+ int32_t n_head_log2;
+} ggml_metal_kargs_soft_max;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ int64_t ne10;
+ int64_t ne11;
+ uint64_t nb10;
+ uint64_t nb11;
+ int64_t ne0;
+ int64_t ne1;
+ int64_t ne2;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+} ggml_metal_kargs_ssm_conv;
+
+typedef struct {
+ int64_t d_state;
+ int64_t d_inner;
+ int64_t n_head;
+ int64_t n_group;
+ int64_t n_seq_tokens;
+ int64_t n_seqs;
+ uint64_t s_off;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t ns12;
+ uint64_t nb13;
+ uint64_t nb20;
+ uint64_t nb21;
+ uint64_t ns21;
+ uint64_t nb22;
+ int64_t ne30;
+ uint64_t nb31;
+ uint64_t nb41;
+ uint64_t nb42;
+ uint64_t ns42;
+ uint64_t nb43;
+ uint64_t nb51;
+ uint64_t nb52;
+ uint64_t ns52;
+ uint64_t nb53;
+ uint64_t nb0;
+} ggml_metal_kargs_ssm_scan;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne10;
+ int32_t ne11;
+ int32_t ne12;
+ int32_t ne13;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_solve_tri;
+
+typedef struct {
+ int32_t ne00t;
+ int32_t ne00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne10;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_get_rows;
+
+typedef struct {
+ int32_t nk0;
+ int32_t ne01;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne11;
+ int32_t ne12;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_set_rows;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_diag;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int64_t ne0;
+ int64_t ne1;
+ int64_t ne2;
+ int64_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ float sf0;
+ float sf1;
+ float sf2;
+ float sf3;
+} ggml_metal_kargs_upscale;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int64_t ne0;
+ int64_t ne1;
+ int64_t ne2;
+ int64_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_pad;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int64_t ne0;
+ int64_t ne1;
+ int64_t ne2;
+ int64_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+ int32_t p0;
+ int32_t p1;
+} ggml_metal_kargs_pad_reflect_1d;
+
+typedef struct {
+ uint64_t nb1;
+ int dim;
+ int max_period;
+} ggml_metal_kargs_timestep_embedding;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ uint64_t nb0;
+ uint64_t nb1;
+ uint64_t nb2;
+ uint64_t nb3;
+} ggml_metal_kargs_tri;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ int32_t top_k;
+} ggml_metal_kargs_argsort;
+
+typedef struct {
+ int64_t ne00;
+ int64_t ne01;
+ int64_t ne02;
+ int64_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ int32_t ne0;
+ int32_t ne1;
+ int32_t ne2;
+ int32_t ne3;
+ int32_t top_k;
+ int32_t len;
+} ggml_metal_kargs_argsort_merge;
+
+typedef struct {
+ int64_t ne0;
+ float start;
+ float step;
+} ggml_metal_kargs_arange;
+
+typedef struct {
+ int64_t val;
+} ggml_metal_kargs_memset;
+
+typedef struct {
+ int32_t ne00;
+ int32_t ne01;
+ int32_t ne02;
+ int32_t ne03;
+ uint64_t nb00;
+ uint64_t nb01;
+ uint64_t nb02;
+ uint64_t nb03;
+ uint64_t nb10;
+ uint64_t nb11;
+ uint64_t nb12;
+ uint64_t nb13;
+} ggml_metal_kargs_count_equal;
+
+typedef struct {
+ int32_t k0;
+ int32_t k1;
+ int32_t s0;
+ int32_t s1;
+ int32_t p0;
+ int32_t p1;
+ int64_t IH;
+ int64_t IW;
+ int64_t OH;
+ int64_t OW;
+ int64_t np;
+} ggml_metal_kargs_pool_2d;
+
+typedef struct {
+ int32_t k0;
+ int32_t s0;
+ int32_t p0;
+ int64_t IW;
+ int64_t OW;
+ int64_t np;
+} ggml_metal_kargs_pool_1d;
+
+typedef struct {
+ int64_t ne00;
+ uint64_t nb01;
+} ggml_metal_kargs_argmax;
+
+typedef struct {
+ int64_t np;
+} ggml_metal_kargs_opt_step_adamw;
+
+typedef struct {
+ int64_t np;
+} ggml_metal_kargs_opt_step_sgd;
+
+#endif // GGML_METAL_IMPL
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.cpp b/llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.cpp
new file mode 100644
index 0000000..7db95d1
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.cpp
@@ -0,0 +1,4222 @@
+#include "ggml-metal-ops.h"
+
+#include "ggml.h"
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-metal-impl.h"
+#include "ggml-metal-common.h"
+#include "ggml-metal-device.h"
+
+#include <cassert>
+#include <algorithm>
+#include <limits>
+#include <cmath>
+
+static ggml_metal_buffer_id ggml_metal_get_buffer_id(const ggml_tensor * t) {
+ if (!t) {
+ return { nullptr, 0 };
+ }
+
+ ggml_backend_buffer_t buffer = t->view_src ? t->view_src->buffer : t->buffer;
+
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t) buffer->context;
+
+ return ggml_metal_buffer_get_id(ctx, t);
+}
+
+struct ggml_metal_op {
+ ggml_metal_op(
+ ggml_metal_device_t dev,
+ ggml_metal_cmd_buf_t cmd_buf,
+ ggml_cgraph * gf,
+ int idx_start,
+ int idx_end,
+ bool use_fusion,
+ bool use_concurrency,
+ bool use_capture,
+ int debug_graph,
+ int debug_fusion) {
+ this->dev = dev;
+ this->lib = ggml_metal_device_get_library(dev);
+ this->enc = ggml_metal_encoder_init(cmd_buf, use_concurrency);
+ this->mem_ranges = ggml_mem_ranges_init(debug_graph);
+ this->idx_start = idx_start;
+ this->idx_end = idx_end;
+ this->use_fusion = use_fusion;
+ this->use_concurrency = use_concurrency;
+ this->use_capture = use_capture;
+ this->debug_graph = debug_graph;
+ this->debug_fusion = debug_fusion;
+ this->gf = gf;
+
+ idxs.reserve(gf->n_nodes);
+
+ // filter empty nodes
+ // TODO: this can be removed when the allocator starts filtering them earlier
+ // https://github.com/ggml-org/llama.cpp/pull/16130#issuecomment-3327905830
+ for (int i = idx_start; i < idx_end; i++) {
+ if (!ggml_op_is_empty(gf->nodes[i]->op) && !ggml_is_empty(gf->nodes[i])) {
+ idxs.push_back(i);
+ }
+ }
+ }
+
+ ~ggml_metal_op() {
+ ggml_metal_encoder_end_encoding(this->enc);
+ ggml_metal_encoder_free(this->enc);
+ ggml_mem_ranges_free(this->mem_ranges);
+ }
+
+ int n_nodes() const {
+ return idxs.size();
+ }
+
+ ggml_tensor * node(int i) const {
+ assert(i >= 0 && i < (int) idxs.size());
+ return ggml_graph_node(gf, idxs[i]);
+ }
+
+ bool can_fuse(int i0, const ggml_op * ops, int n_ops) const {
+ assert(use_fusion);
+ assert(i0 >= 0 && i0 < n_nodes());
+
+ if (i0 + n_ops > n_nodes()) {
+ return false;
+ }
+
+ return ggml_can_fuse_ext(gf, idxs.data() + i0, ops, n_ops);
+ }
+
+ ggml_metal_device_t dev;
+ ggml_metal_library_t lib;
+ ggml_metal_encoder_t enc;
+ ggml_mem_ranges_t mem_ranges;
+
+ bool use_fusion;
+ bool use_concurrency;
+ bool use_capture;
+
+ int debug_graph;
+ int debug_fusion;
+
+private:
+ ggml_cgraph * gf;
+
+ int idx_start;
+ int idx_end;
+
+ // non-empty node indices
+ std::vector<int> idxs;
+};
+
+ggml_metal_op_t ggml_metal_op_init(
+ ggml_metal_device_t dev,
+ ggml_metal_cmd_buf_t cmd_buf,
+ ggml_cgraph * gf,
+ int idx_start,
+ int idx_end,
+ bool use_fusion,
+ bool use_concurrency,
+ bool use_capture,
+ int debug_graph,
+ int debug_fusion) {
+ ggml_metal_op_t res = new ggml_metal_op(
+ dev,
+ cmd_buf,
+ gf,
+ idx_start,
+ idx_end,
+ use_fusion,
+ use_concurrency,
+ use_capture,
+ debug_graph,
+ debug_fusion);
+
+ return res;
+}
+
+void ggml_metal_op_free(ggml_metal_op_t ctx) {
+ delete ctx;
+}
+
+int ggml_metal_op_n_nodes(ggml_metal_op_t ctx) {
+ return ctx->n_nodes();
+}
+
+static bool ggml_metal_op_concurrency_reset(ggml_metal_op_t ctx) {
+ if (!ctx->mem_ranges) {
+ return true;
+ }
+
+ ggml_metal_encoder_memory_barrier(ctx->enc);
+
+ ggml_mem_ranges_reset(ctx->mem_ranges);
+
+ return true;
+}
+
+static bool ggml_metal_op_concurrency_check(ggml_metal_op_t ctx, const ggml_tensor * node) {
+ if (!ctx->mem_ranges) {
+ return false;
+ }
+
+ return ggml_mem_ranges_check(ctx->mem_ranges, node);
+}
+
+static bool ggml_metal_op_concurrency_add(ggml_metal_op_t ctx, const ggml_tensor * node) {
+ if (!ctx->mem_ranges) {
+ return true;
+ }
+
+ return ggml_mem_ranges_add(ctx->mem_ranges, node);
+}
+
+static int ggml_metal_op_encode_impl(ggml_metal_op_t ctx, int idx) {
+ struct ggml_tensor * node = ctx->node(idx);
+
+ //GGML_LOG_INFO("%s: encoding node %3d, op = %8s\n", __func__, idx, ggml_op_name(node->op));
+
+ if (ggml_is_empty(node)) {
+ return 1;
+ }
+
+ switch (node->op) {
+ case GGML_OP_NONE:
+ case GGML_OP_RESHAPE:
+ case GGML_OP_VIEW:
+ case GGML_OP_TRANSPOSE:
+ case GGML_OP_PERMUTE:
+ {
+ // noop -> next node
+ if (ctx->debug_graph > 0) {
+ GGML_LOG_DEBUG("%s: node[%5d] - %-12s %s\n", __func__, idx, ggml_op_name(node->op), "(noop)");
+ }
+ } return 1;
+ default:
+ {
+ } break;
+ }
+
+ if (!ggml_metal_device_supports_op(ctx->dev, node)) {
+ GGML_LOG_ERROR("%s: error: unsupported op '%s'\n", __func__, ggml_op_desc(node));
+ GGML_ABORT("unsupported op");
+ }
+
+ if ((node->flags & GGML_TENSOR_FLAG_COMPUTE) == 0) {
+ return 1;
+ }
+
+ int n_fuse = 1;
+
+ // check if the current node can run concurrently with other nodes before it
+ // the condition is that:
+ // - the current node cannot write to any previous src or dst ranges
+ // - the current node cannot read from any previous dst ranges
+ //
+ // if the condition is not satisfied, we put a memory barrier and clear all ranges
+ // otherwise, we add the new ranges to the encoding context and process the node concurrently
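+    // for example, two nodes that only read overlapping data but write to disjoint ranges can be encoded concurrently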
+ //
+ {
+ const bool is_concurrent = ggml_metal_op_concurrency_check(ctx, node);
+
+ if (!is_concurrent) {
+ ggml_metal_op_concurrency_reset(ctx);
+ }
+
+ if (ctx->debug_graph > 0) {
+ GGML_LOG_DEBUG("%s: node[%5d] - %-12s %-12s %s\n", __func__, idx, ggml_op_name(node->op), ggml_get_name(node), is_concurrent ? "(concurrent)" : "");
+ }
+ if (ctx->debug_graph > 1) {
+ GGML_TENSOR_LOCALS( int64_t, ne0, node->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, node->src[0], nb);
+ GGML_TENSOR_LOCALS( int64_t, ne1, node->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, node->src[1], nb);
+ GGML_TENSOR_LOCALS( int64_t, ne2, node->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, node->src[2], nb);
+ GGML_TENSOR_LOCALS( int64_t, ne3, node->src[3], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb3, node->src[3], nb);
+ GGML_TENSOR_LOCALS( int64_t, ne, node, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, node, nb);
+
+ if (node->src[0]) {
+ GGML_LOG_DEBUG("%s: src0 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[0]->type), ne00, ne01, ne02, ne03, nb00, nb01, nb02, nb03,
+ ggml_is_contiguous(node->src[0]), node->src[0]->name);
+ }
+ if (node->src[1]) {
+ GGML_LOG_DEBUG("%s: src1 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[1]->type), ne10, ne11, ne12, ne13, nb10, nb11, nb12, nb13,
+ ggml_is_contiguous(node->src[1]), node->src[1]->name);
+ }
+ if (node->src[2]) {
+ GGML_LOG_DEBUG("%s: src2 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[2]->type), ne20, ne21, ne22, ne23, nb20, nb21, nb22, nb23,
+ ggml_is_contiguous(node->src[2]), node->src[2]->name);
+ }
+ if (node->src[3]) {
+ GGML_LOG_DEBUG("%s: src3 - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], %d, %s\n", __func__, ggml_type_name(node->src[3]->type), ne30, ne31, ne32, ne33, nb30, nb31, nb32, nb33,
+ ggml_is_contiguous(node->src[3]), node->src[3]->name);
+ }
+ if (node) {
+ GGML_LOG_DEBUG("%s: node - %4s [%5lld, %5lld, %5lld, %5lld] [%5lld, %5lld, %5lld, %5lld], 1, %s\n", __func__, ggml_type_name(node->type), ne0, ne1, ne2, ne3, nb0, nb1, nb2, nb3,
+ node->name);
+ }
+ }
+ }
+
+ switch (node->op) {
+ case GGML_OP_CONCAT:
+ {
+ n_fuse = ggml_metal_op_concat(ctx, idx);
+ } break;
+ case GGML_OP_ADD:
+ case GGML_OP_SUB:
+ case GGML_OP_MUL:
+ case GGML_OP_DIV:
+ {
+ n_fuse = ggml_metal_op_bin(ctx, idx);
+ } break;
+ case GGML_OP_ADD_ID:
+ {
+ n_fuse = ggml_metal_op_add_id(ctx, idx);
+ } break;
+ case GGML_OP_REPEAT:
+ {
+ n_fuse = ggml_metal_op_repeat(ctx, idx);
+ } break;
+ case GGML_OP_ACC:
+ {
+ n_fuse = ggml_metal_op_acc(ctx, idx);
+ } break;
+ case GGML_OP_SCALE:
+ case GGML_OP_FILL:
+ case GGML_OP_CLAMP:
+ case GGML_OP_LEAKY_RELU:
+ case GGML_OP_SQR:
+ case GGML_OP_SQRT:
+ case GGML_OP_SIN:
+ case GGML_OP_COS:
+ case GGML_OP_LOG:
+ case GGML_OP_UNARY:
+ {
+ n_fuse = ggml_metal_op_unary(ctx, idx);
+ } break;
+ case GGML_OP_GLU:
+ {
+ n_fuse = ggml_metal_op_glu(ctx, idx);
+ } break;
+ case GGML_OP_SUM:
+ {
+ n_fuse = ggml_metal_op_sum(ctx, idx);
+ } break;
+ case GGML_OP_SUM_ROWS:
+ case GGML_OP_MEAN:
+ {
+ n_fuse = ggml_metal_op_sum_rows(ctx, idx);
+ } break;
+ case GGML_OP_CUMSUM:
+ {
+ n_fuse = ggml_metal_op_cumsum(ctx, idx);
+ } break;
+ case GGML_OP_SOFT_MAX:
+ {
+ n_fuse = ggml_metal_op_soft_max(ctx, idx);
+ } break;
+ case GGML_OP_SSM_CONV:
+ {
+ n_fuse = ggml_metal_op_ssm_conv(ctx, idx);
+ } break;
+ case GGML_OP_SSM_SCAN:
+ {
+ n_fuse = ggml_metal_op_ssm_scan(ctx, idx);
+ } break;
+ case GGML_OP_RWKV_WKV6:
+ case GGML_OP_RWKV_WKV7:
+ {
+ n_fuse = ggml_metal_op_rwkv(ctx, idx);
+ } break;
+ case GGML_OP_SOLVE_TRI:
+ {
+ n_fuse = ggml_metal_op_solve_tri(ctx, idx);
+ } break;
+ case GGML_OP_MUL_MAT:
+ {
+ n_fuse = ggml_metal_op_mul_mat(ctx, idx);
+ } break;
+ case GGML_OP_MUL_MAT_ID:
+ {
+ n_fuse = ggml_metal_op_mul_mat_id(ctx, idx);
+ } break;
+ case GGML_OP_GET_ROWS:
+ {
+ n_fuse = ggml_metal_op_get_rows(ctx, idx);
+ } break;
+ case GGML_OP_SET_ROWS:
+ {
+ n_fuse = ggml_metal_op_set_rows(ctx, idx);
+ } break;
+ case GGML_OP_DIAG:
+ {
+ n_fuse = ggml_metal_op_diag(ctx, idx);
+ } break;
+ case GGML_OP_L2_NORM:
+ {
+ n_fuse = ggml_metal_op_l2_norm(ctx, idx);
+ } break;
+ case GGML_OP_GROUP_NORM:
+ {
+ n_fuse = ggml_metal_op_group_norm(ctx, idx);
+ } break;
+ case GGML_OP_NORM:
+ case GGML_OP_RMS_NORM:
+ {
+ n_fuse = ggml_metal_op_norm(ctx, idx);
+ } break;
+ case GGML_OP_ROPE:
+ {
+ n_fuse = ggml_metal_op_rope(ctx, idx);
+ } break;
+ case GGML_OP_IM2COL:
+ {
+ n_fuse = ggml_metal_op_im2col(ctx, idx);
+ } break;
+ case GGML_OP_CONV_2D:
+ {
+ n_fuse = ggml_metal_op_conv_2d(ctx, idx);
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_1D:
+ {
+ n_fuse = ggml_metal_op_conv_transpose_1d(ctx, idx);
+ } break;
+ case GGML_OP_CONV_TRANSPOSE_2D:
+ {
+ n_fuse = ggml_metal_op_conv_transpose_2d(ctx, idx);
+ } break;
+ case GGML_OP_UPSCALE:
+ {
+ n_fuse = ggml_metal_op_upscale(ctx, idx);
+ } break;
+ case GGML_OP_PAD:
+ {
+ n_fuse = ggml_metal_op_pad(ctx, idx);
+ } break;
+ case GGML_OP_PAD_REFLECT_1D:
+ {
+ n_fuse = ggml_metal_op_pad_reflect_1d(ctx, idx);
+ } break;
+ case GGML_OP_ARANGE:
+ {
+ n_fuse = ggml_metal_op_arange(ctx, idx);
+ } break;
+ case GGML_OP_TIMESTEP_EMBEDDING:
+ {
+ n_fuse = ggml_metal_op_timestep_embedding(ctx, idx);
+ } break;
+ case GGML_OP_ARGSORT:
+ {
+ n_fuse = ggml_metal_op_argsort(ctx, idx);
+ } break;
+ case GGML_OP_TOP_K:
+ {
+ n_fuse = ggml_metal_op_top_k(ctx, idx);
+ } break;
+ case GGML_OP_TRI:
+ {
+ n_fuse = ggml_metal_op_tri(ctx, idx);
+ } break;
+ case GGML_OP_FLASH_ATTN_EXT:
+ {
+ n_fuse = ggml_metal_op_flash_attn_ext(ctx, idx);
+ } break;
+ case GGML_OP_DUP:
+ case GGML_OP_CPY:
+ case GGML_OP_CONT:
+ {
+ n_fuse = ggml_metal_op_cpy(ctx, idx);
+ } break;
+ case GGML_OP_POOL_1D:
+ {
+ n_fuse = ggml_metal_op_pool_1d(ctx, idx);
+ } break;
+ case GGML_OP_POOL_2D:
+ {
+ n_fuse = ggml_metal_op_pool_2d(ctx, idx);
+ } break;
+ case GGML_OP_ARGMAX:
+ {
+ n_fuse = ggml_metal_op_argmax(ctx, idx);
+ } break;
+ case GGML_OP_OPT_STEP_ADAMW:
+ {
+ n_fuse = ggml_metal_op_opt_step_adamw(ctx, idx);
+ } break;
+ case GGML_OP_OPT_STEP_SGD:
+ {
+ n_fuse = ggml_metal_op_opt_step_sgd(ctx, idx);
+ } break;
+ case GGML_OP_COUNT_EQUAL:
+ {
+ n_fuse = ggml_metal_op_count_equal(ctx, idx);
+ } break;
+ default:
+ {
+ GGML_LOG_ERROR("%s: error: node %3d, op = %8s not implemented\n", __func__, idx, ggml_op_name(node->op));
+ GGML_ABORT("fatal error");
+ }
+ }
+
+ if (ctx->debug_graph > 0) {
+ if (n_fuse > 1) {
+ GGML_LOG_DEBUG("%s: fuse %d ops\n", __func__, n_fuse);
+ }
+ }
+
+ // update the mem ranges in the encoding context
+ for (int i = 0; i < n_fuse; ++i) {
+ if (!ggml_metal_op_concurrency_add(ctx, ctx->node(idx + i))) {
+ ggml_metal_op_concurrency_reset(ctx);
+ }
+ }
+
+ return n_fuse;
+}
+
+int ggml_metal_op_encode(ggml_metal_op_t ctx, int idx) {
+ if (ctx->use_capture) {
+ ggml_metal_encoder_debug_group_push(ctx->enc, ggml_op_desc(ctx->node(idx)));
+ }
+
+ int res = ggml_metal_op_encode_impl(ctx, idx);
+ if (idx + res > ctx->n_nodes()) {
+ GGML_ABORT("fusion error: nodes spanning multiple encoders have been fused. this indicates a bug in the fusion logic %s",
+ "https://github.com/ggml-org/llama.cpp/pull/14849");
+ }
+
+ if (ctx->use_capture) {
+ ggml_metal_encoder_debug_group_pop(ctx->enc);
+ }
+
+ return res;
+}
+
+int ggml_metal_op_concat(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int32_t dim = ((const int32_t *) op->op_params)[0];
+
+ ggml_metal_kargs_concat args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.ne13 =*/ ne13,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.dim =*/ dim,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_base(lib, GGML_OP_CONCAT);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ const int nth = std::min(1024, ne0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_repeat(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_repeat(lib, op->type);
+
+ ggml_metal_kargs_repeat args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_acc(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->type == GGML_TYPE_F32);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(ggml_is_contiguous(op->src[1]));
+
+ const size_t pnb1 = ((const int32_t *) op->op_params)[0];
+ const size_t pnb2 = ((const int32_t *) op->op_params)[1];
+ const size_t pnb3 = ((const int32_t *) op->op_params)[2];
+ const size_t offs = ((const int32_t *) op->op_params)[3];
+
+ const bool inplace = (bool) ((const int32_t *) op->op_params)[4];
+
+ if (!inplace) {
+        // run a separate kernel to cpy src->dst
+ // not sure how to avoid this
+ // TODO: make a simpler cpy_bytes kernel
+
+ //const id<MTLComputePipelineState> pipeline = ctx->pipelines[GGML_METAL_PIPELINE_TYPE_CPY_F32_F32].obj;
+ auto pipeline = ggml_metal_library_get_pipeline_cpy(lib, op->src[0]->type, op->type);
+
+ ggml_metal_kargs_cpy args = {
+ /*.nk0 =*/ ne00,
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
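+        // barrier: the add below updates dst regions that the copy above has just written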
+ ggml_metal_op_concurrency_reset(ctx);
+ }
+
+ ggml_metal_kargs_bin args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ pnb1,
+ /*.nb02 =*/ pnb2,
+ /*.nb03 =*/ pnb3,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.ne13 =*/ ne13,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ pnb1,
+ /*.nb2 =*/ pnb2,
+ /*.nb3 =*/ pnb3,
+ /*.offs =*/ offs,
+ /*.o1 =*/ { 0 },
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_bin_one(lib, GGML_OP_ADD);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne11, ne12, ne13, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_unary(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ ggml_metal_kargs_unary args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.slope =*/ 0.0,
+ /*.scale =*/ 0.0,
+ /*.bias =*/ 0.0,
+ /*.val =*/ 0.0,
+ /*.min =*/ 0.0,
+ /*.max =*/ 0.0,
+ };
+
+ if (op->op == GGML_OP_LEAKY_RELU) {
+ args.slope = ggml_get_op_params_f32(op, 0);
+ }
+
+ if (op->op == GGML_OP_SCALE) {
+ args.scale = ggml_get_op_params_f32(op, 0);
+ args.bias = ggml_get_op_params_f32(op, 1);
+ }
+
+ if (op->op == GGML_OP_FILL) {
+ args.val = ggml_get_op_params_f32(op, 0);
+ }
+
+ if (op->op == GGML_OP_CLAMP) {
+ args.min = ggml_get_op_params_f32(op, 0);
+ args.max = ggml_get_op_params_f32(op, 1);
+ }
+
+ auto pipeline = ggml_metal_library_get_pipeline_unary(lib, op);
+
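+    // the c4 variant of the kernel operates on 4-element vectors, so the element counts are divided by 4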
+ if (pipeline.c4) {
+ args.ne00 = ne00/4;
+ args.ne0 = ne0/4;
+ }
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+
+ if (pipeline.cnt) {
+ const int n = pipeline.c4 ? ggml_nelements(op)/4 : ggml_nelements(op);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1);
+ } else {
+ const int nth_max = MIN(256, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ const int nth = MIN(args.ne00, nth_max);
+
+ const int nk0 = (args.ne00 + nth - 1)/nth;
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nk0*ne01, ne02, ne03, nth, 1, 1);
+ }
+
+ return 1;
+}
+
+int ggml_metal_op_glu(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ if (op->src[1]) {
+ GGML_ASSERT(ggml_are_same_shape(op->src[0], op->src[1]));
+ }
+
+ auto pipeline = ggml_metal_library_get_pipeline_glu(lib, op);
+
+ const int32_t swp = ggml_get_op_params_i32(op, 1);
+ const float alpha = ggml_get_op_params_f32(op, 2);
+ const float limit = ggml_get_op_params_f32(op, 3);
+
+ const int32_t i00 = swp ? ne0 : 0;
+ const int32_t i10 = swp ? 0 : ne0;
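+    // without a separate gate tensor (src1), the two halves of the src0 row serve as value and gate;
+    // i00/i10 are the column offsets of each half (swapped when swp is set)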
+
+ ggml_metal_kargs_glu args = {
+ /*.ne00 =*/ ne00,
+ /*.nb01 =*/ nb01,
+ /*.ne10 =*/ op->src[1] ? ne10 : ne00,
+ /*.nb11 =*/ op->src[1] ? nb11 : nb01,
+ /*.ne0 =*/ ne0,
+ /*.nb1 =*/ nb1,
+ /*.i00 =*/ op->src[1] ? 0 : i00,
+ /*.i10 =*/ op->src[1] ? 0 : i10,
+ /*.alpha=*/ alpha,
+ /*.limit=*/ limit
+ };
+
+ const int64_t nrows = ggml_nrows(op->src[0]);
+
+ const int32_t nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00/2);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ if (op->src[1]) {
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ } else {
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 2);
+ }
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_sum(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ const uint64_t n = (uint64_t) ggml_nelements(op->src[0]);
+
+ ggml_metal_kargs_sum args = {
+ /*.np =*/ n,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_sum(lib, op);
+
+ int nth = 32; // SIMD width
+
+ while (nth < (int) n && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
+ nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+ nth = std::min(nth, (int) n);
+
+ const int nsg = (nth + 31) / 32;
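+    // single-threadgroup reduction: each of the nsg simdgroups stores a partial sum in threadgroup memory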
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, nsg * sizeof(float), 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_sum_rows(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_sum_rows args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_sum_rows(lib, op);
+
+ int nth = 32; // SIMD width
+
+ while (nth < ne00 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
+ nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+ nth = std::min(nth, ne00);
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_cumsum(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline_blk = ggml_metal_library_get_pipeline_cumsum_blk(lib, op);
+
+ int nth = 1;
+ while (nth < ne00 && 2*nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline_blk)) {
+ nth *= 2;
+ }
+
+ GGML_ASSERT(ne00 <= nth*nth);
+
+ const int64_t net0 = (ne00 + nth - 1) / nth;
+ const int64_t net1 = ne01;
+ const int64_t net2 = ne02;
+ const int64_t net3 = ne03;
+
+ const uint64_t nbt0 = sizeof(float);
+ const uint64_t nbt1 = net0*nbt0;
+ const uint64_t nbt2 = net1*nbt1;
+ const uint64_t nbt3 = net2*nbt2;
+
+ const size_t smem = GGML_PAD(32*sizeof(float), 16);
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ ggml_metal_buffer_id bid_tmp = bid_dst;
+ bid_tmp.offs += ggml_nbytes(op);
+
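+    // three-pass block scan:
+    //  1. scan each block of nth elements and (if needed) write the block totals to a temporary buffer placed right after dst
+    //  2. scan the block totals in-place
+    //  3. add the scanned block totals back to the corresponding blocks of dst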
+ {
+ ggml_metal_kargs_cumsum_blk args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.net0 =*/ net0,
+ /*.net1 =*/ net1,
+ /*.net2 =*/ net2,
+ /*.net3 =*/ net3,
+ /*.nbt0 =*/ nbt0,
+ /*.nbt1 =*/ nbt1,
+ /*.nbt2 =*/ nbt2,
+ /*.nbt3 =*/ nbt3,
+ /*.outb =*/ ne00 > nth,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline_blk);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 3);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, net0*ne01, ne02, ne03, nth, 1, 1);
+ }
+
+ if (ne00 > nth) {
+ ggml_metal_op_concurrency_reset(ctx);
+
+ {
+ ggml_metal_kargs_cumsum_blk args = {
+ /*.ne00 =*/ net0,
+ /*.ne01 =*/ net1,
+ /*.ne02 =*/ net2,
+ /*.ne03 =*/ net3,
+ /*.nb00 =*/ nbt0,
+ /*.nb01 =*/ nbt1,
+ /*.nb02 =*/ nbt2,
+ /*.nb03 =*/ nbt3,
+ /*.net0 =*/ net0,
+ /*.net1 =*/ net1,
+ /*.net2 =*/ net2,
+ /*.net3 =*/ net3,
+ /*.nbt0 =*/ nbt0,
+ /*.nbt1 =*/ nbt1,
+ /*.nbt2 =*/ nbt2,
+ /*.nbt3 =*/ nbt3,
+ /*.outb =*/ false,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline_blk);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 3);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, net1, net2, net3, nth, 1, 1);
+ }
+
+ ggml_metal_op_concurrency_reset(ctx);
+
+ {
+ auto pipeline_add = ggml_metal_library_get_pipeline_cumsum_add(lib, op);
+
+ ggml_metal_kargs_cumsum_add args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.net0 =*/ net0,
+ /*.net1 =*/ net1,
+ /*.net2 =*/ net2,
+ /*.net3 =*/ net3,
+ /*.nbt0 =*/ nbt0,
+ /*.nbt1 =*/ nbt1,
+ /*.nbt2 =*/ nbt2,
+ /*.nbt3 =*/ nbt3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline_add);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, net0*ne01, ne02, ne03, nth, 1, 1);
+ }
+ }
+
+ return 1;
+}
+
+int ggml_metal_op_get_rows(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_get_rows(lib, op->src[0]->type);
+
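+    // ne00t is the number of work items per row: ne00/16 for quantized source types (16 elements per work item), ne00 otherwise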
+ ggml_metal_kargs_get_rows args = {
+ /*.ne00t =*/ ggml_is_quantized(op->src[0]->type) ? ne00/16 : ne00,
+ /*.ne00 =*/ ne00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne10 =*/ ne10,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
+ const int nth = std::min(args.ne00t, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ const int nw0 = (args.ne00t + nth - 1)/nth;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nw0*ne10, ne11, ne12, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_set_rows(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_set_rows(lib, op->src[1]->type, op->type);
+
+ const int32_t nk0 = ne0/ggml_blck_size(op->type);
+
+ int nth = 32; // SIMD width
+
+ while (nth < nk0 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
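+    // when the rows are shorter than the threadgroup, pack several rows per threadgroup (nrptg)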
+ int nrptg = 1;
+ if (nth > nk0) {
+ nrptg = (nth + nk0 - 1)/nk0;
+ nth = nk0;
+
+ if (nrptg*nth > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nrptg--;
+ }
+ }
+
+ nth = std::min(nth, nk0);
+
+ ggml_metal_kargs_set_rows args = {
+ /*.nk0 =*/ nk0,
+ /*.ne01 =*/ ne01,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nrptg - 1)/nrptg, ne02, ne03, nth, nrptg, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_diag(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS(int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS(int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_diag args = {
+ /*.ne00 =*/ne00,
+ /*.ne01 =*/ne01,
+ /*.ne02 =*/ne02,
+ /*.ne03 =*/ne03,
+ /*.nb00 =*/nb00,
+ /*.nb01 =*/nb01,
+ /*.nb02 =*/nb02,
+ /*.nb03 =*/nb03,
+ /*.ne0 =*/ne0,
+ /*.ne1 =*/ne1,
+ /*.ne2 =*/ne2,
+ /*.ne3 =*/ne3,
+ /*.nb0 =*/nb0,
+ /*.nb1 =*/nb1,
+ /*.nb2 =*/nb2,
+ /*.nb3 =*/nb3,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_diag(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, 32, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_soft_max(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ float scale;
+ float max_bias;
+
+ memcpy(&scale, ((const int32_t *) op->op_params) + 0, sizeof(scale));
+ memcpy(&max_bias, ((const int32_t *) op->op_params) + 1, sizeof(max_bias));
+
+ const uint32_t n_head = op->src[0]->ne[2];
+ const int32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
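+    // ALiBi slope bases: heads below n_head_log2 use powers of m0, the remaining heads use odd powers of m1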
+ const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
+ const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+ // softmax
+
+ ggml_metal_kargs_soft_max args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.ne13 =*/ ne13,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.scale =*/ scale,
+ /*.max_bias =*/ max_bias,
+ /*.m0 =*/ m0,
+ /*.m1 =*/ m1,
+ /*.n_head_log2 =*/ n_head_log2,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_soft_max(lib, op);
+
+ int nth = 32; // SIMD width
+
+ if (ne00%4 == 0) {
+ while (nth < ne00/4 && nth*ne01*ne02*ne03 < 256) {
+ nth *= 2;
+ }
+ } else {
+ while (nth < ne00 && nth*ne01*ne02*ne03 < 256) {
+ nth *= 2;
+ }
+ }
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ if (op->src[1]) {
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ } else {
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 2);
+ }
+ if (op->src[2]) {
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+ } else {
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 3);
+ }
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 4);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_ssm_conv(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_ssm_conv args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ };
+
+ // Use batched kernel for prefill (ne1 > 1) to reduce threadgroup dispatch overhead
+ const bool use_batched = (ne1 > 1);
+
+ if (use_batched) {
+        // Choose BATCH_SIZE as a power of 2 scaled with ne1, capped at 256
+ int BATCH_SIZE;
+ if (ne1 > 128) BATCH_SIZE = 256;
+ else if (ne1 > 64 ) BATCH_SIZE = 128;
+ else if (ne1 > 32 ) BATCH_SIZE = 64;
+ else if (ne1 > 16 ) BATCH_SIZE = 32;
+ else if (ne1 > 8 ) BATCH_SIZE = 16;
+ else if (ne1 > 4 ) BATCH_SIZE = 8;
+ else BATCH_SIZE = 2;
+
+ auto pipeline = ggml_metal_library_get_pipeline_ssm_conv_batched(lib, op, BATCH_SIZE);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 3);
+
+ // Dispatch: ne01 rows, ceil(ne1/BATCH_SIZE) token batches, ne02 sequences
+ // Each threadgroup has BATCH_SIZE threads, each handling one token
+ const int n_token_batches = (ne1 + BATCH_SIZE - 1) / BATCH_SIZE;
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, n_token_batches, ne02, BATCH_SIZE, 1, 1);
+ } else {
+ auto pipeline = ggml_metal_library_get_pipeline_ssm_conv(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne1, ne02, 1, 1, 1);
+ }
+
+ return 1;
+}
+
+int ggml_metal_op_ssm_scan(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne4, op->src[4], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb4, op->src[4], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne5, op->src[5], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb5, op->src[5], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne6, op->src[6], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb6, op->src[6], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const ggml_tensor * src3 = op->src[3];
+ const ggml_tensor * src4 = op->src[4];
+ const ggml_tensor * src5 = op->src[5];
+ const ggml_tensor * src6 = op->src[6];
+
+ GGML_ASSERT(src3);
+ GGML_ASSERT(src4);
+ GGML_ASSERT(src5);
+ GGML_ASSERT(src6);
+
+ const int64_t d_state = ne00;
+ const int64_t d_inner = ne01;
+ const int64_t n_head = ne02;
+ const int64_t n_group = ne41;
+ const int64_t n_seq_tokens = ne12;
+ const int64_t n_seqs = ne13;
+
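+    // dst stores the output followed by the updated states; s_off is the byte offset of the states section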
+ ggml_metal_kargs_ssm_scan args = {
+ /*.d_state =*/ d_state,
+ /*.d_inner =*/ d_inner,
+ /*.n_head =*/ n_head,
+ /*.n_group =*/ n_group,
+ /*.n_seq_tokens =*/ n_seq_tokens,
+ /*.n_seqs =*/ n_seqs,
+ /*.s_off =*/ ggml_nelements(op->src[1]) * sizeof(float),
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.ns12 =*/ nb12/nb10,
+ /*.nb13 =*/ nb13,
+ /*.nb20 =*/ nb20,
+ /*.nb21 =*/ nb21,
+ /*.ns21 =*/ nb21/nb20,
+ /*.nb22 =*/ nb22,
+ /*.ne30 =*/ ne30,
+ /*.nb31 =*/ nb31,
+ /*.nb41 =*/ nb41,
+ /*.nb42 =*/ nb42,
+ /*.ns42 =*/ nb42/nb40,
+ /*.nb43 =*/ nb43,
+ /*.nb51 =*/ nb51,
+ /*.nb52 =*/ nb52,
+ /*.ns52 =*/ nb52/nb50,
+ /*.nb53 =*/ nb53,
+ /*.nb0 =*/ nb0,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_ssm_scan(lib, op);
+
+ GGML_ASSERT(d_state <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[3]), 4);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[4]), 5);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[5]), 6);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[6]), 7);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 8);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, d_inner, n_head, n_seqs, d_state, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_rwkv(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int64_t B = op->op == GGML_OP_RWKV_WKV6 ? op->src[5]->ne[1] : op->src[6]->ne[1];
+ const int64_t T = op->src[0]->ne[2];
+ const int64_t C = op->ne[0];
+ const int64_t H = op->src[0]->ne[1];
+
+ auto pipeline = ggml_metal_library_get_pipeline_rwkv(lib, op);
+
+ int ida = 0;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[3]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[4]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[5]), ida++);
+ if (op->op == GGML_OP_RWKV_WKV7) {
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[6]), ida++);
+ }
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), ida++);
+ ggml_metal_encoder_set_bytes (enc, (void *) &B, sizeof(B), ida++);
+ ggml_metal_encoder_set_bytes (enc, (void *) &T, sizeof(T), ida++);
+ ggml_metal_encoder_set_bytes (enc, (void *) &C, sizeof(C), ida++);
+ ggml_metal_encoder_set_bytes (enc, (void *) &H, sizeof(H), ida++);
+
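+    // grid: one threadgroup per (sequence, head) pair (B*H total), each with C/H threads (the head size)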
+ ggml_metal_encoder_dispatch_threadgroups(enc, B * H, 1, 1, C/H, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_solve_tri(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_solve_tri args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.ne13 =*/ ne13,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_solve_tri(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ const int nsg = pipeline.nsg;
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, pipeline.smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne10 + nsg - 1)/nsg, ne02, ne03, 32, nsg, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_cpy(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_cpy(lib, op->src[0]->type, op->type);
+
+ GGML_ASSERT(ne00 % ggml_blck_size(op->src[0]->type) == 0);
+
+ int64_t nk0 = ne00;
+ if (ggml_is_quantized(op->src[0]->type)) {
+ nk0 = ne00/16;
+ } else if (ggml_is_quantized(op->type)) {
+ nk0 = ne00/ggml_blck_size(op->type);
+ }
+
+ int nth = std::min<int>(nk0, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ // when rows are small, we can batch them together in a single threadgroup
+ int nrptg = 1;
+
+ // TODO: relax this constraint in the future
+ if (ggml_blck_size(op->src[0]->type) == 1 && ggml_blck_size(op->type) == 1) {
+ if (nth > nk0) {
+ nrptg = (nth + nk0 - 1)/nk0;
+ nth = nk0;
+
+ if (nrptg*nth > ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nrptg--;
+ }
+ }
+ }
+
+ nth = std::min<int>(nth, nk0);
+
+ ggml_metal_kargs_cpy args = {
+ /*.nk0 =*/ nk0,
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
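+    // nw0: number of threadgroups needed to cover a single row; when several rows are packed
+    // into one threadgroup (nrptg > 1), a row always fits in a single threadgroup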
+ const int nw0 = nrptg == 1 ? (nk0 + nth - 1)/nth : 1;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nw0*(ne01 + nrptg - 1)/nrptg, ne02, ne03, nth, nrptg, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_pool_1d(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int32_t * opts = op->op_params;
+ ggml_op_pool op_pool = (ggml_op_pool) opts[0];
+
+ const int32_t k0 = opts[1];
+ const int32_t s0 = opts[2];
+ const int32_t p0 = opts[3];
+
+ const int64_t IW = op->src[0]->ne[0];
+ const int64_t OW = op->ne[0];
+
+ const int64_t np = ggml_nelements(op);
+
+ ggml_metal_kargs_pool_1d args_pool_1d = {
+ /* .k0 = */ k0,
+ /* .s0 = */ s0,
+ /* .p0 = */ p0,
+ /* .IW = */ IW,
+ /* .OW = */ OW,
+ /* .np = */ np
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_pool_1d(lib, op, op_pool);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), (int) np);
+ const int ntg = (np + nth - 1) / nth;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args_pool_1d, sizeof(args_pool_1d), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ntg, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+
+int ggml_metal_op_pool_2d(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int32_t * opts = op->op_params;
+ ggml_op_pool op_pool = (ggml_op_pool) opts[0];
+
+ const int32_t k0 = opts[1];
+ const int32_t k1 = opts[2];
+ const int32_t s0 = opts[3];
+ const int32_t s1 = opts[4];
+ const int32_t p0 = opts[5];
+ const int32_t p1 = opts[6];
+
+ const int64_t IH = op->src[0]->ne[1];
+ const int64_t IW = op->src[0]->ne[0];
+
+ const int64_t N = op->ne[3];
+ const int64_t OC = op->ne[2];
+ const int64_t OH = op->ne[1];
+ const int64_t OW = op->ne[0];
+
+ const int64_t np = N * OC * OH * OW;
+
+ ggml_metal_kargs_pool_2d args_pool_2d = {
+ /* .k0 = */ k0,
+ /* .k1 = */ k1,
+ /* .s0 = */ s0,
+ /* .s1 = */ s1,
+ /* .p0 = */ p0,
+ /* .p1 = */ p1,
+ /* .IH = */ IH,
+ /* .IW = */ IW,
+ /* .OH = */ OH,
+ /* .OW = */ OW,
+ /* .np = */ np
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_pool_2d(lib, op, op_pool);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), (int) np);
+ const int ntg = (np + nth - 1) / nth;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args_pool_2d, sizeof(args_pool_2d), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ntg, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_mul_mat(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev);
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ GGML_ASSERT(ne00 == ne10);
+
+ GGML_ASSERT(ne12 % ne02 == 0);
+ GGML_ASSERT(ne13 % ne03 == 0);
+
+ const int16_t r2 = ne12/ne02;
+ const int16_t r3 = ne13/ne03;
+
+ // find the break-even point where the matrix-matrix kernel becomes more efficient compared
+ // to the matrix-vector kernel
+ const int ne11_mm_min = 8;
+
+ // first try to use small-batch mat-mv kernels
+ // these should be efficient for BS [2, ~8]
+ if (op->src[1]->type == GGML_TYPE_F32 && (ne00%128 == 0) &&
+ (
+ (
+ (
+ op->src[0]->type == GGML_TYPE_F32 || // TODO: helper function
+ op->src[0]->type == GGML_TYPE_F16 ||
+ op->src[0]->type == GGML_TYPE_Q4_0 ||
+ op->src[0]->type == GGML_TYPE_Q4_1 ||
+ op->src[0]->type == GGML_TYPE_Q5_0 ||
+ op->src[0]->type == GGML_TYPE_Q5_1 ||
+ op->src[0]->type == GGML_TYPE_Q8_0 ||
+ op->src[0]->type == GGML_TYPE_MXFP4 ||
+ op->src[0]->type == GGML_TYPE_IQ4_NL ||
+ false) && (ne11 >= 2 && ne11 <= 8)
+ ) ||
+ (
+ (
+ op->src[0]->type == GGML_TYPE_Q4_K ||
+ op->src[0]->type == GGML_TYPE_Q5_K ||
+ op->src[0]->type == GGML_TYPE_Q6_K ||
+ false) && (ne11 >= 4 && ne11 <= 8)
+ )
+ )
+ ) {
+ // TODO: determine the optimal parameters based on grid utilization
+ // I still don't know why we should not always use the maximum available threads:
+ //
+ // nsg = pipeline.maxTotalThreadsPerThreadgroup / 32
+ //
+ // my current hypothesis is that the work grid is not evenly divisible for different nsg
+ // values and there can be some tail effects when nsg is high. need to confirm this
+ //
+ const int nsg = 2; // num simdgroups per threadgroup
+
+ // num threads along row per simdgroup
+ int16_t nxpsg = 0;
+ if (ne00 % 256 == 0 && ne11 < 3) {
+ nxpsg = 16;
+ } else if (ne00 % 128 == 0) {
+ nxpsg = 8;
+ } else {
+ nxpsg = 4;
+ }
+
+ const int16_t nypsg = 32/nxpsg; // num threads along col per simdgroup (i.e. a simdgroup processes that many src0 rows at a time)
+ const int16_t r0ptg = nypsg*nsg; // num src0 rows per threadgroup
+ int16_t r1ptg = 4; // num src1 rows per threadgroup
+
+        // note: not sure how optimal these are across all the different hardware. there might be something cleverer
+ switch (ne11) {
+ case 2:
+ r1ptg = 2; break;
+ case 3:
+ case 6:
+ r1ptg = 3; break;
+ case 4:
+ case 7:
+ case 8:
+ r1ptg = 4; break;
+ case 5:
+ r1ptg = 5; break;
+ default:
+ GGML_ABORT("unsupported ne11");
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_mul_mv_ext(lib, op->src[0]->type, op->src[1]->type, nsg, nxpsg, r1ptg);
+
+ ggml_metal_kargs_mul_mv_ext args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.r2 =*/ r2,
+ /*.r3 =*/ r3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
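+        // each threadgroup of nsg simdgroups covers r0ptg src0 rows and r1ptg src1 rows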
+ ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + r0ptg - 1)/r0ptg), ((ne11 + r1ptg - 1)/r1ptg), ne12*ne13, 32, nsg, 1);
+ } else if (
+ !ggml_is_transposed(op->src[0]) &&
+ !ggml_is_transposed(op->src[1]) &&
+ // for now the matrix-matrix multiplication kernel only works on A14+/M1+ SoCs
+        // AMD GPUs and older A-chips will reuse the matrix-vector multiplication kernel
+ props_dev->has_simdgroup_mm && ne00 >= 64 && ne11 > ne11_mm_min) {
+ //GGML_LOG_INFO("matrix: ne00 = %6d, ne01 = %6d, ne02 = %6d, ne11 = %6d, ne12 = %6d\n", ne00, ne01, ne02, ne11, ne12);
+
+ // some Metal matrix data types require aligned pointers
+ // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
+ //switch (op->src[0]->type) {
+ // case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break;
+ // case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break;
+ // case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8 == 0); break;
+ // default: break;
+ //}
+
+ auto pipeline = ggml_metal_library_get_pipeline_mul_mm(lib, op);
+
+ ggml_metal_kargs_mul_mm args = {
+ /*.ne00 =*/ ne00,
+ /*.ne02 =*/ ne02,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne12 =*/ ne12,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.r2 =*/ r2,
+ /*.r3 =*/ r3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
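+        // each threadgroup of 128 threads computes a 64x32 tile of the result (64 src0 rows x 32 src1 rows)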
+ ggml_metal_encoder_dispatch_threadgroups(enc, ((ne11 + 31)/32), ((ne01 + 63)/64), ne12*ne13, 128, 1, 1);
+ } else {
+ auto pipeline = ggml_metal_library_get_pipeline_mul_mv(lib, op);
+
+ const int nr0 = pipeline.nr0;
+ const int nr1 = pipeline.nr1;
+ const int nsg = pipeline.nsg;
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_kargs_mul_mv args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.nr0 =*/ nr0,
+ /*.r2 =*/ r2,
+ /*.r3 =*/ r3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
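+        // the F32/F16/BF16/Q8_0 kernels cover nr0 src0 rows per threadgroup, while the quantized
+        // kernels cover nr0 rows per simdgroup (nr0*nsg per threadgroup) - hence the different grids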
+ if (op->src[0]->type == GGML_TYPE_F32 ||
+ op->src[0]->type == GGML_TYPE_F16 ||
+ op->src[0]->type == GGML_TYPE_BF16 ||
+ op->src[0]->type == GGML_TYPE_Q8_0) {
+ ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + nr0 - 1)/(nr0)), ((ne11 + nr1 - 1)/nr1), ne12*ne13, 32, nsg, 1);
+ } else {
+ ggml_metal_encoder_dispatch_threadgroups(enc, ((ne01 + nr0*nsg - 1)/(nr0*nsg)), ((ne11 + nr1 - 1)/nr1), ne12*ne13, 32, nsg, 1);
+ }
+ }
+
+ return 1;
+}
+
+size_t ggml_metal_op_mul_mat_id_extra_tpe(const ggml_tensor * op) {
+ assert(op->op == GGML_OP_MUL_MAT_ID);
+
+ const int64_t ne02 = op->src[0]->ne[2]; // n_expert
+
+ return ggml_type_size(GGML_TYPE_I32)*ne02;
+}
+
+size_t ggml_metal_op_mul_mat_id_extra_ids(const ggml_tensor * op) {
+ assert(op->op == GGML_OP_MUL_MAT_ID);
+
+ const int64_t ne02 = op->src[0]->ne[2]; // n_expert
+ const int64_t ne21 = op->src[2]->ne[1]; // n_token
+
+ return ggml_type_size(GGML_TYPE_I32)*ne02*ne21;
+}
+
+int ggml_metal_op_mul_mat_id(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev);
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ // src2 = ids
+ GGML_ASSERT(op->src[2]->type == GGML_TYPE_I32);
+
+ GGML_ASSERT(!ggml_is_transposed(op->src[0]));
+ GGML_ASSERT(!ggml_is_transposed(op->src[1]));
+
+ GGML_ASSERT(ne03 == 1);
+ GGML_ASSERT(ne13 == 1);
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]);
+ ggml_metal_buffer_id bid_src2 = ggml_metal_get_buffer_id(op->src[2]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ const uint32_t r2 = 1;
+ const uint32_t r3 = 1;
+
+ // find the break-even point where the matrix-matrix kernel becomes more efficient compared
+ // to the matrix-vector kernel
+ // ne20 = n_used_experts
+ // ne21 = n_rows (batch size)
+ const int ne21_mm_id_min = 32;
+
+ if (props_dev->has_simdgroup_mm && ne00 >= 64 && (ne21 >= ne21_mm_id_min)) {
+ // some Metal matrix data types require aligned pointers
+ // ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf (Table 2.5)
+ //switch (op->src[0]->type) {
+ // case GGML_TYPE_F32: GGML_ASSERT(nb01 % 16 == 0); break;
+ // case GGML_TYPE_F16: GGML_ASSERT(nb01 % 8 == 0); break;
+ // case GGML_TYPE_BF16: GGML_ASSERT(nb01 % 8 == 0); break;
+ // default: break;
+ //}
+
+ // extra buffers for intermediate id mapping
+ ggml_metal_buffer_id bid_tpe = bid_dst;
+ bid_tpe.offs += ggml_nbytes(op);
+
+ ggml_metal_buffer_id bid_ids = bid_tpe;
+ bid_ids.offs += ggml_metal_op_mul_mat_id_extra_tpe(op);
+
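+        // scratch layout after dst: tpe holds one i32 per expert and ids one i32 per expert-token pair
+        // (see the *_extra_tpe()/*_extra_ids() helpers above); the map0 kernel fills them with the
+        // intermediate expert-to-token mapping used by the mul_mm_id kernel below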
+ {
+ ggml_metal_kargs_mul_mm_id_map0 args = {
+ ne02,
+ ne10,
+ ne11, // n_expert_used (bcast)
+ nb11,
+ nb12,
+ ne21, // n_tokens
+ ne20, // n_expert_used
+ nb21,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_mul_mm_id_map0(lib, ne02, ne20);
+
+ const size_t smem = pipeline.smem;
+
+ GGML_ASSERT(ne02 <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ GGML_ASSERT(smem <= props_dev->max_theadgroup_memory_size);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src2, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_tpe, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_ids, 3);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, ne02, 1, 1);
+ }
+
+ // this barrier is always needed because the next kernel has to wait for the id maps to be computed
+ ggml_metal_op_concurrency_reset(ctx);
+
+ {
+ auto pipeline = ggml_metal_library_get_pipeline_mul_mm_id(lib, op);
+
+ ggml_metal_kargs_mul_mm_id args = {
+ /*.ne00 =*/ ne00,
+ /*.ne02 =*/ ne02,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne11 =*/ ne11, // n_expert_used (bcast)
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne20 =*/ ne20, // n_expert_used
+ /*.ne21 =*/ ne21, // n_tokens
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.r2 =*/ r2,
+ /*.r3 =*/ r3,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_src1, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_tpe, 3);
+ ggml_metal_encoder_set_buffer (enc, bid_ids, 4);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 5);
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne21 + 31)/32, (ne01 + 63)/64, ne02, 128, 1, 1);
+ }
+ } else {
+ auto pipeline = ggml_metal_library_get_pipeline_mul_mv_id(lib, op);
+
+ const int nr0 = pipeline.nr0;
+ const int nr1 = pipeline.nr1;
+ const int nsg = pipeline.nsg;
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_kargs_mul_mv_id args = {
+ /*.nei0 =*/ ne20,
+ /*.nei1 =*/ ne21,
+ /*.nbi1 =*/ nb21,
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.ne13 =*/ ne13,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.nb1 =*/ nb1,
+ /*.nr0 =*/ nr0,
+ };
+
+ if (ggml_is_quantized(op->src[0]->type)) {
+ GGML_ASSERT(ne00 >= nsg*nr0);
+ }
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer(enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer(enc, bid_src1, 2);
+ ggml_metal_encoder_set_buffer(enc, bid_dst, 3);
+ ggml_metal_encoder_set_buffer(enc, bid_src2, 4);
+
+ const int64_t _ne1 = 1;
+ const int64_t ne123 = ne20*ne21;
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ if (op->src[0]->type == GGML_TYPE_F32 ||
+ op->src[0]->type == GGML_TYPE_F16 ||
+ op->src[0]->type == GGML_TYPE_BF16 ||
+ op->src[0]->type == GGML_TYPE_Q8_0) {
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nr0 - 1)/(nr0), (_ne1 + nr1 - 1)/nr1, ne123, 32, nsg, 1);
+ } else {
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nr0*nsg - 1)/(nr0*nsg), (_ne1 + nr1 - 1)/nr1, ne123, 32, nsg, 1);
+ }
+ }
+
+ return 1;
+}
+
+int ggml_metal_op_add_id(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[2]->type == GGML_TYPE_I32);
+ GGML_ASSERT(op->type == GGML_TYPE_F32);
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+ ggml_metal_kargs_add_id args = {
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb11 =*/ nb11,
+ /*.nb21 =*/ nb21,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_base(lib, GGML_OP_ADD_ID);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 4);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne00);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, 1, nth, 1, 1);
+
+ return 1;
+}
+
+bool ggml_metal_op_flash_attn_ext_use_vec(const ggml_tensor * op) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+ const int64_t ne00 = op->src[0]->ne[0]; // head size
+ const int64_t ne01 = op->src[0]->ne[1]; // batch size
+
+ // use vec kernel if the batch size is small and if the head size is supported
+ return (ne01 < 20) && (ne00 % 32 == 0);
+}
+
+size_t ggml_metal_op_flash_attn_ext_extra_pad(const ggml_tensor * op) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb);
+
+ size_t res = 0;
+
+ const bool has_mask = op->src[3] != nullptr;
+
+    // note: the non-vec kernel requires more extra memory, so always reserve enough for it
+ GGML_ASSERT(OP_FLASH_ATTN_EXT_NCPSG >= OP_FLASH_ATTN_EXT_VEC_NCPSG);
+
+ //if (ggml_metal_op_flash_attn_ext_use_vec(op)) {
+ if (false) {
+ // note: always reserve the padding space to avoid graph reallocations
+ //const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_VEC_NCPSG != 0;
+ const bool has_kvpad = true;
+
+ if (has_kvpad) {
+ res += OP_FLASH_ATTN_EXT_VEC_NCPSG*(
+ nb11*ne12*ne13 +
+ nb21*ne22*ne23 +
+ (has_mask ? ggml_type_size(GGML_TYPE_F16)*ne31*ne32*ne33 : 0));
+ }
+ } else {
+ //const bool has_kvpad = ne11 % OP_FLASH_ATTN_EXT_NCPSG != 0;
+ const bool has_kvpad = true;
+
+ if (has_kvpad) {
+ res += OP_FLASH_ATTN_EXT_NCPSG*(
+ nb11*ne12*ne13 +
+ nb21*ne22*ne23 +
+ (has_mask ? ggml_type_size(GGML_TYPE_F16)*ne31*ne32*ne33 : 0));
+ }
+ }
+
+ return res;
+}
+
+size_t ggml_metal_op_flash_attn_ext_extra_blk(const ggml_tensor * op) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ //GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ //GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ //GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ //GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ //GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb);
+
+ size_t res = 0;
+
+ const bool has_mask = op->src[3] != nullptr;
+
+ if (!has_mask) {
+ return res;
+ }
+
+ const bool is_vec = ggml_metal_op_flash_attn_ext_use_vec(op);
+
+ // this optimization is not useful for the vector kernels
+ // note: always reserve the blk buffer to avoid graph reallocations
+ //if (is_vec) {
+ // return res;
+ //}
+
+ const int nqptg = is_vec ? OP_FLASH_ATTN_EXT_VEC_NQPSG : OP_FLASH_ATTN_EXT_NQPSG;
+ const int ncpsg = is_vec ? OP_FLASH_ATTN_EXT_VEC_NCPSG : OP_FLASH_ATTN_EXT_NCPSG;
+
+ const int64_t ne1 = (ne01 + nqptg - 1)/nqptg;
+ const int64_t ne0 = (ne30 + ncpsg - 1)/ncpsg;
+
+ res += GGML_PAD(ggml_type_size(GGML_TYPE_I8)*ne0*ne1*ne32*ne33, 32);
+
+ return res;
+}
+
+size_t ggml_metal_op_flash_attn_ext_extra_tmp(const ggml_tensor * op) {
+ assert(op->op == GGML_OP_FLASH_ATTN_EXT);
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ //GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ //GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ //GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne);
+ //GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb);
+
+ size_t res = 0;
+
+ // note: always reserve the temp buffer to avoid graph reallocations
+ //if (ggml_metal_op_flash_attn_ext_use_vec(op)) {
+ if (true) {
+ const int64_t nwg = 32;
+ const int64_t ne01_max = std::min(ne01, 32);
+
+ // temp buffer for writing the results from each workgroup
+ // - ne20: the size of the Value head
+ // - + 2: the S and M values for each intermediate result
+ res += ggml_type_size(GGML_TYPE_F32)*(ne01_max*ne02*ne03*nwg*(ne20 + 2));
+ }
+
+ return res;
+}
+
+int ggml_metal_op_flash_attn_ext(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx->dev);
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne2, op->src[2], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb2, op->src[2], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne3, op->src[3], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb3, op->src[3], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS( int32_t, nb, op, nb);
+
+ GGML_ASSERT(ne00 % 4 == 0);
+
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == op->src[2]->type);
+
+ //GGML_ASSERT(ggml_are_same_shape (src1, src2));
+ GGML_ASSERT(ne11 == ne21);
+ GGML_ASSERT(ne12 == ne22);
+
+ GGML_ASSERT(!op->src[3] || op->src[3]->type == GGML_TYPE_F16);
+ GGML_ASSERT(!op->src[3] || op->src[3]->ne[1] >= op->src[0]->ne[1] &&
+ "the Flash-Attention Metal kernel requires the mask to be at least n_queries big");
+
+ float scale;
+ float max_bias;
+ float logit_softcap;
+
+ memcpy(&scale, ((const int32_t *) op->op_params) + 0, sizeof(scale));
+ memcpy(&max_bias, ((const int32_t *) op->op_params) + 1, sizeof(max_bias));
+ memcpy(&logit_softcap, ((const int32_t *) op->op_params) + 2, sizeof(logit_softcap));
+
+ if (logit_softcap != 0.0f) {
+ scale /= logit_softcap;
+ }
+
+ const bool has_mask = op->src[3] != NULL;
+ const bool has_sinks = op->src[4] != NULL;
+ const bool has_bias = max_bias != 0.0f;
+ const bool has_scap = logit_softcap != 0.0f;
+
+ const uint32_t n_head = op->src[0]->ne[2];
+ const int32_t n_head_log2 = 1u << (uint32_t) floorf(log2f((float) n_head));
+
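+    // ALiBi slope bases: heads below n_head_log2 use powers of m0, the remaining heads use powers of m1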
+ const float m0 = powf(2.0f, -(max_bias ) / n_head_log2);
+ const float m1 = powf(2.0f, -(max_bias / 2.0f) / n_head_log2);
+
+ GGML_ASSERT(ne01 < 65536);
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]);
+ ggml_metal_buffer_id bid_src2 = ggml_metal_get_buffer_id(op->src[2]);
+ ggml_metal_buffer_id bid_src3 = has_mask ? ggml_metal_get_buffer_id(op->src[3]) : bid_src0;
+ ggml_metal_buffer_id bid_src4 = has_sinks ? ggml_metal_get_buffer_id(op->src[4]) : bid_src0;
+
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ ggml_metal_buffer_id bid_pad = bid_dst;
+ bid_pad.offs += ggml_nbytes(op);
+
+ ggml_metal_buffer_id bid_blk = bid_pad;
+ bid_blk.offs += ggml_metal_op_flash_attn_ext_extra_pad(op);
+
+ ggml_metal_buffer_id bid_tmp = bid_blk;
+ bid_tmp.offs += ggml_metal_op_flash_attn_ext_extra_blk(op);
+
+ if (!ggml_metal_op_flash_attn_ext_use_vec(op)) {
+ // half8x8 kernel
+ const int nqptg = OP_FLASH_ATTN_EXT_NQPSG; // queries per threadgroup
+ const int ncpsg = OP_FLASH_ATTN_EXT_NCPSG; // cache values per simdgroup
+
+ GGML_ASSERT(nqptg <= 32);
+ GGML_ASSERT(nqptg % 8 == 0);
+ GGML_ASSERT(ncpsg % 32 == 0);
+
+ bool need_sync = false;
+
+ const bool has_kvpad = ne11 % ncpsg != 0;
+
+ if (has_kvpad) {
+ assert(ggml_metal_op_flash_attn_ext_extra_pad(op) != 0);
+
+ ggml_metal_kargs_flash_attn_ext_pad args0 = {
+ /*.ne11 =*/ne11,
+ /*.ne_12_2 =*/ne12,
+ /*.ne_12_3 =*/ne13,
+ /*.nb11 =*/nb11,
+ /*.nb12 =*/nb12,
+ /*.nb13 =*/nb13,
+ /*.nb21 =*/nb21,
+ /*.nb22 =*/nb22,
+ /*.nb23 =*/nb23,
+ /*.ne31 =*/ne31,
+ /*.ne32 =*/ne32,
+ /*.ne33 =*/ne33,
+ /*.nb31 =*/nb31,
+ /*.nb32 =*/nb32,
+ /*.nb33 =*/nb33,
+ };
+
+ auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_pad(lib, op, has_mask, ncpsg);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline0);
+ ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src1, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_src2, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_src3, 3);
+ ggml_metal_encoder_set_buffer (enc, bid_pad, 4);
+
+ assert(ne12 == ne22);
+ assert(ne13 == ne23);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ncpsg, std::max(ne12, ne32), std::max(ne13, ne33), 32, 1, 1);
+
+ need_sync = true;
+ }
+
+ if (has_mask) {
+ assert(ggml_metal_op_flash_attn_ext_extra_blk(op) != 0);
+
+ ggml_metal_kargs_flash_attn_ext_blk args0 = {
+ /*.ne01 =*/ ne01,
+ /*.ne30 =*/ ne30,
+ /*.ne31 =*/ ne31,
+ /*.ne32 =*/ ne32,
+ /*.ne33 =*/ ne33,
+ /*.nb31 =*/ nb31,
+ /*.nb32 =*/ nb32,
+ /*.nb33 =*/ nb33,
+ };
+
+ auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_blk(lib, op, nqptg, ncpsg);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline0);
+ ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src3, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_blk, 2);
+
+ const int32_t nblk1 = ((ne01 + nqptg - 1)/nqptg);
+ const int32_t nblk0 = ((ne30 + ncpsg - 1)/ncpsg);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nblk0, nblk1, ne32*ne33, 32, 1, 1);
+
+ need_sync = true;
+ }
+
+ if (need_sync) {
+ ggml_metal_op_concurrency_reset(ctx);
+ }
+
+ const int is_q = ggml_is_quantized(op->src[1]->type) ? 1 : 0;
+
+ // 2*(2*ncpsg)
+ // ncpsg soft_max values + ncpsg mask values
+ //
+ // 16*32*(nsg)
+ // the shared memory needed for the simdgroups to load the KV cache
+        // each thread loads (dequantizes) 16 head elements; there are 32 threads in the SG
+ //
+#define FATTN_SMEM(nsg) (GGML_PAD((nqptg*(ne00 + 2*GGML_PAD(ne20, 64) + 2*(2*ncpsg)) + is_q*(16*32*(nsg)))*(sizeof(float)/2), 16))
+
+ //int64_t nsgmax = 4;
+ //
+ //if (is_q) {
+ // nsgmax = 2;
+ // while (true) {
+ // const size_t smem = FATTN_SMEM(nsgmax);
+ // if (smem > props_dev->max_theadgroup_memory_size) {
+ // break;
+ // }
+ // nsgmax *= 2;
+ // }
+ // nsgmax /= 2;
+ //}
+
+ // simdgroups per threadgroup (a.k.a. warps)
+ //nsg = ne01 <= nqptg ? MAX(4, MIN(nsgmax, MIN(ne11/ncpsg, (int64_t) pipeline.maxTotalThreadsPerThreadgroup/32))) : 4;
+ int32_t nsg = ne00 >= 512 ? 8 : 4;
+
+ const size_t smem = FATTN_SMEM(nsg);
+
+ ggml_metal_kargs_flash_attn_ext args = {
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne11 =*/ ne11,
+ /*.ne_12_2 =*/ ne12,
+ /*.ne_12_3 =*/ ne13,
+ /*.ns10 =*/ int32_t(nb11/nb10),
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ns20 =*/ int32_t(nb21/nb20),
+ /*.nb21 =*/ nb21,
+ /*.nb22 =*/ nb22,
+ /*.nb23 =*/ nb23,
+ /*.ne31 =*/ ne31,
+ /*.ne32 =*/ ne32,
+ /*.ne33 =*/ ne33,
+ /*.nb31 =*/ nb31,
+ /*.nb32 =*/ nb32,
+ /*.nb33 =*/ nb33,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.scale =*/ scale,
+ /*.max_bias =*/ max_bias,
+ /*.m0 =*/ m0,
+ /*.m1 =*/ m1,
+ /*.n_head_log2 =*/ n_head_log2,
+ /*.logit_softcap =*/ logit_softcap,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_flash_attn_ext(lib, op, has_mask, has_sinks, has_bias, has_scap, has_kvpad, nsg);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_src1, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_src2, 3);
+ ggml_metal_encoder_set_buffer (enc, bid_src3, 4);
+ ggml_metal_encoder_set_buffer (enc, bid_src4, 5);
+ ggml_metal_encoder_set_buffer (enc, bid_pad, 6);
+ ggml_metal_encoder_set_buffer (enc, bid_blk, 7);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 8);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, ne02, ne03, 32, nsg, 1);
+#undef FATTN_SMEM
+ } else {
+ // half4x4 kernel
+ const int nqptg = OP_FLASH_ATTN_EXT_VEC_NQPSG; // queries per threadgroup
+ const int ncpsg = OP_FLASH_ATTN_EXT_VEC_NCPSG; // cache values per simdgroup !! sync with kernel template arguments !!
+ const int nhptg = 1; // heads per threadgroup
+
+ GGML_ASSERT(nqptg <= 32);
+ GGML_ASSERT(nqptg % 1 == 0);
+ GGML_ASSERT(ncpsg % 32 == 0);
+
+ bool need_sync = false;
+
+ const bool has_kvpad = ne11 % ncpsg != 0;
+
+ if (has_kvpad) {
+ assert(ggml_metal_op_flash_attn_ext_extra_pad(op) != 0);
+
+ ggml_metal_kargs_flash_attn_ext_pad args0 = {
+ /*.ne11 =*/ne11,
+ /*.ne_12_2 =*/ne12,
+ /*.ne_12_3 =*/ne13,
+ /*.nb11 =*/nb11,
+ /*.nb12 =*/nb12,
+ /*.nb13 =*/nb13,
+ /*.nb21 =*/nb21,
+ /*.nb22 =*/nb22,
+ /*.nb23 =*/nb23,
+ /*.ne31 =*/ne31,
+ /*.ne32 =*/ne32,
+ /*.ne33 =*/ne33,
+ /*.nb31 =*/nb31,
+ /*.nb32 =*/nb32,
+ /*.nb33 =*/nb33,
+ };
+
+ auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_pad(lib, op, has_mask, ncpsg);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline0);
+ ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src1, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_src2, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_src3, 3);
+ ggml_metal_encoder_set_buffer (enc, bid_pad, 4);
+
+ assert(ne12 == ne22);
+ assert(ne13 == ne23);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ncpsg, std::max(ne12, ne32), std::max(ne13, ne33), 32, 1, 1);
+
+ need_sync = true;
+ }
+
+ if (need_sync) {
+ ggml_metal_op_concurrency_reset(ctx);
+ }
+
+        // note: for simplicity, assume K is at least as large as V
+ GGML_ASSERT(ne10 >= ne20);
+
+ // ne00 + 2*ncpsg*(nsg)
+ // for each query, we load it as f16 in shared memory (ne00)
+ // and store the soft_max values and the mask
+ //
+ // ne20*(nsg)
+ // each simdgroup has a full f32 head vector in shared mem to accumulate results
+ //
+#define FATTN_SMEM(nsg) (GGML_PAD(((GGML_PAD(ne00, 128) + 4*ncpsg + 2*GGML_PAD(ne20, 128))*(nsg))*(sizeof(float)/2), 16))
+
+ int64_t nsg = 1;
+
+ // workgroups
+        // each workgroup handles nsg*ncpsg cache values
+ int32_t nwg = 1;
+ if (false) {
+            // for small KV caches, we could launch a single workgroup and write the results directly to dst;
+ // however, this does not lead to significant improvement, so disabled
+ nwg = 1;
+ nsg = 4;
+ } else {
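+            // use 32 workgroups and double the simdgroups per workgroup (up to 4) while
+            // 2*nwg*nsg*ncpsg is still smaller than the KV length ne11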
+ nwg = 32;
+ nsg = 1;
+ while (2*nwg*nsg*ncpsg < ne11 && nsg < 4) {
+ nsg *= 2;
+ }
+ }
+
+ ggml_metal_kargs_flash_attn_ext_vec args = {
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne11 =*/ ne11,
+ /*.ne_12_2 =*/ ne12,
+ /*.ne_12_3 =*/ ne13,
+ /*.ns10 =*/ int32_t(nb11/nb10),
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ns20 =*/ int32_t(nb21/nb20),
+ /*.nb21 =*/ nb21,
+ /*.nb22 =*/ nb22,
+ /*.nb23 =*/ nb23,
+ /*.ne31 =*/ ne31,
+ /*.ne32 =*/ ne32,
+ /*.ne33 =*/ ne33,
+ /*.nb31 =*/ nb31,
+ /*.nb32 =*/ nb32,
+ /*.nb33 =*/ nb33,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.scale =*/ scale,
+ /*.max_bias =*/ max_bias,
+ /*.m0 =*/ m0,
+ /*.m1 =*/ m1,
+ /*.n_head_log2 =*/ n_head_log2,
+ /*.logit_softcap =*/ logit_softcap,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_flash_attn_ext_vec(lib, op, has_mask, has_sinks, has_bias, has_scap, has_kvpad, nsg, nwg);
+
+ GGML_ASSERT(nsg*32 <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_src1, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_src2, 3);
+ ggml_metal_encoder_set_buffer (enc, bid_src3, 4);
+ ggml_metal_encoder_set_buffer (enc, bid_src4, 5);
+
+ const size_t smem = FATTN_SMEM(nsg);
+
+ //printf("smem: %zu, max: %zu, nsg = %d, nsgmax = %d\n", smem, props_dev->max_theadgroup_memory_size, (int) nsg, (int) nsgmax);
+ GGML_ASSERT(smem <= props_dev->max_theadgroup_memory_size);
+
+ if (nwg == 1) {
+ assert(ggml_metal_op_flash_attn_ext_extra_tmp(op) == 0);
+
+ // using 1 workgroup -> write the result directly into dst
+ ggml_metal_encoder_set_buffer(enc, bid_pad, 6);
+ ggml_metal_encoder_set_buffer(enc, bid_dst, 7);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, (ne02 + nhptg - 1)/nhptg, ne03*nwg, 32, nsg, 1);
+ } else {
+ // sanity checks
+ assert(ggml_metal_op_flash_attn_ext_extra_tmp(op) != 0);
+
+ GGML_ASSERT(ne01*ne02*ne03 == ne1*ne2*ne3);
+ GGML_ASSERT((uint64_t)ne1*ne2*ne3 <= (1u << 31));
+
+ // write the results from each workgroup into a temp buffer
+ ggml_metal_encoder_set_buffer(enc, bid_pad, 6);
+ ggml_metal_encoder_set_buffer(enc, bid_tmp, 7);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+ ggml_metal_encoder_dispatch_threadgroups(enc, (ne01 + nqptg - 1)/nqptg, (ne02 + nhptg - 1)/nhptg, ne03*nwg, 32, nsg, 1);
+
+ // sync the 2 kernels
+ ggml_metal_op_concurrency_reset(ctx);
+
+ // reduce the results from the workgroups
+ {
+ const int32_t nrows = ne1*ne2*ne3;
+
+ ggml_metal_kargs_flash_attn_ext_vec_reduce args0 = {
+ nrows,
+ };
+
+ auto pipeline0 = ggml_metal_library_get_pipeline_flash_attn_ext_vec_reduce(lib, op, ne20, nwg);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline0);
+ ggml_metal_encoder_set_bytes (enc, &args0, sizeof(args0), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, 32*nwg, 1, 1);
+ }
+ }
+#undef FATTN_SMEM
+ }
+
+ return 1;
+}
+
+int ggml_metal_op_bin(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ const bool use_fusion = ctx->use_fusion;
+
+ const int debug_fusion = ctx->debug_fusion;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[1]));
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_src1 = ggml_metal_get_buffer_id(op->src[1]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ ggml_metal_kargs_bin args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne10 =*/ ne10,
+ /*.ne11 =*/ ne11,
+ /*.ne12 =*/ ne12,
+ /*.ne13 =*/ ne13,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.offs =*/ 0,
+ /*.o1 =*/ { bid_src1.offs },
+ };
+
+ ggml_op fops[8];
+
+ int n_fuse = 1;
+
+ // c[0] = add(a, b[0])
+ // c[1] = add(c[0], b[1])
+ // c[2] = add(c[1], b[2])
+ // ...
+ if (use_fusion) {
+ fops[0] = GGML_OP_ADD;
+ fops[1] = GGML_OP_ADD;
+ fops[2] = GGML_OP_ADD;
+ fops[3] = GGML_OP_ADD;
+ fops[4] = GGML_OP_ADD;
+ fops[5] = GGML_OP_ADD;
+ fops[6] = GGML_OP_ADD;
+ fops[7] = GGML_OP_ADD;
+
+        // note: in Metal, we sometimes encode the graph in parallel, so we have to avoid fusing ops
+ // across splits. idx_end indicates the last node in the current split
+ for (n_fuse = 0; n_fuse <= 6; ++n_fuse) {
+ if (!ctx->can_fuse(idx + n_fuse, fops + n_fuse, 2)) {
+ break;
+ }
+
+ ggml_tensor * f0 = ctx->node(idx + n_fuse);
+ ggml_tensor * f1 = ctx->node(idx + n_fuse + 1);
+
+ if (f0 != f1->src[0]) {
+ break;
+ }
+
+ // b[0] === b[1] === ...
+ if (!ggml_are_same_layout(f0->src[1], f1->src[1])) {
+ break;
+ }
+
+ // only fuse ops if src1 is in the same Metal buffer
+ ggml_metal_buffer_id bid_fuse = ggml_metal_get_buffer_id(f1->src[1]);
+ if (bid_fuse.metal != bid_src1.metal) {
+ break;
+ }
+
+ //ctx->fuse_cnt[ops[n_fuse + 1]->op]++;
+
+ args.o1[n_fuse + 1] = bid_fuse.offs;
+ }
+
+ ++n_fuse;
+
+ if (debug_fusion > 1 && n_fuse > 1) {
+ GGML_LOG_DEBUG("%s: fuse: ADD x %d\n", __func__, n_fuse);
+ }
+ }
+
+ // the offsets of src1 and all fused buffers are relative to the start of the src1 buffer
+ bid_src1.offs = 0;
+
+ struct ggml_metal_pipeline_with_params pipeline;
+
+ pipeline = ggml_metal_library_get_pipeline_bin(lib, op, n_fuse);
+
+ if (n_fuse > 1) {
+ bid_dst = ggml_metal_get_buffer_id(ctx->node(idx + n_fuse - 1));
+
+ for (int i = 1; i < n_fuse; ++i) {
+ if (!ggml_metal_op_concurrency_check(ctx, ctx->node(idx + i))) {
+ ggml_metal_op_concurrency_reset(ctx);
+
+ break;
+ }
+ }
+ }
+
+ if (pipeline.c4) {
+ args.ne00 = ne00/4;
+ args.ne10 = ne10/4;
+ args.ne0 = ne0/4;
+ }
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_src1, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 3);
+
+ if (pipeline.cnt) {
+ const int n = pipeline.c4 ? ggml_nelements(op)/4 : ggml_nelements(op);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, 1, 1, 1);
+ } else {
+ const int nth_max = MIN(256, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ int nth = 1;
+
+ while (2*nth < args.ne0 && nth < nth_max) {
+ nth *= 2;
+ }
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+ }
+
+ return n_fuse;
+}
+
+int ggml_metal_op_l2_norm(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ float eps;
+ memcpy(&eps, op->op_params, sizeof(float));
+
+ ggml_metal_kargs_l2_norm args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.eps =*/ eps,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_l2_norm(lib, op);
+
+ if (pipeline.c4) {
+ args.ne00 = ne00/4;
+ args.ne0 = ne0/4;
+ }
+
+ int nth = 32; // SIMD width
+
+ while (nth < ne00 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
+ nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_group_norm(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int32_t ngrp = ((const int32_t *) op->op_params)[0];
+
+ float eps;
+ memcpy(&eps, op->op_params + 1, sizeof(float));
+
+ ggml_metal_kargs_group_norm args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.ngrp =*/ ngrp,
+ /*.eps =*/ eps,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_group_norm(lib, op);
+
+ int nth = 32; // SIMD width
+ //while (nth < ne00/4 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ // nth *= 2;
+ //}
+
+ //nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+ //nth = std::min(nth, ne00/4);
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ngrp, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_norm(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ const bool use_fusion = ctx->use_fusion;
+
+ const int debug_fusion = ctx->debug_fusion;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ float eps;
+ memcpy(&eps, op->op_params, sizeof(float));
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ ggml_metal_kargs_norm args = {
+ /*.ne00 =*/ ne00,
+ /*.ne00_t =*/ ne00 % 4 == 0 ? ne00/4 : ne00,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.eps =*/ eps,
+ /*.nef1 =*/ { ne01 },
+ /*.nef2 =*/ { ne02 },
+ /*.nef3 =*/ { ne03 },
+ /*.nbf1 =*/ { nb01 },
+ /*.nbf2 =*/ { nb02 },
+ /*.nbf3 =*/ { nb03 },
+ };
+
+ ggml_op fops[8];
+
+ int n_fuse = 1;
+
+ ggml_metal_buffer_id bid_fuse[2] = { bid_src0, bid_src0 };
+
+ // d[0] = norm(a)
+ // d[1] = mul(d[0], b)
+ // d[2] = add(d[1], c)
+ if (use_fusion) {
+ fops[0] = op->op;
+ fops[1] = GGML_OP_MUL;
+ fops[2] = GGML_OP_ADD;
+
+ for (n_fuse = 0; n_fuse <= 1; ++n_fuse) {
+ if (!ctx->can_fuse(idx + n_fuse, fops + n_fuse, 2)) {
+ break;
+ }
+
+ ggml_tensor * f0 = ctx->node(idx + n_fuse);
+ ggml_tensor * f1 = ctx->node(idx + n_fuse + 1);
+
+ if (f0 != f1->src[0]) {
+ break;
+ }
+
+ if (f1->src[1]->ne[0] != op->ne[0]) {
+ break;
+ }
+
+ if (!ggml_is_contiguous_rows(f1->src[1])) {
+ break;
+ }
+
+ if (f1->type != GGML_TYPE_F32) {
+ break;
+ }
+
+ //ctx->fuse_cnt[f1->op]++;
+
+ bid_fuse[n_fuse] = ggml_metal_get_buffer_id(f1->src[1]);
+
+ args.nef1[n_fuse + 1] = f1->src[1]->ne[1];
+ args.nef2[n_fuse + 1] = f1->src[1]->ne[2];
+ args.nef3[n_fuse + 1] = f1->src[1]->ne[3];
+
+ args.nbf1[n_fuse + 1] = f1->src[1]->nb[1];
+ args.nbf2[n_fuse + 1] = f1->src[1]->nb[2];
+ args.nbf3[n_fuse + 1] = f1->src[1]->nb[3];
+ }
+
+ ++n_fuse;
+
+ if (debug_fusion > 1 && n_fuse > 1) {
+ if (n_fuse == 2) {
+ GGML_LOG_DEBUG("%s: fuse: %s + MUL\n", __func__, ggml_op_name(op->op));
+ }
+ if (n_fuse == 3) {
+ GGML_LOG_DEBUG("%s: fuse: %s + MUL + ADD\n", __func__, ggml_op_name(op->op));
+ }
+ }
+ }
+
+ if (n_fuse > 1) {
+ bid_dst = ggml_metal_get_buffer_id(ctx->node(idx + n_fuse - 1));
+
+ for (int i = 1; i < n_fuse; ++i) {
+ if (!ggml_metal_op_concurrency_check(ctx, ctx->node(idx + i))) {
+ ggml_metal_op_concurrency_reset(ctx);
+
+ break;
+ }
+ }
+ }
+
+ auto pipeline = ggml_metal_library_get_pipeline_norm(lib, op, n_fuse);
+
+ int nth = 32; // SIMD width
+
+ while (nth < args.ne00_t && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
+ nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+ nth = std::min(nth, args.ne00_t);
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_fuse[0], 2);
+ ggml_metal_encoder_set_buffer (enc, bid_fuse[1], 3);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 4);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+ return n_fuse;
+}
+
+int ggml_metal_op_rope(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+    // make sure we have one or more position ids (ne10) per token (ne02)
+ GGML_ASSERT(ne10 % ne02 == 0);
+ GGML_ASSERT(ne10 >= ne02);
+
+ const int nth = std::min(1024, ne00);
+
+ const int n_past = ((const int32_t *) op->op_params)[0];
+ const int n_dims = ((const int32_t *) op->op_params)[1];
+ //const int mode = ((const int32_t *) op->op_params)[2];
+    // skip index 3 (n_ctx), used in GLM RoPE, not implemented in Metal
+ const int n_ctx_orig = ((const int32_t *) op->op_params)[4];
+
+ float freq_base;
+ float freq_scale;
+ float ext_factor;
+ float attn_factor;
+ float beta_fast;
+ float beta_slow;
+
+ memcpy(&freq_base, (const int32_t *) op->op_params + 5, sizeof(float));
+ memcpy(&freq_scale, (const int32_t *) op->op_params + 6, sizeof(float));
+ memcpy(&ext_factor, (const int32_t *) op->op_params + 7, sizeof(float));
+ memcpy(&attn_factor, (const int32_t *) op->op_params + 8, sizeof(float));
+ memcpy(&beta_fast, (const int32_t *) op->op_params + 9, sizeof(float));
+ memcpy(&beta_slow, (const int32_t *) op->op_params + 10, sizeof(float));
+
+ // mrope
+ const int sect_0 = ((const int32_t *) op->op_params)[11];
+ const int sect_1 = ((const int32_t *) op->op_params)[12];
+ const int sect_2 = ((const int32_t *) op->op_params)[13];
+ const int sect_3 = ((const int32_t *) op->op_params)[14];
+
+ ggml_metal_kargs_rope args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.n_past =*/ n_past,
+ /*.n_dims =*/ n_dims,
+ /*.n_ctx_orig =*/ n_ctx_orig,
+ /*.freq_base =*/ freq_base,
+ /*.freq_scale =*/ freq_scale,
+ /*.ext_factor =*/ ext_factor,
+ /*.attn_factor =*/ attn_factor,
+ /*.beta_fast =*/ beta_fast,
+ /*.beta_slow =*/ beta_slow,
+ /* sect_0 =*/ sect_0,
+ /* sect_1 =*/ sect_1,
+ /* sect_2 =*/ sect_2,
+ /* sect_3 =*/ sect_3,
+ /* src2 =*/ op->src[2] != nullptr,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_rope(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ if (op->src[2]) {
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), 3);
+ } else {
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 3);
+ }
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 4);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_im2col(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int32_t s0 = ((const int32_t *)(op->op_params))[0];
+ const int32_t s1 = ((const int32_t *)(op->op_params))[1];
+ const int32_t p0 = ((const int32_t *)(op->op_params))[2];
+ const int32_t p1 = ((const int32_t *)(op->op_params))[3];
+ const int32_t d0 = ((const int32_t *)(op->op_params))[4];
+ const int32_t d1 = ((const int32_t *)(op->op_params))[5];
+
+ const bool is_2D = ((const int32_t *)(op->op_params))[6] == 1;
+
+ const int32_t N = op->src[1]->ne[is_2D ? 3 : 2];
+ const int32_t IC = op->src[1]->ne[is_2D ? 2 : 1];
+ const int32_t IH = is_2D ? op->src[1]->ne[1] : 1;
+ const int32_t IW = op->src[1]->ne[0];
+
+ const int32_t KH = is_2D ? op->src[0]->ne[1] : 1;
+ const int32_t KW = op->src[0]->ne[0];
+
+ const int32_t OH = is_2D ? op->ne[2] : 1;
+ const int32_t OW = op->ne[1];
+
+ const int32_t CHW = IC * KH * KW;
+
+ const uint64_t ofs0 = op->src[1]->nb[is_2D ? 3 : 2] / 4;
+ const uint64_t ofs1 = op->src[1]->nb[is_2D ? 2 : 1] / 4;
+
+ ggml_metal_kargs_im2col args = {
+ /*.ofs0 =*/ ofs0,
+ /*.ofs1 =*/ ofs1,
+ /*.IW =*/ IW,
+ /*.IH =*/ IH,
+ /*.CHW =*/ CHW,
+ /*.s0 =*/ s0,
+ /*.s1 =*/ s1,
+ /*.p0 =*/ p0,
+ /*.p1 =*/ p1,
+ /*.d0 =*/ d0,
+ /*.d1 =*/ d1,
+ /*.N =*/ N,
+ /*.KH =*/ KH,
+ /*.KW =*/ KW,
+ /*.KHW =*/ KH * KW,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_im2col(lib, op);
+
+ GGML_ASSERT(KH*KW <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ const uint64_t ntptg0 = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)/(KH*KW), N);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
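+    // grid: one threadgroup per (IC, OH, OW) position, each with ntptg0 x KH x KW threads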
+ ggml_metal_encoder_dispatch_threadgroups(enc, IC, OH, OW, ntptg0, KH, KW);
+
+ return 1;
+}
+
+int ggml_metal_op_conv_2d(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ GGML_ASSERT(ggml_is_contiguous(op->src[0]));
+ GGML_ASSERT(op->src[1]->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->type == GGML_TYPE_F32);
+ GGML_ASSERT(op->src[0]->type == GGML_TYPE_F16 || op->src[0]->type == GGML_TYPE_F32);
+
+ const int32_t s0 = ((const int32_t *) op->op_params)[0];
+ const int32_t s1 = ((const int32_t *) op->op_params)[1];
+ const int32_t p0 = ((const int32_t *) op->op_params)[2];
+ const int32_t p1 = ((const int32_t *) op->op_params)[3];
+ const int32_t d0 = ((const int32_t *) op->op_params)[4];
+ const int32_t d1 = ((const int32_t *) op->op_params)[5];
+
+ ggml_metal_kargs_conv_2d args = {
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.IW =*/ ne10,
+ /*.IH =*/ ne11,
+ /*.KW =*/ ne00,
+ /*.KH =*/ ne01,
+ /*.IC =*/ ne02,
+ /*.OC =*/ ne03,
+ /*.OW =*/ ne0,
+ /*.OH =*/ ne1,
+ /*.N =*/ ne3,
+ /*.s0 =*/ s0,
+ /*.s1 =*/ s1,
+ /*.p0 =*/ p0,
+ /*.p1 =*/ p1,
+ /*.d0 =*/ d0,
+ /*.d1 =*/ d1,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_conv_2d(lib, op);
+
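+    // note: one thread per output element - nth threads per threadgroup (capped at 256) and
+    //       ceil(n_out/nth) threadgroups, with the threadgroup count clamped to INT_MAX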
+ int nth = ggml_metal_pipeline_max_theads_per_threadgroup(pipeline);
+ nth = std::min(nth, 256);
+ nth = std::max(nth, 1);
+
+ const uint64_t n_out = ggml_nelements(op);
+
+ uint64_t tg = (n_out + nth - 1)/nth;
+ tg = std::max<uint64_t>(tg, 1);
+ tg = std::min<uint64_t>(tg, (uint64_t) std::numeric_limits<int>::max());
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, tg, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_conv_transpose_1d(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int32_t s0 = ((const int32_t *)(op->op_params))[0];
+
+ const int32_t IC = op->src[1]->ne[1];
+ const int32_t IL = op->src[1]->ne[0];
+
+ const int32_t K = op->src[0]->ne[0];
+
+ const int32_t OL = op->ne[0];
+ const int32_t OC = op->ne[1];
+
+ ggml_metal_kargs_conv_transpose_1d args = {
+ /*.IC =*/ IC,
+ /*.IL =*/ IL,
+ /*.K =*/ K,
+ /*.s0 =*/ s0,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_conv_transpose_1d(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, OL, OC, 1, 1, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_conv_transpose_2d(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne1, op->src[1], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int32_t s0 = ((const int32_t *)(op->op_params))[0];
+
+ const int32_t IC = op->src[1]->ne[2];
+ const int32_t IH = op->src[1]->ne[1];
+ const int32_t IW = op->src[1]->ne[0];
+
+ const int32_t KH = op->src[0]->ne[1];
+ const int32_t KW = op->src[0]->ne[0];
+
+ const int32_t OW = op->ne[0];
+ const int32_t OH = op->ne[1];
+ const int32_t OC = op->ne[2];
+
+ ggml_metal_kargs_conv_transpose_2d args = {
+ /*.IC =*/ IC,
+ /*.IH =*/ IH,
+ /*.IW =*/ IW,
+ /*.KH =*/ KH,
+ /*.KW =*/ KW,
+ /*.OC =*/ OC,
+ /*.s0 =*/ s0,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_conv_transpose_2d(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 3);
+
+    // Metal requires the threadgroup memory size to be a multiple of 16 bytes
+ const size_t smem = GGML_PAD(KW * KH * sizeof(float), 16);
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, OW, OH, OC, KW, KH, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_upscale(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
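+    // per-dimension output/input scale factors consumed by the upscale kernel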
+ const float sf0 = (float)ne0/op->src[0]->ne[0];
+ const float sf1 = (float)ne1/op->src[0]->ne[1];
+ const float sf2 = (float)ne2/op->src[0]->ne[2];
+ const float sf3 = (float)ne3/op->src[0]->ne[3];
+
+ ggml_metal_kargs_upscale args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.sf0 =*/ sf0,
+ /*.sf1 =*/ sf1,
+ /*.sf2 =*/ sf2,
+ /*.sf3 =*/ sf3
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_upscale(lib, op);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_pad(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_pad args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_pad(lib, op);
+
+ const int nth = std::min(1024, ne0);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_pad_reflect_1d(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_pad_reflect_1d args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ /*.p0 =*/ ((const int32_t *)(op->op_params))[0],
+ /*.p1 =*/ ((const int32_t *)(op->op_params))[1]
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_pad_reflect_1d(lib, op);
+
+ const int nth = std::min(1024, ne0);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne1, ne2, ne3, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_arange(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ float start;
+ float step;
+
+ memcpy(&start, ((const int32_t *) op->op_params) + 0, sizeof(float));
+ memcpy(&step, ((const int32_t *) op->op_params) + 2, sizeof(float));
+
+ ggml_metal_kargs_arange args = {
+ /*.ne0 =*/ ne0,
+ /*.start =*/ start,
+ /*.step =*/ step
+ };
+
+ const int nth = std::min(1024, ne0);
+
+ auto pipeline = ggml_metal_library_get_pipeline_arange(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 1);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_timestep_embedding(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ const int dim = op->op_params[0];
+ const int max_period = op->op_params[1];
+
+ ggml_metal_kargs_timestep_embedding args = {
+ /*.nb1 =*/ nb1,
+ /*.dim =*/ dim,
+ /*.max_period =*/ max_period,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_timestep_embedding(lib, op);
+
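+    // note: dim/2 threads - presumably one per (cos, sin) frequency pair of the embedding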
+ const int nth = std::max(1, std::min(1024, dim/2));
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne00, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_argmax(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_argmax args = {
+ /*.ne00 = */ ne00,
+ /*.nb01 = */ nb01,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_argmax(lib, op);
+
+ const int64_t nrows = ggml_nrows(op->src[0]);
+
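+    // note: grow the threadgroup in powers of two until it covers the row, but stop once
+    //       nth times the number of rows reaches 256 (a heuristic, as read from the loop below)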
+ int nth = 32; // SIMD width
+ while (nth < ne00 && nth*ne01*ne02*ne03 < 256) {
+ nth *= 2;
+ }
+
+ const size_t smem = pipeline.smem;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nrows, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_argsort(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_argsort(lib, op);
+
+    // bitonic sort requires the number of elements to be a power of 2
+ int nth = 1;
+ while (nth < ne00 && 2*nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
+ const int npr = (ne00 + nth - 1)/nth;
+
+    // Metal kernels require the threadgroup memory size to be a multiple of 16 bytes
+ // https://developer.apple.com/documentation/metal/mtlcomputecommandencoder/1443142-setthreadgroupmemorylength
+ const size_t smem = GGML_PAD(nth*sizeof(int32_t), 16);
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ ggml_metal_buffer_id bid_tmp = bid_dst;
+ bid_tmp.offs += ggml_nbytes(op);
+
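+    // note: the merge loop below ping-pongs between dst and the scratch region after each pass;
+    //       if the number of passes (ceil(log2(npr))) is odd, start swapped so that the final
+    //       pass presumably lands the result in the real dst buffer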
+ if ((int) ceil(std::log(npr) / std::log(2)) % 2 == 1) {
+ std::swap(bid_dst, bid_tmp);
+ }
+
+ ggml_metal_kargs_argsort args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.top_k =*/ nth,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, npr*ne01, ne02, ne03, nth, 1, 1);
+
+ auto pipeline_merge = ggml_metal_library_get_pipeline_argsort_merge(lib, op);
+
+ int len = nth;
+
+ while (len < ne00) {
+ ggml_metal_op_concurrency_reset(ctx);
+
+ ggml_metal_kargs_argsort_merge args_merge = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.top_k =*/ ne00,
+ /*.len =*/ len,
+ };
+
+ // merges per row
+ const int nm = (ne00 + 2*len - 1) / (2*len);
+
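+        // note: this nth shadows the outer one used for the initial block sort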
+ const int nth = std::min(512, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline_merge));
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline_merge);
+ ggml_metal_encoder_set_bytes (enc, &args_merge, sizeof(args_merge), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nm*ne01, ne02, ne03, nth, 1, 1);
+
+ std::swap(bid_dst, bid_tmp);
+
+ len <<= 1;
+ }
+
+ return 1;
+}
+
+int ggml_metal_op_top_k(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_ASSERT(ggml_is_contiguous_rows(op->src[0]));
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_top_k(lib, op);
+
+    // bitonic sort requires the number of elements to be a power of 2
+ int nth = 1;
+ while (nth < ne00 && 2*nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
+ // blocks per row
+ const int npr = (ne00 + nth - 1)/nth;
+
+ const size_t smem = GGML_PAD(nth*sizeof(int32_t), 16);
+
+ ggml_metal_buffer_id bid_src0 = ggml_metal_get_buffer_id(op->src[0]);
+ ggml_metal_buffer_id bid_dst = ggml_metal_get_buffer_id(op);
+
+ ggml_metal_buffer_id bid_tmp = bid_dst;
+ bid_tmp.offs += sizeof(int32_t)*ggml_nelements(op->src[0]);
+
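+    // note: same ping-pong parity trick as in argsort above - start swapped when the number of
+    //       merge passes is odd so that the final result presumably ends up in the real dst buffer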
+ if ((int) ceil(std::log(npr) / std::log(2)) % 2 == 1) {
+ std::swap(bid_dst, bid_tmp);
+ }
+
+ const int top_k = ne0;
+
+ ggml_metal_kargs_argsort args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.top_k =*/ std::min(nth, top_k), // for each block, keep just the top_k indices
+ };
+
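+    // note: when the row is split into npr blocks, each block keeps at most top_k candidates
+    //       (the last block may hold fewer), so the effective row length for the merge phase
+    //       is recomputed below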
+ if (npr > 1) {
+ args.ne0 = (npr - 1)*args.top_k + std::min(ne00 - (npr - 1)*nth, args.top_k);
+ }
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, npr*ne01, ne02, ne03, nth, 1, 1);
+
+ auto pipeline_merge = ggml_metal_library_get_pipeline_top_k_merge(lib, op);
+
+ int len = args.top_k;
+
+ while (len < args.ne0) {
+ ggml_metal_op_concurrency_reset(ctx);
+
+ // merges per row
+ const int nm = (args.ne0 + 2*len - 1) / (2*len);
+
+ const int nth = std::min(512, std::min(len, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline_merge)));
+
+ ggml_metal_kargs_argsort_merge args_merge = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ args.ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.top_k =*/ nm == 1 ? top_k : args.ne0, // the final merge outputs top_k elements
+ /*.len =*/ len,
+ };
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline_merge);
+ ggml_metal_encoder_set_bytes (enc, &args_merge, sizeof(args_merge), 0);
+ ggml_metal_encoder_set_buffer (enc, bid_src0, 1);
+ ggml_metal_encoder_set_buffer (enc, bid_dst, 2);
+ ggml_metal_encoder_set_buffer (enc, bid_tmp, 3);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, nm*ne01, ne02, ne03, nth, 1, 1);
+
+ std::swap(bid_dst, bid_tmp);
+
+ len <<= 1;
+ }
+
+ return 1;
+}
+
+int ggml_metal_op_tri(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ ggml_metal_kargs_tri args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.ne0 =*/ ne0,
+ /*.ne1 =*/ ne1,
+ /*.ne2 =*/ ne2,
+ /*.ne3 =*/ ne3,
+ /*.nb0 =*/ nb0,
+ /*.nb1 =*/ nb1,
+ /*.nb2 =*/ nb2,
+ /*.nb3 =*/ nb3,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_tri(lib, op);
+
+ int nth = 32; // SIMD width
+
+ while (nth < ne00 && nth < ggml_metal_pipeline_max_theads_per_threadgroup(pipeline)) {
+ nth *= 2;
+ }
+
+ nth = std::min(nth, ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+ nth = std::min(nth, ne00);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op), 2);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_opt_step_adamw(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_opt_step_adamw(lib, op);
+
+ const int64_t np = ggml_nelements(op->src[0]);
+ ggml_metal_kargs_opt_step_adamw args = {
+ /*.np =*/ np,
+ };
+
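+    // note: presumed binding order, following ggml's GGML_OP_OPT_STEP_ADAMW convention:
+    //       src[0] = params, src[1] = grads, src[2]/src[3] = first/second moments, src[4] = hyperparameters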
+ int ida = 0;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[3]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[4]), ida++);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0);
+ const int64_t n = (np + nth - 1) / nth;
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_opt_step_sgd(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS( int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS( int32_t, ne, op, ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb, op, nb);
+
+ auto pipeline = ggml_metal_library_get_pipeline_opt_step_sgd(lib, op);
+
+ const int64_t np = ggml_nelements(op->src[0]);
+ ggml_metal_kargs_opt_step_sgd args = {
+ /*.np =*/ np,
+ };
+
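+    // note: presumed binding order: src[0] = params, src[1] = grads, src[2] = SGD hyperparameters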
+ int ida = 0;
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes (enc, &args, sizeof(args), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[0]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[1]), ida++);
+ ggml_metal_encoder_set_buffer (enc, ggml_metal_get_buffer_id(op->src[2]), ida++);
+
+ const int nth = std::min(ggml_metal_pipeline_max_theads_per_threadgroup(pipeline), ne0);
+ const int64_t n = (np + nth - 1) / nth;
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, n, 1, 1, nth, 1, 1);
+
+ return 1;
+}
+
+int ggml_metal_op_count_equal(ggml_metal_op_t ctx, int idx) {
+ ggml_tensor * op = ctx->node(idx);
+
+ ggml_metal_library_t lib = ctx->lib;
+ ggml_metal_encoder_t enc = ctx->enc;
+
+ GGML_TENSOR_LOCALS(int32_t, ne0, op->src[0], ne);
+ GGML_TENSOR_LOCALS(uint64_t, nb0, op->src[0], nb);
+ GGML_TENSOR_LOCALS(uint64_t, nb1, op->src[1], nb);
+
+ {
+ ggml_metal_kargs_memset args = { /*.val =*/ 0 };
+
+ auto pipeline = ggml_metal_library_get_pipeline_memset(lib, op);
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 1);
+
+ ggml_metal_encoder_dispatch_threadgroups(enc, 1, 1, 1, 1, 1, 1);
+ }
+
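+    // note: the output is zeroed first; the concurrency reset presumably acts as a barrier so
+    //       the accumulation pass below runs only after the memset has completed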
+ ggml_metal_op_concurrency_reset(ctx);
+
+ {
+ ggml_metal_kargs_count_equal args = {
+ /*.ne00 =*/ ne00,
+ /*.ne01 =*/ ne01,
+ /*.ne02 =*/ ne02,
+ /*.ne03 =*/ ne03,
+ /*.nb00 =*/ nb00,
+ /*.nb01 =*/ nb01,
+ /*.nb02 =*/ nb02,
+ /*.nb03 =*/ nb03,
+ /*.nb10 =*/ nb10,
+ /*.nb11 =*/ nb11,
+ /*.nb12 =*/ nb12,
+ /*.nb13 =*/ nb13,
+ };
+
+ auto pipeline = ggml_metal_library_get_pipeline_count_equal(lib, op);
+
+ const size_t smem = pipeline.smem;
+
+ const int nth = 32*pipeline.nsg;
+
+ GGML_ASSERT(nth <= ggml_metal_pipeline_max_theads_per_threadgroup(pipeline));
+
+ ggml_metal_encoder_set_pipeline(enc, pipeline);
+ ggml_metal_encoder_set_bytes(enc, &args, sizeof(args), 0);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[0]), 1);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op->src[1]), 2);
+ ggml_metal_encoder_set_buffer(enc, ggml_metal_get_buffer_id(op), 3);
+
+ ggml_metal_encoder_set_threadgroup_memory_size(enc, smem, 0);
+ ggml_metal_encoder_dispatch_threadgroups(enc, ne01, ne02, ne03, nth, 1, 1);
+ }
+
+ return 1;
+}
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.h b/llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.h
new file mode 100644
index 0000000..29456d7
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal-ops.h
@@ -0,0 +1,93 @@
+#pragma once
+
+#include "ggml-metal-device.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct ggml_metal_op * ggml_metal_op_t;
+
+ggml_metal_op_t ggml_metal_op_init(
+ ggml_metal_device_t dev,
+ ggml_metal_cmd_buf_t cmd_buf,
+ struct ggml_cgraph * gf,
+ int idx_start,
+ int idx_end,
+ bool use_fusion,
+ bool use_concurrency,
+ bool use_capture,
+ int debug_graph,
+ int debug_fusion);
+
+void ggml_metal_op_free(ggml_metal_op_t ctx);
+
+int ggml_metal_op_n_nodes(ggml_metal_op_t ctx);
+
+int ggml_metal_op_encode(ggml_metal_op_t ctx, int idx);
+
+//
+// available ops:
+//
+
+// tokens per expert
+size_t ggml_metal_op_mul_mat_id_extra_tpe(const struct ggml_tensor * op);
+
+// id map [n_tokens, n_expert]
+size_t ggml_metal_op_mul_mat_id_extra_ids(const struct ggml_tensor * op);
+
+// return true if we should use the FA vector kernel for this op
+bool ggml_metal_op_flash_attn_ext_use_vec(const struct ggml_tensor * op);
+
+size_t ggml_metal_op_flash_attn_ext_extra_pad(const struct ggml_tensor * op);
+size_t ggml_metal_op_flash_attn_ext_extra_blk(const struct ggml_tensor * op);
+size_t ggml_metal_op_flash_attn_ext_extra_tmp(const struct ggml_tensor * op);
+
+int ggml_metal_op_concat (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_repeat (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_acc (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_unary (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_glu (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_sum (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_sum_rows (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_cumsum (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_get_rows (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_set_rows (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_diag (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_soft_max (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_ssm_conv (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_ssm_scan (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_rwkv (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_solve_tri (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_cpy (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_pool_1d (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_pool_2d (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_mul_mat (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_mul_mat_id (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_add_id (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_flash_attn_ext (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_bin (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_l2_norm (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_group_norm (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_norm (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_rope (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_im2col (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_conv_2d (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_conv_transpose_1d (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_conv_transpose_2d (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_upscale (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_pad (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_pad_reflect_1d (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_arange (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_timestep_embedding(ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_argmax (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_argsort (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_top_k (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_tri (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_opt_step_adamw (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_opt_step_sgd (ggml_metal_op_t ctx, int idx);
+int ggml_metal_op_count_equal (ggml_metal_op_t ctx, int idx);
+
+#ifdef __cplusplus
+}
+#endif
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal.cpp b/llama.cpp/ggml/src/ggml-metal/ggml-metal.cpp
new file mode 100644
index 0000000..1c70536
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal.cpp
@@ -0,0 +1,937 @@
+#include "ggml-metal.h"
+
+#include "ggml-impl.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-metal-device.h"
+#include "ggml-metal-context.h"
+#include "ggml-metal-ops.h"
+
+#include <mutex>
+#include <string>
+
+#define GGML_METAL_NAME "MTL"
+#define GGML_METAL_MAX_DEVICES 16
+
+// number of Metal devices
+// note: can be overridden with the GGML_METAL_DEVICES env var to simulate virtual devices
+static int g_devices = 1;
+
+////////////////////////////////////////////////////////////////////////////////
+// backend interface
+////////////////////////////////////////////////////////////////////////////////
+
+// shared buffer
+
+static void ggml_backend_metal_buffer_shared_free_buffer(ggml_backend_buffer_t buffer) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_free(ctx);
+}
+
+static void * ggml_backend_metal_buffer_shared_get_base(ggml_backend_buffer_t buffer) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+ return ggml_metal_buffer_get_base(ctx);
+}
+
+static void ggml_backend_metal_buffer_shared_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size);
+}
+
+static void ggml_backend_metal_buffer_shared_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size);
+}
+
+static void ggml_backend_metal_buffer_shared_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size);
+}
+
+static bool ggml_backend_metal_buffer_shared_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+ GGML_UNUSED(buffer);
+ GGML_UNUSED(src);
+ GGML_UNUSED(dst);
+
+ return false;
+}
+
+static void ggml_backend_metal_buffer_shared_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_clear(ctx, value);
+}
+
+static ggml_backend_buffer_i ggml_backend_metal_buffer_shared_i = {
+ /* .free_buffer = */ ggml_backend_metal_buffer_shared_free_buffer,
+ /* .get_base = */ ggml_backend_metal_buffer_shared_get_base,
+ /* .init_tensor = */ NULL,
+ /* .memset_tensor = */ ggml_backend_metal_buffer_shared_memset_tensor,
+ /* .set_tensor = */ ggml_backend_metal_buffer_shared_set_tensor,
+ /* .get_tensor = */ ggml_backend_metal_buffer_shared_get_tensor,
+ /* .cpy_tensor = */ ggml_backend_metal_buffer_shared_cpy_tensor,
+ /* .clear = */ ggml_backend_metal_buffer_shared_clear,
+ /* .reset = */ NULL,
+};
+
+// private buffer
+
+static void ggml_backend_metal_buffer_private_free_buffer(ggml_backend_buffer_t buffer) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_free(ctx);
+}
+
+static void * ggml_backend_metal_buffer_private_get_base(ggml_backend_buffer_t buffer) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+ return ggml_metal_buffer_get_base(ctx);
+}
+
+static void ggml_backend_metal_buffer_private_memset_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, uint8_t value, size_t offset, size_t size) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_memset_tensor(ctx, tensor, value, offset, size);
+}
+
+static void ggml_backend_metal_buffer_private_set_tensor(ggml_backend_buffer_t buffer, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_set_tensor(ctx, tensor, data, offset, size);
+}
+
+static void ggml_backend_metal_buffer_private_get_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_get_tensor(ctx, tensor, data, offset, size);
+}
+
+static bool ggml_backend_metal_buffer_private_cpy_tensor(ggml_backend_buffer_t buffer, const ggml_tensor * src, ggml_tensor * dst) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+ GGML_UNUSED(buffer);
+ GGML_UNUSED(src);
+ GGML_UNUSED(dst);
+
+ return false;
+}
+
+static void ggml_backend_metal_buffer_private_clear(ggml_backend_buffer_t buffer, uint8_t value) {
+ ggml_metal_buffer_t ctx = (ggml_metal_buffer_t)buffer->context;
+
+ GGML_ASSERT(!ggml_metal_buffer_is_shared(ctx));
+
+ ggml_metal_buffer_clear(ctx, value);
+}
+
+static ggml_backend_buffer_i ggml_backend_metal_buffer_private_i = {
+ /* .free_buffer = */ ggml_backend_metal_buffer_private_free_buffer,
+ /* .get_base = */ ggml_backend_metal_buffer_private_get_base,
+ /* .init_tensor = */ NULL,
+ /* .memset_tensor = */ ggml_backend_metal_buffer_private_memset_tensor,
+ /* .set_tensor = */ ggml_backend_metal_buffer_private_set_tensor,
+ /* .get_tensor = */ ggml_backend_metal_buffer_private_get_tensor,
+ /* .cpy_tensor = */ ggml_backend_metal_buffer_private_cpy_tensor,
+ /* .clear = */ ggml_backend_metal_buffer_private_clear,
+ /* .reset = */ NULL,
+};
+
+static bool ggml_backend_buffer_is_metal(ggml_backend_buffer_t buffer) {
+ return buffer->iface.free_buffer == ggml_backend_metal_buffer_shared_free_buffer ||
+ buffer->iface.free_buffer == ggml_backend_metal_buffer_private_free_buffer;
+}
+
+//
+// buffer types
+//
+
+struct ggml_backend_metal_buffer_type {
+ int device;
+ std::string name;
+};
+
+struct ggml_backend_metal_buffer_type_deleter {
+ void operator()(ggml_backend_metal_buffer_type * ctx) const {
+ delete ctx;
+ }
+};
+
+typedef std::unique_ptr<ggml_backend_metal_buffer_type, ggml_backend_metal_buffer_type_deleter> ggml_backend_metal_buffer_type_ptr;
+
+// common method for allocating shared or private Metal buffers
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size, bool shared) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+ ggml_metal_buffer_t res = ggml_metal_buffer_init(ctx_dev, size, shared);
+
+ ggml_backend_buffer_i buf_i = ggml_metal_buffer_is_shared(res)
+ ? ggml_backend_metal_buffer_shared_i
+ : ggml_backend_metal_buffer_private_i;
+
+ return ggml_backend_buffer_init(buft, buf_i, res, size);
+}
+
+static size_t ggml_backend_metal_buffer_type_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ size_t res = ggml_nbytes(tensor);
+
+    // some operations require additional memory for transient data:
+ switch (tensor->op) {
+ case GGML_OP_MUL_MAT_ID:
+ {
+ res += ggml_metal_op_mul_mat_id_extra_tpe(tensor);
+ res += ggml_metal_op_mul_mat_id_extra_ids(tensor);
+ } break;
+ case GGML_OP_FLASH_ATTN_EXT:
+ {
+ res += ggml_metal_op_flash_attn_ext_extra_pad(tensor);
+ res += ggml_metal_op_flash_attn_ext_extra_blk(tensor);
+ res += ggml_metal_op_flash_attn_ext_extra_tmp(tensor);
+ } break;
+ case GGML_OP_CUMSUM:
+ case GGML_OP_ARGSORT:
+ {
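+                // note: these ops appear to ping-pong between the output and a same-sized scratch
+                //       region appended to it (see the argsort merge loop), hence twice the size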
+ res *= 2;
+ } break;
+ case GGML_OP_TOP_K:
+ {
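+                // note: two int32 index regions sized to ggml_nelements(src[0]), matching the
+                //       bid_tmp offset used in ggml_metal_op_top_k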
+ res = 2*sizeof(int32_t)*ggml_nelements(tensor->src[0]);
+ } break;
+ default:
+ break;
+ }
+
+ return res;
+
+ GGML_UNUSED(buft);
+}
+
+// default (shared) buffer type
+
+static const char * ggml_backend_metal_buffer_type_shared_get_name(ggml_backend_buffer_type_t buft) {
+ ggml_backend_metal_buffer_type * ctx = (ggml_backend_metal_buffer_type *)buft->context;
+
+ return ctx->name.c_str();
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_shared_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
+}
+
+static size_t ggml_backend_metal_buffer_type_shared_get_alignment(ggml_backend_buffer_type_t buft) {
+ return 32;
+
+ GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_metal_buffer_type_shared_get_max_size(ggml_backend_buffer_type_t buft) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+
+ return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
+}
+
+static size_t ggml_backend_metal_buffer_type_shared_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
+}
+
+static bool ggml_backend_metal_buffer_type_shared_is_host(ggml_backend_buffer_type_t buft) {
+ return false;
+
+ GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_shared(int device) {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+
+ static std::vector<ggml_backend_buffer_type> bufts;
+ static std::vector<ggml_backend_metal_buffer_type_ptr> ctxs;
+
+ static bool initialized = false;
+ if (!initialized) {
+ bufts.reserve(g_devices);
+ ctxs.reserve(g_devices);
+
+ for (int i = 0; i < g_devices; ++i) {
+ ggml_backend_metal_buffer_type * raw_ctx =
+ new ggml_backend_metal_buffer_type {
+ /* .device = */ i,
+ /* .name = */ GGML_METAL_NAME + std::to_string(i),
+ };
+ ctxs.emplace_back(raw_ctx);
+
+ ggml_backend_buffer_type buft = {
+ /* .iface = */ {
+ /* .get_name = */ ggml_backend_metal_buffer_type_shared_get_name,
+ /* .alloc_buffer = */ ggml_backend_metal_buffer_type_shared_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_metal_buffer_type_shared_get_alignment,
+ /* .get_max_size = */ ggml_backend_metal_buffer_type_shared_get_max_size,
+ /* .get_alloc_size = */ ggml_backend_metal_buffer_type_shared_get_alloc_size,
+ /* .is_host = */ ggml_backend_metal_buffer_type_shared_is_host,
+ },
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_metal_reg(), i),
+ /* .context = */ raw_ctx,
+ };
+
+ bufts.emplace_back(buft);
+ }
+
+ initialized = true;
+ }
+
+ return &bufts[device];
+}
+
+// default (private) buffer type
+
+static const char * ggml_backend_metal_buffer_type_private_get_name(ggml_backend_buffer_type_t buft) {
+ ggml_backend_metal_buffer_type * ctx = (ggml_backend_metal_buffer_type *)buft->context;
+
+ return ctx->name.c_str();
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_private_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, false);
+}
+
+static size_t ggml_backend_metal_buffer_type_private_get_alignment(ggml_backend_buffer_type_t buft) {
+ return 32;
+
+ GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_metal_buffer_type_private_get_max_size(ggml_backend_buffer_type_t buft) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+
+ return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
+}
+
+static size_t ggml_backend_metal_buffer_type_private_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
+}
+
+static bool ggml_backend_metal_buffer_type_private_is_host(ggml_backend_buffer_type_t buft) {
+ return false;
+
+ GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_private(int device) {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+
+ static std::vector<ggml_backend_buffer_type> bufts;
+ static std::vector<ggml_backend_metal_buffer_type_ptr> ctxs;
+
+ static bool initialized = false;
+ if (!initialized) {
+ bufts.reserve(g_devices);
+ ctxs.reserve(g_devices);
+
+ for (int i = 0; i < g_devices; ++i) {
+ ggml_backend_metal_buffer_type * raw_ctx = new ggml_backend_metal_buffer_type{
+ /* .device = */ i,
+ /* .name = */ GGML_METAL_NAME + std::to_string(i) + "_Private"
+ };
+ ctxs.emplace_back(raw_ctx);
+
+ ggml_backend_buffer_type buft = {
+ /* .iface = */ {
+ /* .get_name = */ ggml_backend_metal_buffer_type_private_get_name,
+ /* .alloc_buffer = */ ggml_backend_metal_buffer_type_private_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_metal_buffer_type_private_get_alignment,
+ /* .get_max_size = */ ggml_backend_metal_buffer_type_private_get_max_size,
+ /* .get_alloc_size = */ ggml_backend_metal_buffer_type_private_get_alloc_size,
+ /* .is_host = */ ggml_backend_metal_buffer_type_private_is_host,
+ },
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_metal_reg(), i),
+ /* .context = */ raw_ctx,
+ };
+
+ bufts.emplace_back(buft);
+ }
+
+ initialized = true;
+ }
+
+ return &bufts[device];
+}
+
+// mapped buffer type
+
+static const char * ggml_backend_metal_buffer_type_mapped_get_name(ggml_backend_buffer_type_t buft) {
+ ggml_backend_metal_buffer_type * ctx = (ggml_backend_metal_buffer_type *)buft->context;
+
+ return ctx->name.c_str();
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_buffer_type_mapped_alloc_buffer(ggml_backend_buffer_type_t buft, size_t size) {
+ // for mapped buffers, prefer shared memory
+ return ggml_backend_metal_buffer_type_alloc_buffer(buft, size, true);
+}
+
+static size_t ggml_backend_metal_buffer_type_mapped_get_alignment(ggml_backend_buffer_type_t buft) {
+ return 32;
+
+ GGML_UNUSED(buft);
+}
+
+static size_t ggml_backend_metal_buffer_type_mapped_get_max_size(ggml_backend_buffer_type_t buft) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)buft->device->context;
+
+ return ggml_metal_device_get_props(ctx_dev)->max_buffer_size;
+}
+
+static size_t ggml_backend_metal_buffer_type_mapped_get_alloc_size(ggml_backend_buffer_type_t buft, const ggml_tensor * tensor) {
+ return ggml_backend_metal_buffer_type_get_alloc_size(buft, tensor);
+}
+
+static bool ggml_backend_metal_buffer_type_mapped_is_host(ggml_backend_buffer_type_t buft) {
+ return false;
+
+ GGML_UNUSED(buft);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_buffer_type_mapped(int device) {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+
+ static std::vector<ggml_backend_buffer_type> bufts;
+ static std::vector<ggml_backend_metal_buffer_type_ptr> ctxs;
+
+ static bool initialized = false;
+ if (!initialized) {
+ bufts.reserve(g_devices);
+ ctxs.reserve(g_devices);
+
+ for (int i = 0; i < g_devices; ++i) {
+ ggml_backend_metal_buffer_type * raw_ctx = new ggml_backend_metal_buffer_type{
+ /* .device = */ i,
+ /* .name = */ GGML_METAL_NAME + std::to_string(i) + "_Mapped"
+ };
+ ctxs.emplace_back(raw_ctx);
+
+ // note: not obvious, but this buffer type still needs to implement .alloc_buffer:
+ // https://github.com/ggml-org/llama.cpp/pull/15832#discussion_r2333177099
+ ggml_backend_buffer_type buft = {
+ /* .iface = */ {
+ /* .get_name = */ ggml_backend_metal_buffer_type_mapped_get_name,
+ /* .alloc_buffer = */ ggml_backend_metal_buffer_type_mapped_alloc_buffer,
+ /* .get_alignment = */ ggml_backend_metal_buffer_type_mapped_get_alignment,
+ /* .get_max_size = */ ggml_backend_metal_buffer_type_mapped_get_max_size,
+ /* .get_alloc_size = */ ggml_backend_metal_buffer_type_mapped_get_alloc_size,
+ /* .is_host = */ ggml_backend_metal_buffer_type_mapped_is_host,
+ },
+ /* .device = */ ggml_backend_reg_dev_get(ggml_backend_metal_reg(), i),
+ /* .context = */ raw_ctx,
+ };
+
+ bufts.emplace_back(buft);
+ }
+
+ initialized = true;
+ }
+
+ return &bufts[device];
+}
+
+// backend
+
+static const char * ggml_backend_metal_name(ggml_backend_t backend) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ return ggml_metal_get_name(ctx);
+}
+
+static void ggml_backend_metal_free(ggml_backend_t backend) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ // wait for any ongoing async operations to finish
+ ggml_metal_synchronize(ctx);
+
+ ggml_metal_free(ctx);
+
+ free(backend);
+}
+
+static void ggml_backend_metal_synchronize(ggml_backend_t backend) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ ggml_metal_synchronize(ctx);
+}
+
+static void ggml_backend_metal_set_tensor_async(ggml_backend_t backend, ggml_tensor * tensor, const void * data, size_t offset, size_t size) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ ggml_metal_set_tensor_async(ctx, tensor, data, offset, size);
+}
+
+static void ggml_backend_metal_get_tensor_async(ggml_backend_t backend, const ggml_tensor * tensor, void * data, size_t offset, size_t size) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ ggml_metal_get_tensor_async(ctx, tensor, data, offset, size);
+}
+
+static bool ggml_backend_metal_cpy_tensor_async(ggml_backend_t backend_src, ggml_backend_t backend_dst, const ggml_tensor * src, ggml_tensor * dst) {
+ if (!ggml_backend_is_metal(backend_src) || !ggml_backend_is_metal(backend_dst)) {
+ return false;
+ }
+
+ if (!ggml_backend_buffer_is_metal(src->buffer) || !ggml_backend_buffer_is_metal(dst->buffer)) {
+ return false;
+ }
+
+ ggml_metal_t ctx_src = (ggml_metal_t)backend_src->context;
+ ggml_metal_t ctx_dst = (ggml_metal_t)backend_dst->context;
+
+ //ggml_backend_buffer_t buf_src = src->view_src ? src->view_src->buffer : src->buffer;
+ //ggml_backend_buffer_t buf_dst = dst->view_src ? dst->view_src->buffer : dst->buffer;
+
+ //ggml_metal_buffer_t buf_ctx_src = (ggml_metal_buffer_t)buf_src->context;
+ //ggml_metal_buffer_t buf_ctx_dst = (ggml_metal_buffer_t)buf_dst->context;
+
+ return ggml_metal_cpy_tensor_async(ctx_src, ctx_dst, src, dst);
+}
+
+static enum ggml_status ggml_backend_metal_graph_compute(ggml_backend_t backend, ggml_cgraph * cgraph) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ return ggml_metal_graph_compute(ctx, cgraph);
+}
+
+static void ggml_backend_metal_event_record(ggml_backend_t backend, ggml_backend_event_t event) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+ ggml_metal_event_t ev = (ggml_metal_event_t)event->context;
+
+ ggml_metal_event_record(ctx, ev);
+}
+
+static void ggml_backend_metal_event_wait(ggml_backend_t backend, ggml_backend_event_t event) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+ ggml_metal_event_t ev = (ggml_metal_event_t)event->context;
+
+ ggml_metal_event_wait(ctx, ev);
+}
+
+static void ggml_backend_metal_graph_optimize(ggml_backend_t backend, ggml_cgraph * cgraph) {
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ ggml_metal_graph_optimize(ctx, cgraph);
+}
+
+static void ggml_backend_metal_set_n_cb(ggml_backend_t backend, int n_cb) {
+ GGML_ASSERT(ggml_backend_is_metal(backend));
+
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ ggml_metal_set_n_cb(ctx, n_cb);
+}
+
+static ggml_backend_i ggml_backend_metal_i = {
+ /* .get_name = */ ggml_backend_metal_name,
+ /* .free = */ ggml_backend_metal_free,
+ /* .set_tensor_async = */ ggml_backend_metal_set_tensor_async,
+ /* .get_tensor_async = */ ggml_backend_metal_get_tensor_async,
+ /* .cpy_tensor_async = */ ggml_backend_metal_cpy_tensor_async, // only needed for multi-GPU setups
+ /* .synchronize = */ ggml_backend_metal_synchronize,
+ /* .graph_plan_create = */ NULL,
+ /* .graph_plan_free = */ NULL,
+ /* .graph_plan_update = */ NULL,
+ /* .graph_plan_compute = */ NULL,
+ /* .graph_compute = */ ggml_backend_metal_graph_compute,
+ /* .event_record = */ ggml_backend_metal_event_record,
+ /* .event_wait = */ ggml_backend_metal_event_wait,
+ /* .graph_optimize = */ ggml_backend_metal_graph_optimize,
+};
+
+static ggml_guid_t ggml_backend_metal_guid(void) {
+ static ggml_guid guid = { 0x81, 0xa1, 0x8b, 0x1e, 0x71, 0xec, 0x79, 0xed, 0x2b, 0x85, 0xdc, 0x8a, 0x61, 0x98, 0x30, 0xe6 };
+ return &guid;
+}
+
+ggml_backend_t ggml_backend_metal_init(void) {
+ ggml_backend_dev_t dev = ggml_backend_reg_dev_get(ggml_backend_metal_reg(), 0);
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ ggml_metal_t ctx = ggml_metal_init(ctx_dev);
+ if (ctx == NULL) {
+ GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
+ return NULL;
+ }
+
+ ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
+
+ *backend = {
+ /* .guid = */ ggml_backend_metal_guid(),
+ /* .interface = */ ggml_backend_metal_i,
+ /* .device = */ dev,
+ /* .context = */ ctx,
+ };
+
+ ggml_backend_metal_set_n_cb(backend, 1);
+
+ return backend;
+}
+
+bool ggml_backend_is_metal(ggml_backend_t backend) {
+ return backend != NULL && ggml_guid_matches(backend->guid, ggml_backend_metal_guid());
+}
+
+void ggml_backend_metal_set_abort_callback(ggml_backend_t backend, ggml_abort_callback abort_callback, void * user_data) {
+ GGML_ASSERT(ggml_backend_is_metal(backend));
+
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ ggml_metal_set_abort_callback(ctx, abort_callback, user_data);
+}
+
+bool ggml_backend_metal_supports_family(ggml_backend_t backend, int family) {
+ GGML_ASSERT(ggml_backend_is_metal(backend));
+
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ return ggml_metal_supports_family(ctx, family);
+}
+
+void ggml_backend_metal_capture_next_compute(ggml_backend_t backend) {
+ GGML_ASSERT(ggml_backend_is_metal(backend));
+
+ ggml_metal_t ctx = (ggml_metal_t)backend->context;
+
+ ggml_metal_capture_next_compute(ctx);
+}
+
+// backend device
+
+static const char * ggml_backend_metal_device_get_name(ggml_backend_dev_t dev) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx_dev);
+
+ return props_dev->name;
+}
+
+static const char * ggml_backend_metal_device_get_description(ggml_backend_dev_t dev) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ return ggml_metal_device_get_props(ctx_dev)->desc;
+}
+
+static void ggml_backend_metal_device_get_memory(ggml_backend_dev_t dev, size_t * free, size_t * total) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ ggml_metal_device_get_memory(ctx_dev, free, total);
+}
+
+static enum ggml_backend_dev_type ggml_backend_metal_device_get_type(ggml_backend_dev_t dev) {
+ return GGML_BACKEND_DEVICE_TYPE_GPU;
+
+ GGML_UNUSED(dev);
+}
+
+static void ggml_backend_metal_device_get_props(ggml_backend_dev_t dev, ggml_backend_dev_props * props) {
+ props->name = ggml_backend_metal_device_get_name(dev);
+ props->description = ggml_backend_metal_device_get_description(dev);
+ props->type = ggml_backend_metal_device_get_type(dev);
+
+ ggml_backend_metal_device_get_memory(dev, &props->memory_free, &props->memory_total);
+
+ props->caps = {
+ /* .async = */ true,
+ /* .host_buffer = */ false,
+ /* .buffer_from_host_ptr = */ true,
+ /* .events = */ true,
+ };
+}
+
+static ggml_backend_t ggml_backend_metal_device_init_backend(ggml_backend_dev_t dev, const char * params) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ ggml_metal_t ctx = ggml_metal_init(ctx_dev);
+ if (ctx == NULL) {
+ GGML_LOG_ERROR("%s: error: failed to allocate context\n", __func__);
+ return NULL;
+ }
+
+ ggml_backend_t backend = (ggml_backend_t) malloc(sizeof(ggml_backend));
+
+ *backend = {
+ /* .guid = */ ggml_backend_metal_guid(),
+ /* .interface = */ ggml_backend_metal_i,
+ /* .device = */ dev,
+ /* .context = */ ctx,
+ };
+
+ ggml_backend_metal_set_n_cb(backend, 1);
+
+ return backend;
+
+ GGML_UNUSED(params);
+}
+
+static ggml_backend_buffer_type_t ggml_backend_metal_device_get_buffer_type(ggml_backend_dev_t dev) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx_dev);
+
+ return props_dev->use_shared_buffers ? ggml_backend_metal_buffer_type_shared(props_dev->device) : ggml_backend_metal_buffer_type_private(props_dev->device);
+}
+
+static ggml_backend_buffer_t ggml_backend_metal_device_buffer_mapped(ggml_backend_dev_t dev, void * ptr, size_t size, size_t max_tensor_size) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
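+    // note: wraps an existing host allocation as a Metal buffer (presumably without copying)
+    //       and exposes it through the mapped buffer type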
+ ggml_metal_buffer_t res = ggml_metal_buffer_map(ctx_dev, ptr, size, max_tensor_size);
+
+ const ggml_metal_device_props * props_dev = ggml_metal_device_get_props(ctx_dev);
+
+ return ggml_backend_buffer_init(ggml_backend_metal_buffer_type_mapped(props_dev->device), ggml_backend_metal_buffer_shared_i, res, size);
+}
+
+static bool ggml_backend_metal_device_supports_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ return ggml_metal_device_supports_op(ctx_dev, op);
+}
+
+static bool ggml_backend_metal_device_supports_buft(ggml_backend_dev_t dev, ggml_backend_buffer_type_t buft) {
+ return
+ buft->device == dev && (
+ buft->iface.get_name == ggml_backend_metal_buffer_type_shared_get_name ||
+ buft->iface.get_name == ggml_backend_metal_buffer_type_private_get_name ||
+ buft->iface.get_name == ggml_backend_metal_buffer_type_mapped_get_name);
+
+ GGML_UNUSED(dev);
+}
+
+static int64_t get_op_batch_size(const ggml_tensor * op) {
+ switch (op->op) {
+ case GGML_OP_MUL_MAT:
+ return op->ne[1];
+ case GGML_OP_MUL_MAT_ID:
+ return op->ne[2];
+ default:
+ return ggml_nrows(op);
+ }
+}
+
+static bool ggml_backend_metal_device_offload_op(ggml_backend_dev_t dev, const ggml_tensor * op) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
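+    // note: only matrix multiplications are offloaded, and only when the batch is large enough
+    //       for the GPU round-trip to pay off (threshold taken from the device props)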
+ return (op->op == GGML_OP_MUL_MAT ||
+ op->op == GGML_OP_MUL_MAT_ID) &&
+ get_op_batch_size(op) >= ggml_metal_device_get_props(ctx_dev)->op_offload_min_batch_size;
+}
+
+static ggml_backend_event_t ggml_backend_metal_device_event_new(ggml_backend_dev_t dev) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ ggml_metal_event_t event = ggml_metal_device_event_init(ctx_dev);
+ GGML_ASSERT(event);
+
+ ggml_backend_event_t ev = new ggml_backend_event {
+ /* .device = */ dev,
+ /* .context = */ event,
+ };
+
+ return ev;
+}
+
+static void ggml_backend_metal_device_event_free(ggml_backend_dev_t dev, ggml_backend_event_t event) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ ggml_metal_event_t ev = (ggml_metal_event_t)event->context;
+
+ ggml_metal_device_event_free(ctx_dev, ev);
+
+ delete event;
+}
+
+static void ggml_backend_metal_device_event_synchronize(ggml_backend_dev_t dev, ggml_backend_event_t event) {
+ ggml_metal_device_t ctx_dev = (ggml_metal_device_t)dev->context;
+
+ ggml_metal_event_t evt = (ggml_metal_event_t)event->context;
+
+ ggml_metal_device_event_synchronize(ctx_dev, evt);
+}
+
+static ggml_backend_device_i ggml_backend_metal_device_i = {
+ /* .get_name = */ ggml_backend_metal_device_get_name,
+ /* .get_description = */ ggml_backend_metal_device_get_description,
+ /* .get_memory = */ ggml_backend_metal_device_get_memory,
+ /* .get_type = */ ggml_backend_metal_device_get_type,
+ /* .get_props = */ ggml_backend_metal_device_get_props,
+ /* .init_backend = */ ggml_backend_metal_device_init_backend,
+ /* .get_buffer_type = */ ggml_backend_metal_device_get_buffer_type,
+ /* .get_host_buffer_type = */ NULL,
+ /* .buffer_from_host_ptr = */ ggml_backend_metal_device_buffer_mapped,
+ /* .supports_op = */ ggml_backend_metal_device_supports_op,
+ /* .supports_buft = */ ggml_backend_metal_device_supports_buft,
+ /* .offload_op = */ ggml_backend_metal_device_offload_op,
+ /* .event_new = */ ggml_backend_metal_device_event_new,
+ /* .event_free = */ ggml_backend_metal_device_event_free,
+ /* .event_synchronize = */ ggml_backend_metal_device_event_synchronize,
+};
+
+// backend registry
+
+struct ggml_backend_metal_reg {
+ std::vector<ggml_backend_dev_t> devices;
+};
+
+typedef struct ggml_backend_metal_reg * ggml_backend_metal_reg_t;
+
+static ggml_backend_metal_reg_t ggml_backend_metal_reg_init(void) {
+ ggml_backend_metal_reg_t ctx = new struct ggml_backend_metal_reg;
+
+ return ctx;
+}
+
+static void ggml_backend_metal_reg_free(ggml_backend_metal_reg_t ctx) {
+ delete ctx;
+}
+
+struct ggml_backend_metal_reg_deleter {
+ void operator()(ggml_backend_metal_reg_t ctx) {
+ ggml_backend_metal_reg_free(ctx);
+ }
+};
+
+typedef std::unique_ptr<struct ggml_backend_metal_reg, ggml_backend_metal_reg_deleter> ggml_backend_metal_reg_ptr;
+
+static const char * ggml_backend_metal_reg_get_name(ggml_backend_reg_t reg) {
+ return GGML_METAL_NAME;
+
+ GGML_UNUSED(reg);
+}
+
+static size_t ggml_backend_metal_reg_device_count(ggml_backend_reg_t reg) {
+ ggml_backend_metal_reg_t ctx = (ggml_backend_metal_reg_t)reg->context;
+ return ctx->devices.size();
+}
+
+static ggml_backend_dev_t ggml_backend_metal_reg_device_get(ggml_backend_reg_t reg, size_t index) {
+ ggml_backend_metal_reg_t ctx = (ggml_backend_metal_reg_t)reg->context;
+ GGML_ASSERT(index < ctx->devices.size());
+ return ctx->devices[index];
+}
+
+static ggml_backend_feature g_ggml_backend_metal_features[] = {
+#if defined(GGML_METAL_EMBED_LIBRARY)
+ { "EMBED_LIBRARY", "1" },
+#endif
+ { NULL, NULL },
+};
+
+static ggml_backend_feature * ggml_backend_metal_get_features(ggml_backend_reg_t reg) {
+ return g_ggml_backend_metal_features;
+
+ GGML_UNUSED(reg);
+}
+
+static void * ggml_backend_metal_get_proc_address(ggml_backend_reg_t reg, const char * name) {
+ if (strcmp(name, "ggml_backend_get_features") == 0) {
+ return (void *)ggml_backend_metal_get_features;
+ }
+
+ return NULL;
+
+ GGML_UNUSED(reg);
+}
+
+static ggml_backend_reg_i ggml_backend_metal_reg_i = {
+ /* .get_name = */ ggml_backend_metal_reg_get_name,
+ /* .get_device_count = */ ggml_backend_metal_reg_device_count,
+ /* .get_device = */ ggml_backend_metal_reg_device_get,
+ /* .get_proc_address = */ ggml_backend_metal_get_proc_address,
+};
+
+static ggml_backend_dev_t ggml_backend_metal_device_init(ggml_backend_reg_t reg, int device) {
+ return new ggml_backend_device {
+ /* .iface = */ ggml_backend_metal_device_i,
+ /* .reg = */ reg,
+ /* .context = */ ggml_metal_device_get(device),
+ };
+}
+
+static void ggml_backend_metal_device_free(ggml_backend_dev_t dev) {
+ delete dev;
+}
+
+struct ggml_backend_device_deleter {
+ void operator()(ggml_backend_dev_t ctx) {
+ ggml_backend_metal_device_free(ctx);
+ }
+};
+
+typedef std::unique_ptr<ggml_backend_device, ggml_backend_device_deleter> ggml_backend_device_ptr;
+
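+// lazily build the singleton registry: the devices are created once under a mutex, and their
+// number can be overridden via the GGML_METAL_DEVICES environment variable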
+ggml_backend_reg_t ggml_backend_metal_reg(void) {
+ static ggml_backend_reg reg;
+ static bool initialized = false;
+
+ {
+ static std::mutex mutex;
+ std::lock_guard<std::mutex> lock(mutex);
+
+ const char * env = getenv("GGML_METAL_DEVICES");
+ if (env) {
+ g_devices = atoi(env);
+ }
+
+ static std::vector<ggml_backend_device_ptr> devs;
+
+ if (!initialized) {
+ static ggml_backend_metal_reg_ptr reg_ctx(ggml_backend_metal_reg_init());
+
+ for (int i = 0; i < g_devices; ++i) {
+ auto * dev = ggml_backend_metal_device_init(&reg, i);
+ devs.emplace_back(dev);
+
+ reg_ctx->devices.push_back(dev);
+ }
+
+ reg = {
+ /* .api_version = */ GGML_BACKEND_API_VERSION,
+ /* .iface = */ ggml_backend_metal_reg_i,
+ /* .context = */ reg_ctx.get(),
+ };
+ }
+
+ initialized = true;
+ }
+
+ return &reg;
+}
+
+GGML_BACKEND_DL_IMPL(ggml_backend_metal_reg)
diff --git a/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal b/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal
new file mode 100644
index 0000000..0036ba9
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-metal/ggml-metal.metal
@@ -0,0 +1,9798 @@
+#define GGML_COMMON_DECL_METAL
+#define GGML_COMMON_IMPL_METAL
+#if defined(GGML_METAL_EMBED_LIBRARY)
+__embed_ggml-common.h__
+#else
+#include "ggml-common.h"
+#endif
+#include "ggml-metal-impl.h"
+
+#include <metal_stdlib>
+
+#ifdef GGML_METAL_HAS_TENSOR
+#include <metal_tensor>
+
+#include <MetalPerformancePrimitives/MetalPerformancePrimitives.h>
+#endif
+
+using namespace metal;
+
+#define MAX(x, y) ((x) > (y) ? (x) : (y))
+#define MIN(x, y) ((x) < (y) ? (x) : (y))
+#define SWAP(x, y) { auto tmp = (x); (x) = (y); (y) = tmp; }
+
+#define PAD2(x, n) (((x) + (n) - 1) & ~((n) - 1))
+
+#define FOR_UNROLL(x) _Pragma("clang loop unroll(full)") for (x)
+
+#define N_SIMDWIDTH 32 // assuming SIMD group size is 32
+
+// ref: https://developer.apple.com/metal/Metal-Shading-Language-Specification.pdf
+//
+// cmd:
+// .../usr/bin/metal -dM -E -c ggml/src/ggml-metal/ggml-metal.metal
+// .../usr/bin/metal -dM -E -c -target air64-apple-ios14.0 ggml/src/ggml-metal/ggml-metal.metal
+//
+#if __METAL_VERSION__ < 310 && defined(GGML_METAL_HAS_BF16)
+#undef GGML_METAL_HAS_BF16
+#endif
+
+#if defined(GGML_METAL_HAS_BF16)
+typedef matrix<bfloat, 4, 4> bfloat4x4;
+typedef matrix<bfloat, 2, 4> bfloat2x4;
+#endif
+
+constexpr constant static float kvalues_iq4nl_f[16] = {
+ -127.f, -104.f, -83.f, -65.f, -49.f, -35.f, -22.f, -10.f, 1.f, 13.f, 25.f, 38.f, 53.f, 69.f, 89.f, 113.f
+};
+
+constexpr constant static float kvalues_mxfp4_f[16] = {
+ 0, .5f, 1.f, 1.5f, 2.f, 3.f, 4.f, 6.f, -0, -.5f, -1.f, -1.5f, -2.f, -3.f, -4.f, -6.f
+};
+
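+// binary search over a sorted codebook (e.g. kvalues_iq4nl_f); returns the index of the entry closest to x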
+static inline int best_index_int8(int n, constant float * val, float x) {
+ if (x <= val[0]) return 0;
+ if (x >= val[n-1]) return n-1;
+ int ml = 0, mu = n-1;
+ while (mu-ml > 1) {
+ int mav = (ml+mu)/2;
+ if (x < val[mav]) mu = mav; else ml = mav;
+ }
+ return x - val[mu-1] < val[mu] - x ? mu-1 : mu;
+}
+
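+// E8M0 scale: the byte is the raw IEEE-754 exponent field, i.e. the scale is 2^(x - 127)
+// (x = 127 -> 1.0f, x = 128 -> 2.0f); x == 0 maps to the subnormal 2^-127 instead of zero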
+static inline float e8m0_to_fp32(uint8_t x) {
+ uint32_t bits;
+
+ if (x == 0) {
+ bits = 0x00400000;
+ } else {
+ bits = (uint32_t) x << 23;
+ }
+
+ return as_type<float>(bits);
+}
+
+static inline float dot(float x, float y) {
+ return x*y;
+}
+
+// NOTE: this is not dequantizing - we are simply fitting the template
+template <typename type4x4>
+void dequantize_f32(device const float4x4 * src, short il, thread type4x4 & reg) {
+ reg = (type4x4)(*src);
+}
+
+template <typename type4>
+void dequantize_f32_t4(device const float4 * src, short il, thread type4 & reg) {
+ reg = (type4)(*src);
+}
+
+template <typename type4x4>
+void dequantize_f16(device const half4x4 * src, short il, thread type4x4 & reg) {
+ reg = (type4x4)(*src);
+}
+
+template <typename type4>
+void dequantize_f16_t4(device const half4 * src, short il, thread type4 & reg) {
+ reg = (type4)(*(src));
+}
+
+#if defined(GGML_METAL_HAS_BF16)
+template <typename type4x4>
+void dequantize_bf16(device const bfloat4x4 * src, short il, thread type4x4 & reg) {
+ reg = (type4x4)(*src);
+}
+
+template <typename type4>
+void dequantize_bf16_t4(device const bfloat4 * src, short il, thread type4 & reg) {
+ reg = (type4)(*(src));
+}
+#endif
+
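+// the 4-bit dequantizers below read the quants as uint16_t pairs of bytes: the nibble/byte shifts
+// (>> 4 for the high nibble, >> 8 for the high byte) are folded into the scales d1 and d2 = d1/256
+// instead of being applied to the quant values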
+template <typename type4x4>
+void dequantize_q4_0(device const block_q4_0 * xb, short il, thread type4x4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 1);
+ const float d1 = il ? (xb->d / 16.h) : xb->d;
+ const float d2 = d1 / 256.f;
+ const float md = -8.h * xb->d;
+ const ushort mask0 = il ? 0x00F0 : 0x000F;
+ const ushort mask1 = mask0 << 8;
+
+ float4x4 reg_f;
+
+ for (int i = 0; i < 8; i++) {
+ reg_f[i/2][2*(i%2) + 0] = d1 * (qs[i] & mask0) + md;
+ reg_f[i/2][2*(i%2) + 1] = d2 * (qs[i] & mask1) + md;
+ }
+
+ reg = (type4x4) reg_f;
+}
+
+template <typename type4>
+void dequantize_q4_0_t4(device const block_q4_0 * xb, short il, thread type4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 1);
+ const float d1 = (il/4) ? (xb->d / 16.h) : xb->d;
+ const float d2 = d1 / 256.f;
+ const float md = -8.h * xb->d;
+ const ushort mask0 = (il/4) ? 0x00F0 : 0x000F;
+ const ushort mask1 = mask0 << 8;
+
+ for (int i = 0; i < 2; i++) {
+ reg[2*i + 0] = d1 * (qs[2*(il%4) + i] & mask0) + md;
+ reg[2*i + 1] = d2 * (qs[2*(il%4) + i] & mask1) + md;
+ }
+}
+
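+// Q4_0: one scale per block, values stored as 4-bit indices with x ~= d*(q - 8) and d = max/-8,
+// so the value with the largest magnitude maps to an end of the [0, 15] range.
+// e.g. max = -2.0f gives d = 0.25f; a source value of 1.0f is stored as (int8_t)(1.0f/0.25f + 8.5f) = 12
+// and dequantizes back to 0.25f*(12 - 8) = 1.0f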
+void quantize_q4_0(device const float * src, device block_q4_0 & dst) {
+#pragma METAL fp math_mode(safe)
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < QK4_0; j++) {
+ const float v = src[j];
+ if (amax < fabs(v)) {
+ amax = fabs(v);
+ max = v;
+ }
+ }
+
+ const float d = max / -8;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ dst.d = d;
+
+ for (int j = 0; j < QK4_0/2; ++j) {
+ const float x0 = src[0 + j]*id;
+ const float x1 = src[QK4_0/2 + j]*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 8.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 8.5f));
+
+ dst.qs[j] = xi0;
+ dst.qs[j] |= xi1 << 4;
+ }
+}
+
+void quantize_q4_1(device const float * src, device block_q4_1 & dst) {
+#pragma METAL fp math_mode(safe)
+ float min = FLT_MAX;
+ float max = -FLT_MAX;
+
+ for (int j = 0; j < QK4_1; j++) {
+ const float v = src[j];
+ if (min > v) min = v;
+ if (max < v) max = v;
+ }
+
+ const float d = (max - min) / ((1 << 4) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ dst.d = d;
+ dst.m = min;
+
+ for (int j = 0; j < QK4_1/2; ++j) {
+ const float x0 = (src[0 + j] - min)*id;
+ const float x1 = (src[QK4_1/2 + j] - min)*id;
+
+ const uint8_t xi0 = MIN(15, (int8_t)(x0 + 0.5f));
+ const uint8_t xi1 = MIN(15, (int8_t)(x1 + 0.5f));
+
+ dst.qs[j] = xi0;
+ dst.qs[j] |= xi1 << 4;
+ }
+}
+
+void quantize_q5_0(device const float * src, device block_q5_0 & dst) {
+#pragma METAL fp math_mode(safe)
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < QK5_0; j++) {
+ const float v = src[j];
+ if (amax < fabs(v)) {
+ amax = fabs(v);
+ max = v;
+ }
+ }
+
+ const float d = max / -16;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ dst.d = d;
+
+ uint32_t qh = 0;
+ for (int j = 0; j < QK5_0/2; ++j) {
+ const float x0 = src[0 + j]*id;
+ const float x1 = src[QK5_0/2 + j]*id;
+
+ const uint8_t xi0 = MIN(31, (int8_t)(x0 + 16.5f));
+ const uint8_t xi1 = MIN(31, (int8_t)(x1 + 16.5f));
+
+ dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_0/2);
+ }
+
+ thread const uint8_t * qh8 = (thread const uint8_t *)&qh;
+
+ for (int j = 0; j < 4; ++j) {
+ dst.qh[j] = qh8[j];
+ }
+}
+
+void quantize_q5_1(device const float * src, device block_q5_1 & dst) {
+#pragma METAL fp math_mode(safe)
+ float max = src[0];
+ float min = src[0];
+
+ for (int j = 1; j < QK5_1; j++) {
+ const float v = src[j];
+ min = v < min ? v : min;
+ max = v > max ? v : max;
+ }
+
+ const float d = (max - min) / 31;
+ const float id = d ? 1.0f/d : 0.0f;
+
+ dst.d = d;
+ dst.m = min;
+
+ uint32_t qh = 0;
+ for (int j = 0; j < QK5_1/2; ++j) {
+ const float x0 = (src[0 + j] - min)*id;
+ const float x1 = (src[QK5_1/2 + j] - min)*id;
+
+ const uint8_t xi0 = (uint8_t)(x0 + 0.5f);
+ const uint8_t xi1 = (uint8_t)(x1 + 0.5f);
+
+ dst.qs[j] = (xi0 & 0xf) | ((xi1 & 0xf) << 4);
+ qh |= ((xi0 & 0x10u) >> 4) << (j + 0);
+ qh |= ((xi1 & 0x10u) >> 4) << (j + QK5_1/2);
+ }
+
+ thread const uint8_t * qh8 = (thread const uint8_t *)&qh;
+
+ for (int j = 0; j < 4; ++j) {
+ dst.qh[j] = qh8[j];
+ }
+}
+
+void quantize_q8_0(device const float * src, device block_q8_0 & dst) {
+#pragma METAL fp math_mode(safe)
+ float amax = 0.0f; // absolute max
+
+ for (int j = 0; j < QK8_0; j++) {
+ const float v = src[j];
+ amax = MAX(amax, fabs(v));
+ }
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ dst.d = d;
+
+ for (int j = 0; j < QK8_0; ++j) {
+ const float x0 = src[j]*id;
+
+ dst.qs[j] = round(x0);
+ }
+}
+
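+// IQ4_NL: after picking the nearest codebook entries with the initial scale, the final scale is
+// refit as a weighted least-squares solution d = sum(w*v*x) / sum(w*v*v) with weights w = x^2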
+void quantize_iq4_nl(device const float * src, device block_iq4_nl & dst) {
+#pragma METAL fp math_mode(safe)
+ float amax = 0.0f; // absolute max
+ float max = 0.0f;
+
+ for (int j = 0; j < QK4_NL; j++) {
+ const float v = src[j];
+ if (amax < fabs(v)) {
+ amax = fabs(v);
+ max = v;
+ }
+ }
+
+ const float d = max / kvalues_iq4nl_f[0];
+ const float id = d ? 1.0f/d : 0.0f;
+
+ float sumqx = 0, sumq2 = 0;
+ for (int j = 0; j < QK4_NL/2; ++j) {
+ const float x0 = src[0 + j]*id;
+ const float x1 = src[QK4_NL/2 + j]*id;
+
+ const uint8_t xi0 = best_index_int8(16, kvalues_iq4nl_f, x0);
+ const uint8_t xi1 = best_index_int8(16, kvalues_iq4nl_f, x1);
+
+ dst.qs[j] = xi0 | (xi1 << 4);
+
+ const float v0 = kvalues_iq4nl_f[xi0];
+ const float v1 = kvalues_iq4nl_f[xi1];
+ const float w0 = src[0 + j]*src[0 + j];
+ const float w1 = src[QK4_NL/2 + j]*src[QK4_NL/2 + j];
+ sumqx += w0*v0*src[j] + w1*v1*src[QK4_NL/2 + j];
+ sumq2 += w0*v0*v0 + w1*v1*v1;
+
+ }
+
+ dst.d = sumq2 > 0 ? sumqx/sumq2 : d;
+}
+
+template <typename type4x4>
+void dequantize_q4_1(device const block_q4_1 * xb, short il, thread type4x4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 2);
+ const float d1 = il ? (xb->d / 16.h) : xb->d;
+ const float d2 = d1 / 256.f;
+ const float m = xb->m;
+ const ushort mask0 = il ? 0x00F0 : 0x000F;
+ const ushort mask1 = mask0 << 8;
+
+ float4x4 reg_f;
+
+ for (int i = 0; i < 8; i++) {
+ reg_f[i/2][2*(i%2) + 0] = ((qs[i] & mask0) * d1) + m;
+ reg_f[i/2][2*(i%2) + 1] = ((qs[i] & mask1) * d2) + m;
+ }
+
+ reg = (type4x4) reg_f;
+}
+
+template <typename type4>
+void dequantize_q4_1_t4(device const block_q4_1 * xb, short il, thread type4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 2);
+ const float d1 = (il/4) ? (xb->d / 16.h) : xb->d;
+ const float d2 = d1 / 256.f;
+ const float m = xb->m;
+ const ushort mask0 = (il/4) ? 0x00F0 : 0x000F;
+ const ushort mask1 = mask0 << 8;
+
+ for (int i = 0; i < 2; i++) {
+ reg[2*i + 0] = d1 * (qs[2*(il%4) + i] & mask0) + m;
+ reg[2*i + 1] = d2 * (qs[2*(il%4) + i] & mask1) + m;
+ }
+}
+
+template <typename type4x4>
+void dequantize_q5_0(device const block_q5_0 * xb, short il, thread type4x4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 3);
+ const float d = xb->d;
+ const float md = -16.h * xb->d;
+ const ushort mask = il ? 0x00F0 : 0x000F;
+
+ const uint32_t qh = *((device const uint32_t *)xb->qh);
+
+ const int x_mv = il ? 4 : 0;
+
+ const int gh_mv = il ? 12 : 0;
+ const int gh_bk = il ? 0 : 4;
+
+ float4x4 reg_f;
+
+ for (int i = 0; i < 8; i++) {
+        // extract the 5th bit of x0 and x1
+ const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
+ const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
+
+        // combine the 4 bits from qs with the 5th bit
+ const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
+ const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
+
+ reg_f[i/2][2*(i%2) + 0] = d * x0 + md;
+ reg_f[i/2][2*(i%2) + 1] = d * x1 + md;
+ }
+
+ reg = (type4x4) reg_f;
+}
+
+template <typename type4>
+void dequantize_q5_0_t4(device const block_q5_0 * xb, short il, thread type4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 3);
+ const float d = xb->d;
+ const float md = -16.h * xb->d;
+ const ushort mask = (il/4) ? 0x00F0 : 0x000F;
+
+ const uint32_t qh = *((device const uint32_t *)xb->qh);
+
+ const int x_mv = (il/4) ? 4 : 0;
+
+ const int gh_mv = (il/4) ? 12 : 0;
+ const int gh_bk = (il/4) ? 0 : 4;
+
+ for (int ii = 0; ii < 2; ii++) {
+ int i = 2*(il%4) + ii;
+
+        // extract the 5th bit of x0 and x1
+ const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
+ const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
+
+        // combine the 4 bits from qs with the 5th bit
+ const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
+ const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
+
+ reg[2*ii + 0] = d * x0 + md;
+ reg[2*ii + 1] = d * x1 + md;
+ }
+}
+
+template <typename type4x4>
+void dequantize_q5_1(device const block_q5_1 * xb, short il, thread type4x4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 4);
+ const float d = xb->d;
+ const float m = xb->m;
+ const ushort mask = il ? 0x00F0 : 0x000F;
+
+ const uint32_t qh = *((device const uint32_t *)xb->qh);
+
+ const int x_mv = il ? 4 : 0;
+
+ const int gh_mv = il ? 12 : 0;
+ const int gh_bk = il ? 0 : 4;
+
+ float4x4 reg_f;
+
+ for (int i = 0; i < 8; i++) {
+        // extract the 5th bit of x0 and x1
+ const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
+ const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
+
+        // combine the 4 bits from qs with the 5th bit
+ const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
+ const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
+
+ reg_f[i/2][2*(i%2) + 0] = d * x0 + m;
+ reg_f[i/2][2*(i%2) + 1] = d * x1 + m;
+ }
+
+ reg = (type4x4) reg_f;
+}
+
+template <typename type4>
+void dequantize_q5_1_t4(device const block_q5_1 * xb, short il, thread type4 & reg) {
+ device const uint16_t * qs = ((device const uint16_t *)xb + 4);
+ const float d = xb->d;
+ const float m = xb->m;
+ const ushort mask = (il/4) ? 0x00F0 : 0x000F;
+
+ const uint32_t qh = *((device const uint32_t *)xb->qh);
+
+ const int x_mv = (il/4) ? 4 : 0;
+
+ const int gh_mv = (il/4) ? 12 : 0;
+ const int gh_bk = (il/4) ? 0 : 4;
+
+ for (int ii = 0; ii < 2; ii++) {
+ int i = 2*(il%4) + ii;
+
+        // extract the 5th bit of x0 and x1
+ const uint8_t xh_0 = ((qh >> (gh_mv + 2*i )) << gh_bk) & 0x10;
+ const uint8_t xh_1 = ((qh >> (gh_mv + 2*i+1)) << gh_bk) & 0x10;
+
+        // combine the 4 bits from qs with the 5th bit
+ const int32_t x0 = ((((qs[i] ) & mask) >> x_mv) | xh_0);
+ const int32_t x1 = ((((qs[i] >> 8) & mask) >> x_mv) | xh_1);
+
+ reg[2*ii + 0] = d * x0 + m;
+ reg[2*ii + 1] = d * x1 + m;
+ }
+}
+
+template <typename type4x4>
+void dequantize_q8_0(device const block_q8_0 *xb, short il, thread type4x4 & reg) {
+ device const int8_t * qs = ((device const int8_t *)xb->qs);
+ const float d = xb->d;
+
+ float4x4 reg_f;
+
+ for (int i = 0; i < 16; i++) {
+ reg_f[i/4][i%4] = (qs[i + 16*il] * d);
+ }
+
+ reg = (type4x4) reg_f;
+}
+
+template <typename type4>
+void dequantize_q8_0_t4(device const block_q8_0 *xb, short il, thread type4 & reg) {
+ device const int8_t * qs = ((device const int8_t *)xb->qs);
+ const float d = xb->d;
+
+ for (int i = 0; i < 4; i++) {
+ reg[i] = (qs[4*(il%4) + i + 16*(il/4)] * d);
+ }
+}
+
+template <typename type4x4>
+void dequantize_mxfp4(device const block_mxfp4 * xb, short il, thread type4x4 & reg) {
+ device const uint8_t * q2 = (device const uint8_t *)xb->qs;
+
+ const float d = e8m0_to_fp32(xb->e);
+ const uint8_t shr = il >= 1 ? 4 : 0;
+
+ for (int i = 0; i < 4; ++i) {
+ reg[i][0] = d * kvalues_mxfp4_f[(q2[4*i + 0] >> shr) & 0x0F];
+ reg[i][1] = d * kvalues_mxfp4_f[(q2[4*i + 1] >> shr) & 0x0F];
+ reg[i][2] = d * kvalues_mxfp4_f[(q2[4*i + 2] >> shr) & 0x0F];
+ reg[i][3] = d * kvalues_mxfp4_f[(q2[4*i + 3] >> shr) & 0x0F];
+ }
+}
+
+template <typename type4>
+void dequantize_mxfp4_t4(device const block_mxfp4 * xb, short il, thread type4 & reg) {
+ device const uint8_t * q2 = (device const uint8_t *)xb->qs;
+
+ const float d = e8m0_to_fp32(xb->e);
+ const short il4 = il%4;
+
+ const uint8_t shr = il >= 4 ? 4 : 0;
+
+ reg[0] = d * kvalues_mxfp4_f[(q2[4*il4 + 0] >> shr) & 0x0F];
+ reg[1] = d * kvalues_mxfp4_f[(q2[4*il4 + 1] >> shr) & 0x0F];
+ reg[2] = d * kvalues_mxfp4_f[(q2[4*il4 + 2] >> shr) & 0x0F];
+ reg[3] = d * kvalues_mxfp4_f[(q2[4*il4 + 3] >> shr) & 0x0F];
+}
+
+template <typename type4x4>
+void dequantize_q2_K(device const block_q2_K *xb, short il, thread type4x4 & reg) {
+ const float d = xb->d;
+ const float min = xb->dmin;
+ device const uint8_t * q = (device const uint8_t *)xb->qs;
+ float dl, ml;
+ uint8_t sc = xb->scales[il];
+
+ q = q + 32*(il/8) + 16*(il&1);
+ il = (il/2)%4;
+
+ half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
+ uchar mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
+ dl = d * (sc & 0xF) * coef, ml = min * (sc >> 4);
+ for (int i = 0; i < 16; ++i) {
+ reg[i/4][i%4] = dl * (q[i] & mask) - ml;
+ }
+}
+
+template <typename type4x4>
+void dequantize_q3_K(device const block_q3_K *xb, short il, thread type4x4 & reg) {
+ const half d_all = xb->d;
+ device const uint8_t * q = (device const uint8_t *)xb->qs;
+ device const uint8_t * h = (device const uint8_t *)xb->hmask;
+ device const int8_t * scales = (device const int8_t *)xb->scales;
+
+ q = q + 32 * (il/8) + 16 * (il&1);
+ h = h + 16 * (il&1);
+ uint8_t m = 1 << (il/2);
+ uint16_t kmask1 = (il/4)>1 ? ((il/4)>2 ? 192 : 48) : \
+ ((il/4)>0 ? 12 : 3);
+ uint16_t kmask2 = il/8 ? 0xF0 : 0x0F;
+ uint16_t scale_2 = scales[il%8], scale_1 = scales[8 + il%4];
+ int16_t dl_int = (il/4)&1 ? (scale_2&kmask2) | ((scale_1&kmask1) << 2)
+ : (scale_2&kmask2) | ((scale_1&kmask1) << 4);
+ float dl = il<8 ? d_all * (dl_int - 32.f) : d_all * (dl_int / 16.f - 32.f);
+ const float ml = 4.f * dl;
+
+ il = (il/2) & 3;
+ const half coef = il>1 ? (il>2 ? 1/64.h : 1/16.h) : (il>0 ? 1/4.h : 1.h);
+ const uint8_t mask = il>1 ? (il>2 ? 192 : 48) : (il>0 ? 12 : 3);
+ dl *= coef;
+
+ for (int i = 0; i < 16; ++i) {
+ reg[i/4][i%4] = dl * (q[i] & mask) - (h[i] & m ? 0 : ml);
+ }
+}
+
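+// unpack the 6-bit (scale, min) pair of a K-quant sub-block from the 12-byte packed scales
+// used by Q4_K and Q5_K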
+static inline uchar2 get_scale_min_k4_just2(int j, int k, device const uchar * q) {
+ return j < 4 ? uchar2{uchar(q[j+0+k] & 63), uchar(q[j+4+k] & 63)}
+ : uchar2{uchar((q[j+4+k] & 0xF) | ((q[j-4+k] & 0xc0) >> 2)), uchar((q[j+4+k] >> 4) | ((q[j-0+k] & 0xc0) >> 2))};
+}
+
+template <typename type4x4>
+void dequantize_q4_K(device const block_q4_K * xb, short il, thread type4x4 & reg) {
+ device const uchar * q = xb->qs;
+
+ short is = (il/4) * 2;
+ q = q + (il/4) * 32 + 16 * (il&1);
+ il = il & 3;
+ const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales);
+ const float d = il < 2 ? xb->d : xb->d / 16.h;
+ const float min = xb->dmin;
+ const float dl = d * sc[0];
+ const float ml = min * sc[1];
+
+ const ushort mask = il < 2 ? 0x0F : 0xF0;
+ for (int i = 0; i < 16; ++i) {
+ reg[i/4][i%4] = dl * (q[i] & mask) - ml;
+ }
+}
+
+template <typename type4x4>
+void dequantize_q5_K(device const block_q5_K *xb, short il, thread type4x4 & reg) {
+ device const uint8_t * q = xb->qs;
+ device const uint8_t * qh = xb->qh;
+
+ short is = (il/4) * 2;
+ q = q + 32 * (il/4) + 16 * (il&1);
+ qh = qh + 16 * (il&1);
+ uint8_t ul = 1 << (il/2);
+ il = il & 3;
+ const uchar2 sc = get_scale_min_k4_just2(is, il/2, xb->scales);
+ const float d = il < 2 ? xb->d : xb->d / 16.f;
+ const float min = xb->dmin;
+ const float dl = d * sc[0];
+ const float ml = min * sc[1];
+
+ const ushort mask = il<2 ? 0x0F : 0xF0;
+ const float qh_val = il<2 ? 16.f : 256.f;
+ for (int i = 0; i < 16; ++i) {
+ reg[i/4][i%4] = dl * ((q[i] & mask) + (qh[i] & ul ? qh_val : 0)) - ml;
+ }
+}
+
+template <typename type4x4>
+void dequantize_q6_K(device const block_q6_K *xb, short il, thread type4x4 & reg) {
+ const half d_all = xb->d;
+ device const uint16_t * ql = (device const uint16_t *)xb->ql;
+ device const uint16_t * qh = (device const uint16_t *)xb->qh;
+ device const int8_t * scales = (device const int8_t *)xb->scales;
+
+ ql = ql + 32*(il/8) + 16*((il/2)&1) + 8*(il&1);
+ qh = qh + 16*(il/8) + 8*(il&1);
+ float sc = scales[(il%2) + 2 * ((il/2))];
+ il = (il/2) & 3;
+
+ const uint32_t kmask1 = il>1 ? (il>2 ? 0xC0C0C0C0 : 0x30303030) : (il>0 ? 0x0C0C0C0C : 0x03030303);
+ const uint32_t kmask2 = il>1 ? 0xF0F0F0F0 : 0x0F0F0F0F;
+ const float ml = d_all * sc * 32.f;
+ const float dl0 = d_all * sc;
+ const float dl1 = dl0 / 256.f;
+ const float dl2 = dl0 / (256.f * 256.f);
+ const float dl3 = dl0 / (256.f * 256.f * 256.f);
+ const uint8_t shr_h = il>2 ? 2 : 0;
+ const uint8_t shl_h = il>1 ? 0 : (il>0 ? 2 : 4);
+ const uint8_t shr_l = il>1 ? 4 : 0;
+ for (int i = 0; i < 4; ++i) {
+ const uint32_t low = (ql[2*i] | (uint32_t)(ql[2*i+1] << 16)) & kmask2;
+ const uint32_t high = (qh[2*i] | (uint32_t)(qh[2*i+1] << 16)) & kmask1;
+ const uint32_t q = ((high << shl_h) >> shr_h) | (low >> shr_l);
+ reg[i][0] = dl0 * ((half)(q & 0xFF)) - ml;
+ reg[i][1] = dl1 * ((float)(q & 0xFF00)) - ml;
+ reg[i][2] = dl2 * ((float)(q & 0xFF0000)) - ml;
+ reg[i][3] = dl3 * ((float)(q & 0xFF000000)) - ml;
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq2_xxs(device const block_iq2_xxs * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const float d = xb->d;
+ const int ib32 = il/2;
+ il = il%2;
+ // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16
+ // each block of 32 needs 2 uint32_t's for the quants & scale, so 4 uint16_t's.
+ device const uint16_t * q2 = xb->qs + 4*ib32;
+ const uint32_t aux32_g = q2[0] | (q2[1] << 16);
+ const uint32_t aux32_s = q2[2] | (q2[3] << 16);
+ thread const uint8_t * aux8 = (thread const uint8_t *)&aux32_g;
+ const float dl = d * (0.5f + (aux32_s >> 28)) * 0.25f;
+ constant uint8_t * grid = (constant uint8_t *)(iq2xxs_grid + aux8[2*il+0]);
+ uint8_t signs = ksigns_iq2xs[(aux32_s >> 14*il) & 127];
+ for (int i = 0; i < 8; ++i) {
+ reg[i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f);
+ }
+ grid = (constant uint8_t *)(iq2xxs_grid + aux8[2*il+1]);
+ signs = ksigns_iq2xs[(aux32_s >> (14*il+7)) & 127];
+ for (int i = 0; i < 8; ++i) {
+ reg[2+i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f);
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq2_xs(device const block_iq2_xs * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const float d = xb->d;
+ const int ib32 = il/2;
+ il = il%2;
+ // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16
+ device const uint16_t * q2 = xb->qs + 4*ib32;
+ const float dl = d * (0.5f + ((xb->scales[ib32] >> 4*il) & 0xf)) * 0.25f;
+ constant uint8_t * grid = (constant uint8_t *)(iq2xs_grid + (q2[2*il+0] & 511));
+ uint8_t signs = ksigns_iq2xs[q2[2*il+0] >> 9];
+ for (int i = 0; i < 8; ++i) {
+ reg[i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f);
+ }
+ grid = (constant uint8_t *)(iq2xs_grid + (q2[2*il+1] & 511));
+ signs = ksigns_iq2xs[q2[2*il+1] >> 9];
+ for (int i = 0; i < 8; ++i) {
+ reg[2+i/4][i%4] = dl * grid[i] * (signs & kmask_iq2xs[i] ? -1.f : 1.f);
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq3_xxs(device const block_iq3_xxs * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const float d = xb->d;
+ const int ib32 = il/2;
+ il = il%2;
+ // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16
+ device const uint8_t * q3 = xb->qs + 8*ib32;
+ device const uint16_t * gas = (device const uint16_t *)(xb->qs + QK_K/4) + 2*ib32;
+ const uint32_t aux32 = gas[0] | (gas[1] << 16);
+ const float dl = d * (0.5f + (aux32 >> 28)) * 0.5f;
+ constant uint8_t * grid1 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+0]);
+ constant uint8_t * grid2 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+1]);
+ uint8_t signs = ksigns_iq2xs[(aux32 >> 14*il) & 127];
+ for (int i = 0; i < 4; ++i) {
+ reg[0][i] = dl * grid1[i] * (signs & kmask_iq2xs[i+0] ? -1.f : 1.f);
+ reg[1][i] = dl * grid2[i] * (signs & kmask_iq2xs[i+4] ? -1.f : 1.f);
+ }
+ grid1 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+2]);
+ grid2 = (constant uint8_t *)(iq3xxs_grid + q3[4*il+3]);
+ signs = ksigns_iq2xs[(aux32 >> (14*il+7)) & 127];
+ for (int i = 0; i < 4; ++i) {
+ reg[2][i] = dl * grid1[i] * (signs & kmask_iq2xs[i+0] ? -1.f : 1.f);
+ reg[3][i] = dl * grid2[i] * (signs & kmask_iq2xs[i+4] ? -1.f : 1.f);
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq3_s(device const block_iq3_s * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const float d = xb->d;
+ const int ib32 = il/2;
+ il = il%2;
+ // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16
+ device const uint8_t * qs = xb->qs + 8*ib32;
+ device const uint8_t * signs = xb->signs + 4*ib32 + 2*il;
+ const uint8_t qh = xb->qh[ib32] >> 4*il;
+ const float dl = d * (1 + 2*((xb->scales[ib32/2] >> 4*(ib32%2)) & 0xf));
+ constant uint8_t * grid1 = (constant uint8_t *)(iq3s_grid + (qs[4*il+0] | ((qh << 8) & 256)));
+ constant uint8_t * grid2 = (constant uint8_t *)(iq3s_grid + (qs[4*il+1] | ((qh << 7) & 256)));
+ for (int i = 0; i < 4; ++i) {
+ reg[0][i] = dl * grid1[i] * select(1, -1, signs[0] & kmask_iq2xs[i+0]);
+ reg[1][i] = dl * grid2[i] * select(1, -1, signs[0] & kmask_iq2xs[i+4]);
+ }
+ grid1 = (constant uint8_t *)(iq3s_grid + (qs[4*il+2] | ((qh << 6) & 256)));
+ grid2 = (constant uint8_t *)(iq3s_grid + (qs[4*il+3] | ((qh << 5) & 256)));
+ for (int i = 0; i < 4; ++i) {
+ reg[2][i] = dl * grid1[i] * select(1, -1, signs[1] & kmask_iq2xs[i+0]);
+ reg[3][i] = dl * grid2[i] * select(1, -1, signs[1] & kmask_iq2xs[i+4]);
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq2_s(device const block_iq2_s * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const float d = xb->d;
+ const int ib32 = il/2;
+ il = il%2;
+ // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16
+ device const uint8_t * qs = xb->qs + 4*ib32 + 2*il;
+ device const uint8_t * signs = qs + QK_K/8;
+ const uint8_t qh = xb->qh[ib32] >> 4*il;
+ const float dl = d * (0.5f + ((xb->scales[ib32] >> 4*il) & 0xf)) * 0.25f;
+ constant uint8_t * grid1 = (constant uint8_t *)(iq2s_grid + (qs[0] | ((qh << 8) & 0x300)));
+ constant uint8_t * grid2 = (constant uint8_t *)(iq2s_grid + (qs[1] | ((qh << 6) & 0x300)));
+ for (int i = 0; i < 8; ++i) {
+ reg[i/4+0][i%4] = dl * grid1[i] * select(1, -1, signs[0] & kmask_iq2xs[i]);
+ reg[i/4+2][i%4] = dl * grid2[i] * select(1, -1, signs[1] & kmask_iq2xs[i]);
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq1_s(device const block_iq1_s * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const int ib32 = il/2;
+ il = il%2;
+ const float d = xb->d;
+ device const uint8_t * qs = xb->qs + 4*ib32 + 2*il;
+ device const uint16_t * qh = xb->qh;
+ const float dl = d * (2*((qh[ib32] >> 12) & 7) + 1);
+ const float ml = dl * (qh[ib32] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA);
+ const uint16_t h = qh[ib32] >> 6*il;
+ constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((h << 8) & 0x700)));
+ constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((h << 5) & 0x700)));
+ for (int i = 0; i < 4; ++i) {
+ reg[0][i] = dl * (grid1[i] & 0xf) + ml;
+ reg[1][i] = dl * (grid1[i] >> 4) + ml;
+ reg[2][i] = dl * (grid2[i] & 0xf) + ml;
+ reg[3][i] = dl * (grid2[i] >> 4) + ml;
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq1_m(device const block_iq1_m * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const int ib32 = il/2;
+ il = il%2;
+ device const uint16_t * sc = (device const uint16_t *)xb->scales;
+
+ iq1m_scale_t scale;
+ scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
+ const float d = scale.f16;
+
+ device const uint8_t * qs = xb->qs + 4*ib32 + 2*il;
+ device const uint8_t * qh = xb->qh + 2*ib32 + il;
+
+ const float dl = d * (2*((sc[ib32/2] >> (6*(ib32%2)+3*il)) & 7) + 1);
+ const float ml1 = dl * (qh[0] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA);
+ const float ml2 = dl * (qh[0] & 0x80 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA);
+ constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700)));
+ constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 4) & 0x700)));
+ for (int i = 0; i < 4; ++i) {
+ reg[0][i] = dl * (grid1[i] & 0xf) + ml1;
+ reg[1][i] = dl * (grid1[i] >> 4) + ml1;
+ reg[2][i] = dl * (grid2[i] & 0xf) + ml2;
+ reg[3][i] = dl * (grid2[i] >> 4) + ml2;
+ }
+}
+
+template <typename type4x4>
+void dequantize_iq4_nl(device const block_iq4_nl * xb, short il, thread type4x4 & reg) {
+ device const uint16_t * q4 = (device const uint16_t *)xb->qs;
+ const float d = xb->d;
+ uint32_t aux32;
+ thread const uint8_t * q8 = (thread const uint8_t *)&aux32;
+ for (int i = 0; i < 4; ++i) {
+ aux32 = ((q4[2*i] | (q4[2*i+1] << 16)) >> 4*il) & 0x0f0f0f0f;
+ reg[i][0] = d * kvalues_iq4nl_f[q8[0]];
+ reg[i][1] = d * kvalues_iq4nl_f[q8[1]];
+ reg[i][2] = d * kvalues_iq4nl_f[q8[2]];
+ reg[i][3] = d * kvalues_iq4nl_f[q8[3]];
+ }
+}
+
+template <typename type4>
+void dequantize_iq4_nl_t4(device const block_iq4_nl * xb, short il, thread type4 & reg) {
+ device const uint16_t * q4 = (device const uint16_t *)xb->qs;
+ const float d = xb->d;
+ uint32_t aux32;
+ thread const uint8_t * q8 = (thread const uint8_t *)&aux32;
+ aux32 = ((q4[2*(il%4)] | (q4[2*(il%4)+1] << 16)) >> 4*(il/4)) & 0x0f0f0f0f;
+ reg[0] = d * kvalues_iq4nl_f[q8[0]];
+ reg[1] = d * kvalues_iq4nl_f[q8[1]];
+ reg[2] = d * kvalues_iq4nl_f[q8[2]];
+ reg[3] = d * kvalues_iq4nl_f[q8[3]];
+}
+
+template <typename type4x4>
+void dequantize_iq4_xs(device const block_iq4_xs * xb, short il, thread type4x4 & reg) {
+ // il is 0...15 for QK_K = 256 => index of block of 32 is il/2
+ const int ib32 = il/2;
+ il = il%2;
+ // il = 0 or 1. il = 0 processes the first 16 quants in a block of 32, il = 1 the second 16
+ device const uint32_t * q4 = (device const uint32_t *)xb->qs + 4*ib32;
+ const int ls = ((xb->scales_l[ib32/2] >> 4*(ib32%2)) & 0xf) | (((xb->scales_h >> 2*ib32) & 3) << 4);
+ const float d = (float)xb->d * (ls - 32);
+ uint32_t aux32;
+ thread const uint8_t * q8 = (thread const uint8_t *)&aux32;
+ for (int i = 0; i < 4; ++i) {
+ aux32 = (q4[i] >> 4*il) & 0x0f0f0f0f;
+ reg[i][0] = d * kvalues_iq4nl_f[q8[0]];
+ reg[i][1] = d * kvalues_iq4nl_f[q8[1]];
+ reg[i][2] = d * kvalues_iq4nl_f[q8[2]];
+ reg[i][3] = d * kvalues_iq4nl_f[q8[3]];
+ }
+}
+
+enum ggml_sort_order {
+ GGML_SORT_ORDER_ASC,
+ GGML_SORT_ORDER_DESC,
+};
+
+constant float GELU_COEF_A = 0.044715f;
+constant float GELU_QUICK_COEF = -1.702f;
+constant float SQRT_2_OVER_PI = 0.79788456080286535587989211986876f;
+constant float SQRT_2_INV = 0.70710678118654752440084436210484f;
+
+// based on Abramowitz and Stegun formula 7.1.26, a Hastings-type approximation
+// ref: https://www.johndcook.com/blog/python_erf/
+constant float p_erf = 0.3275911f;
+constant float a1_erf = 0.254829592f;
+constant float a2_erf = -0.284496736f;
+constant float a3_erf = 1.421413741f;
+constant float a4_erf = -1.453152027f;
+constant float a5_erf = 1.061405429f;
+
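+// erf(x) ~= sign(x) * (1 - (a1*t + a2*t^2 + a3*t^3 + a4*t^4 + a5*t^5) * exp(-x*x)), with t = 1/(1 + p*|x|)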
+template<typename T>
+inline T erf_approx(T x) {
+ T sign_x = sign(x);
+ x = fabs(x);
+ T t = 1.0f / (1.0f + p_erf * x);
+ T y = 1.0f - (((((a5_erf * t + a4_erf) * t) + a3_erf) * t + a2_erf) * t + a1_erf) * t * exp(-x * x);
+ return sign_x * y;
+}
+
+template<typename T> T elu_approx(T x);
+
+template<> inline float elu_approx<float>(float x) {
+ return (x > 0.f) ? x : (exp(x) - 1);
+}
+
+template<> inline float4 elu_approx<float4>(float4 x) {
+ float4 res;
+
+ res[0] = (x[0] > 0.0f) ? x[0] : (exp(x[0]) - 1.0f);
+ res[1] = (x[1] > 0.0f) ? x[1] : (exp(x[1]) - 1.0f);
+ res[2] = (x[2] > 0.0f) ? x[2] : (exp(x[2]) - 1.0f);
+ res[3] = (x[3] > 0.0f) ? x[3] : (exp(x[3]) - 1.0f);
+
+ return res;
+}
+
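+// the unary kernel is specialized at pipeline-creation time via Metal function constants:
+// FC_unary_op selects the operation and FC_unary_cnt switches to flat indexing of the whole tensor
+// (one element per threadgroup), so the unused branches below are compiled out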
+constant short FC_unary_op [[function_constant(FC_UNARY + 0)]];
+constant bool FC_unary_cnt[[function_constant(FC_UNARY + 1)]];
+
+template <typename T0, typename T, typename TC>
+kernel void kernel_unary_impl(
+ constant ggml_metal_kargs_unary & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+#define FC_OP FC_unary_op
+#define FC_CNT FC_unary_cnt
+
+ device const T0 * src0_ptr;
+ device T * dst_ptr;
+
+ int i0;
+
+ if (FC_CNT) {
+ i0 = tgpig.x;
+
+ src0_ptr = (device const T0 *) (src0);
+ dst_ptr = (device T *) (dst);
+ } else {
+ const int i03 = tgpig.z;
+ const int i02 = tgpig.y;
+ const int k0 = tgpig.x/args.ne01;
+ const int i01 = tgpig.x - k0*args.ne01;
+
+ i0 = k0*ntg.x + tpitg.x;
+
+ src0_ptr = (device const T0 *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01);
+ dst_ptr = (device T *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 );
+ }
+
+ {
+ //threadgroup_barrier(mem_flags::mem_none);
+
+ if (!FC_CNT) {
+ if (i0 >= args.ne0) {
+ return;
+ }
+ }
+
+ const TC x = (TC) src0_ptr[i0];
+
+ if (FC_OP == OP_UNARY_NUM_SCALE) {
+ dst_ptr[i0] = (T) (args.scale * x + args.bias);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_FILL) {
+ dst_ptr[i0] = (T) args.val;
+ }
+
+ if (FC_OP == OP_UNARY_NUM_CLAMP) {
+ dst_ptr[i0] = (T) clamp(x, args.min, args.max);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_SQR) {
+ dst_ptr[i0] = (T) (x * x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_SQRT) {
+ dst_ptr[i0] = (T) sqrt(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_SIN) {
+ dst_ptr[i0] = (T) sin(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_COS) {
+ dst_ptr[i0] = (T) cos(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_LOG) {
+ dst_ptr[i0] = (T) log(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_LEAKY_RELU) {
+ dst_ptr[i0] = (T) (TC(x > 0)*x + TC(x <= 0)*(x * args.slope));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_TANH) {
+ dst_ptr[i0] = (T) precise::tanh(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_RELU) {
+ dst_ptr[i0] = (T) fmax(0, x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_SIGMOID) {
+ dst_ptr[i0] = (T) (1 / (1 + exp(-x)));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_GELU) {
+ dst_ptr[i0] = (T) (0.5*x*(1 + precise::tanh(SQRT_2_OVER_PI*x*(1 + GELU_COEF_A*x*x))));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_GELU_ERF) {
+ dst_ptr[i0] = (T) (0.5*x*(1 + erf_approx(SQRT_2_INV*x)));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_GELU_QUICK) {
+ dst_ptr[i0] = (T) (x * (1/(1 + exp(GELU_QUICK_COEF*x))));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_SILU) {
+ dst_ptr[i0] = (T) (x / (1 + exp(-x)));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_ELU) {
+ dst_ptr[i0] = (T) elu_approx(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_NEG) {
+ dst_ptr[i0] = (T) -x;
+ }
+
+ if (FC_OP == OP_UNARY_NUM_ABS) {
+ dst_ptr[i0] = (T) fabs(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_SGN) {
+ dst_ptr[i0] = T(x > 0) - T(x < 0);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_STEP) {
+ dst_ptr[i0] = T(x > 0);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_HARDSWISH) {
+ dst_ptr[i0] = (T) (x * fmax(0, fmin(1, x/6 + 0.5)));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_HARDSIGMOID) {
+ dst_ptr[i0] = (T) fmax(0, fmin(1, x/6 + 0.5));
+ }
+
+ if (FC_OP == OP_UNARY_NUM_EXP) {
+ dst_ptr[i0] = (T) exp(x);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_SOFTPLUS) {
+ dst_ptr[i0] = (T) select(log(1 + exp(x)), x, x > 20);
+ }
+
+ if (FC_OP == OP_UNARY_NUM_EXPM1) {
+ // TODO: precise implementation
+ dst_ptr[i0] = (T) (exp(x) - 1);
+ }
+ }
+
+#undef FC_OP
+#undef FC_CNT
+}
+
+typedef decltype(kernel_unary_impl<float, float, float>) kernel_unary_t;
+
+template [[host_name("kernel_unary_f32_f32")]] kernel kernel_unary_t kernel_unary_impl<float, float, float>;
+template [[host_name("kernel_unary_f32_f32_4")]] kernel kernel_unary_t kernel_unary_impl<float4, float4, float4>;
+template [[host_name("kernel_unary_f16_f16")]] kernel kernel_unary_t kernel_unary_impl<half, half, float>;
+template [[host_name("kernel_unary_f16_f16_4")]] kernel kernel_unary_t kernel_unary_impl<half4, half4, float4>;
+
+// OP: 0 - add, 1 - sub, 2 - mul, 3 - div
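+// FC_bin_f is the number of src1 operands fused into a single dispatch (their byte offsets are in args.o1),
+// FC_bin_rb selects the row-broadcast fast path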
+constant short FC_bin_op [[function_constant(FC_BIN + 0)]];
+constant short FC_bin_f [[function_constant(FC_BIN + 1)]];
+constant bool FC_bin_rb [[function_constant(FC_BIN + 2)]];
+
+template <typename T0, typename T1, typename T>
+kernel void kernel_bin_fuse_impl(
+ constant ggml_metal_kargs_bin & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+#define FC_OP FC_bin_op
+#define FC_F FC_bin_f
+#define FC_RB FC_bin_rb
+
+ if (FC_RB) {
+ // row broadcast
+ const uint i0 = tgpig.x;
+ const uint i1 = i0%args.ne10;
+
+ device const T0 * src0_row = (device const T0 *) (src0);
+ device T * dst_row = (device T *) (dst);
+
+ if (FC_F == 1) {
+ device const T1 * src1_row = (device const T1 *) (src1 + args.o1[0]);
+
+ if (FC_OP == 0) {
+ dst_row[i0] = src0_row[i0] + src1_row[i1];
+ }
+
+ if (FC_OP == 1) {
+ dst_row[i0] = src0_row[i0] - src1_row[i1];
+ }
+
+ if (FC_OP == 2) {
+ dst_row[i0] = src0_row[i0] * src1_row[i1];
+ }
+
+ if (FC_OP == 3) {
+ dst_row[i0] = src0_row[i0] / src1_row[i1];
+ }
+ } else {
+ T0 res = src0_row[i0];
+
+ if (FC_OP == 0) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res += ((device const T1 *) (src1 + args.o1[j]))[i1];
+ }
+ }
+
+ if (FC_OP == 1) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res -= ((device const T1 *) (src1 + args.o1[j]))[i1];
+ }
+ }
+
+ if (FC_OP == 2) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res *= ((device const T1 *) (src1 + args.o1[j]))[i1];
+ }
+ }
+
+ if (FC_OP == 3) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res /= ((device const T1 *) (src1 + args.o1[j]))[i1];
+ }
+ }
+
+ dst_row[i0] = res;
+ }
+ } else {
+ const int i03 = tgpig.z;
+ const int i02 = tgpig.y;
+ const int i01 = tgpig.x;
+
+ if (i01 >= args.ne01) {
+ return;
+ }
+
+ const int i13 = i03%args.ne13;
+ const int i12 = i02%args.ne12;
+ const int i11 = i01%args.ne11;
+
+ device const T0 * src0_ptr = (device const T0 *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + args.offs);
+ device T * dst_ptr = (device T *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1 + args.offs);
+
+ if (FC_F == 1) {
+ device const T1 * src1_ptr = (device const T1 *) (src1 + args.o1[0] + i13*args.nb13 + i12*args.nb12 + i11*args.nb11);
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ const int i10 = i0%args.ne10;
+
+ if (FC_OP == 0) {
+ dst_ptr[i0] = src0_ptr[i0] + src1_ptr[i10];
+ }
+
+ if (FC_OP == 1) {
+ dst_ptr[i0] = src0_ptr[i0] - src1_ptr[i10];
+ }
+
+ if (FC_OP == 2) {
+ dst_ptr[i0] = src0_ptr[i0] * src1_ptr[i10];
+ }
+
+ if (FC_OP == 3) {
+ dst_ptr[i0] = src0_ptr[i0] / src1_ptr[i10];
+ }
+ }
+ } else {
+ device const T1 * src1_ptr[8];
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ src1_ptr[j] = (device const T1 *) (src1 + args.o1[j] + i13*args.nb13 + i12*args.nb12 + i11*args.nb11);
+ }
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ const int i10 = i0%args.ne10;
+
+ T res = src0_ptr[i0];
+
+ if (FC_OP == 0) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res += src1_ptr[j][i10];
+ }
+ }
+
+ if (FC_OP == 1) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res -= src1_ptr[j][i10];
+ }
+ }
+
+ if (FC_OP == 2) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res *= src1_ptr[j][i10];
+ }
+ }
+
+ if (FC_OP == 3) {
+ FOR_UNROLL (short j = 0; j < FC_F; ++j) {
+ res /= src1_ptr[j][i10];
+ }
+ }
+
+ dst_ptr[i0] = res;
+ }
+ }
+ }
+
+#undef FC_OP
+#undef FC_F
+#undef FC_RB
+}
+
+typedef decltype(kernel_bin_fuse_impl<float, float, float>) kernel_bin_fuse_t;
+
+template [[host_name("kernel_bin_fuse_f32_f32_f32")]] kernel kernel_bin_fuse_t kernel_bin_fuse_impl<float, float, float>;
+template [[host_name("kernel_bin_fuse_f32_f32_f32_4")]] kernel kernel_bin_fuse_t kernel_bin_fuse_impl<float4, float4, float4>;
+
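+// each output row adds the src1 row selected by the int32 index stored in src2
+// (used e.g. to add per-expert biases)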
+kernel void kernel_add_id(
+ constant ggml_metal_kargs_add_id & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * src2,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int i1 = tgpig.x;
+ const int i2 = tgpig.y;
+
+ const int i11 = *((device const int32_t *) (src2 + i1*sizeof(int32_t) + i2*args.nb21));
+
+ const size_t nb1 = args.ne0 * sizeof(float);
+ const size_t nb2 = args.ne1 * nb1;
+
+ device float * dst_row = (device float *)((device char *)dst + i1*nb1 + i2*nb2);
+ device const float * src0_row = (device const float *)((device char *)src0 + i1*args.nb01 + i2*args.nb02);
+ device const float * src1_row = (device const float *)((device char *)src1 + i11*args.nb11);
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ dst_row[i0] = src0_row[i0] + src1_row[i0];
+ }
+}
+
+template<typename T>
+kernel void kernel_repeat(
+ constant ggml_metal_kargs_repeat & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int i3 = tgpig.z;
+ const int i2 = tgpig.y;
+ const int i1 = tgpig.x;
+
+ const int i03 = i3%args.ne03;
+ const int i02 = i2%args.ne02;
+ const int i01 = i1%args.ne01;
+
+ device const char * src0_ptr = src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01;
+ device char * dst_ptr = dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1;
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ const int i00 = i0%args.ne00;
+ *((device T *)(dst_ptr + i0*args.nb0)) = *((device T *)(src0_ptr + i00*args.nb00));
+ }
+}
+
+typedef decltype(kernel_repeat<float>) kernel_repeat_t;
+
+template [[host_name("kernel_repeat_f32")]] kernel kernel_repeat_t kernel_repeat<float>;
+template [[host_name("kernel_repeat_f16")]] kernel kernel_repeat_t kernel_repeat<half>;
+template [[host_name("kernel_repeat_i32")]] kernel kernel_repeat_t kernel_repeat<int>;
+template [[host_name("kernel_repeat_i16")]] kernel kernel_repeat_t kernel_repeat<short>;
+
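+// GLU kernels: each output row is act(src0_row) * src1_row; src0 and src1 can be separate tensors
+// or, via the args.i00/args.i10 column offsets, presumably the two halves of a single tensor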
+kernel void kernel_reglu_f32(
+ constant ggml_metal_kargs_glu & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00;
+ device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10;
+ device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1);
+
+ for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) {
+ const float x0 = src0_row[i0];
+ const float x1 = src1_row[i0];
+
+ dst_row[i0] = x0*x1*(x0 > 0.0f);
+ }
+}
+
+kernel void kernel_geglu_f32(
+ constant ggml_metal_kargs_glu & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00;
+ device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10;
+ device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1);
+
+ for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) {
+ const float x0 = src0_row[i0];
+ const float x1 = src1_row[i0];
+
+ const float gelu = 0.5f*x0*(1.0f + precise::tanh(SQRT_2_OVER_PI*x0*(1.0f + GELU_COEF_A*x0*x0)));
+
+ dst_row[i0] = gelu*x1;
+ }
+}
+
+kernel void kernel_swiglu_f32(
+ constant ggml_metal_kargs_glu & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00;
+ device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10;
+ device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1);
+
+ for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) {
+ const float x0 = src0_row[i0];
+ const float x1 = src1_row[i0];
+
+ const float silu = x0 / (1.0f + exp(-x0));
+
+ dst_row[i0] = silu*x1;
+ }
+}
+
+kernel void kernel_swiglu_oai_f32(
+ constant ggml_metal_kargs_glu & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00;
+ device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10;
+ device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1);
+
+ for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) {
+ float x0 = src0_row[i0];
+ float x1 = src1_row[i0];
+
+ x0 = min(x0, args.limit);
+ x1 = max(min(x1, args.limit), -args.limit);
+
+ float out_glu = x0 / (1.0f + exp(-x0 * args.alpha));
+ out_glu = out_glu * (1.0f + x1);
+
+ dst_row[i0] = out_glu;
+ }
+}
+
+kernel void kernel_geglu_erf_f32(
+ constant ggml_metal_kargs_glu & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00;
+ device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10;
+ device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1);
+
+ for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) {
+ const float x0 = src0_row[i0];
+ const float x1 = src1_row[i0];
+
+ const float gelu_erf = 0.5f*x0*(1.0f+erf_approx<float>(x0*SQRT_2_INV));
+
+ dst_row[i0] = gelu_erf*x1;
+ }
+}
+
+kernel void kernel_geglu_quick_f32(
+ constant ggml_metal_kargs_glu & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ device const float * src0_row = (device const float *) ((device const char *) src0 + tgpig*args.nb01) + args.i00;
+ device const float * src1_row = (device const float *) ((device const char *) src1 + tgpig*args.nb11) + args.i10;
+ device float * dst_row = (device float *) ((device char *) dst + tgpig*args.nb1);
+
+ for (int i0 = tpitg; i0 < args.ne0; i0 += ntg) {
+ const float x0 = src0_row[i0];
+ const float x1 = src1_row[i0];
+
+ const float gelu_quick = x0*(1.0f/(1.0f+exp(GELU_QUICK_COEF*x0)));
+
+ dst_row[i0] = gelu_quick*x1;
+ }
+}
+
+kernel void kernel_op_sum_f32(
+ constant ggml_metal_kargs_sum & args,
+ device const float * src0,
+ device float * dst,
+ threadgroup float * shmem_f32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+
+ if (args.np == 0) {
+ return;
+ }
+
+    // TODO: make this a function constant
+ const uint nsg = (ntg.x + 31) / 32;
+
+ float sumf = 0;
+
+ for (uint64_t i0 = tpitg.x; i0 < args.np; i0 += ntg.x) {
+ sumf += src0[i0];
+ }
+
+ sumf = simd_sum(sumf);
+
+ if (tiisg == 0) {
+ shmem_f32[sgitg] = sumf;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ float total = 0;
+
+ if (sgitg == 0) {
+ float v = 0;
+
+ if (tpitg.x < nsg) {
+ v = shmem_f32[tpitg.x];
+ }
+
+ total = simd_sum(v);
+
+ if (tpitg.x == 0) {
+ dst[0] = total;
+ }
+ }
+}
+
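+// one row per threadgroup: per-simdgroup partial sums are combined through shmem_f32 and a second
+// simd_sum; with norm == true the result is divided by ne00, i.e. kernel_mean_f32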
+template <bool norm>
+kernel void kernel_sum_rows(
+ constant ggml_metal_kargs_sum_rows & args,
+ device const float * src0,
+ device float * dst,
+ threadgroup float * shmem_f32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ int64_t i3 = tgpig.z;
+ int64_t i2 = tgpig.y;
+ int64_t i1 = tgpig.x;
+
+ if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) {
+ return;
+ }
+
+ if (sgitg == 0) {
+ shmem_f32[tiisg] = 0.0f;
+ }
+
+ device const float * src_row = (device const float *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03);
+ device float * dst_row = (device float *) ((device char *) dst + i1*args.nb1 + i2*args.nb2 + i3*args.nb3);
+
+ float sumf = 0;
+
+ for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) {
+ sumf += src_row[i0];
+ }
+
+ sumf = simd_sum(sumf);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ shmem_f32[sgitg] = sumf;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sumf = shmem_f32[tiisg];
+ sumf = simd_sum(sumf);
+
+ if (tpitg.x == 0) {
+ dst_row[0] = norm ? sumf / args.ne00 : sumf;
+ }
+}
+
+typedef decltype(kernel_sum_rows<false>) kernel_sum_rows_t;
+
+template [[host_name("kernel_sum_rows_f32")]] kernel kernel_sum_rows_t kernel_sum_rows<false>;
+template [[host_name("kernel_mean_f32")]] kernel kernel_sum_rows_t kernel_sum_rows<true>;
+
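+// two-pass cumulative sum: kernel_cumsum_blk computes an inclusive prefix sum within each
+// threadgroup-sized block of a row and optionally writes the block total to tmp; kernel_cumsum_add
+// then adds tmp[ib - 1] to block ib (tmp is presumably prefix-summed between the two passes)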
+template<typename T>
+kernel void kernel_cumsum_blk(
+ constant ggml_metal_kargs_cumsum_blk & args,
+ device const char * src0,
+ device char * tmp,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int ib = tgpig[0]/args.ne01;
+
+ const int i00 = ib*ntg.x;
+ const int i01 = tgpig[0]%args.ne01;
+ const int i02 = tgpig[1];
+ const int i03 = tgpig[2];
+
+ device const float * src0_row = (device const float *) (src0 +
+ args.nb01*i01 +
+ args.nb02*i02 +
+ args.nb03*i03);
+
+ threadgroup float * shmem_f32 = (threadgroup float *) shmem;
+
+ float v = 0.0f;
+
+ if (i00 + tpitg.x < args.ne00) {
+ v = src0_row[i00 + tpitg.x];
+ }
+
+ float s = simd_prefix_inclusive_sum(v);
+
+ if (tiisg == N_SIMDWIDTH - 1) {
+ shmem_f32[sgitg] = s;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (sgitg == 0) {
+ shmem_f32[tiisg] = simd_prefix_exclusive_sum(shmem_f32[tiisg]);
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ s += shmem_f32[sgitg];
+
+ device float * dst_row = (device float *) dst +
+ args.ne00*i01 +
+ args.ne00*args.ne01*i02 +
+ args.ne00*args.ne01*args.ne02*i03;
+
+ if (i00 + tpitg.x < args.ne00) {
+ dst_row[i00 + tpitg.x] = s;
+ }
+
+ if (args.outb && tpitg.x == ntg.x - 1) {
+ device float * tmp_row = (device float *) tmp +
+ args.net0*i01 +
+ args.net0*args.net1*i02 +
+ args.net0*args.net1*args.net2*i03;
+
+ tmp_row[ib] = s;
+ }
+}
+
+typedef decltype(kernel_cumsum_blk<float>) kernel_cumsum_blk_t;
+
+template [[host_name("kernel_cumsum_blk_f32")]] kernel kernel_cumsum_blk_t kernel_cumsum_blk<float>;
+
+template<typename T>
+kernel void kernel_cumsum_add(
+ constant ggml_metal_kargs_cumsum_add & args,
+ device const char * tmp,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int ib = tgpig[0]/args.ne01;
+
+ if (ib == 0) {
+ return;
+ }
+
+ const int i00 = ib*ntg.x;
+ const int i01 = tgpig[0]%args.ne01;
+ const int i02 = tgpig[1];
+ const int i03 = tgpig[2];
+
+ device const float * tmp_row = (device const float *) (tmp +
+ args.nbt1*i01 +
+ args.nbt2*i02 +
+ args.nbt3*i03);
+
+ device float * dst_row = (device float *) dst +
+ args.ne00*i01 +
+ args.ne00*args.ne01*i02 +
+ args.ne00*args.ne01*args.ne02*i03;
+
+ if (i00 + tpitg.x < args.ne00) {
+ dst_row[i00 + tpitg.x] += tmp_row[ib - 1];
+ }
+}
+
+typedef decltype(kernel_cumsum_add<float>) kernel_cumsum_add_t;
+
+template [[host_name("kernel_cumsum_add_f32")]] kernel kernel_cumsum_add_t kernel_cumsum_add<float>;
+
+
+template<uint32_t ttype>
+bool _ggml_vec_tri_cmp(const int i, const int r);
+
+template<>
+bool _ggml_vec_tri_cmp</* GGML_TRI_TYPE_LOWER */ 3>(const int i, const int r) {
+ return i < r;
+}
+
+template<>
+bool _ggml_vec_tri_cmp</* GGML_TRI_TYPE_LOWER_DIAG */ 2>(const int i, const int r) {
+ return i <= r;
+}
+
+template<>
+bool _ggml_vec_tri_cmp</* GGML_TRI_TYPE_UPPER */ 1>(const int i, const int r) {
+ return i > r;
+}
+
+template<>
+bool _ggml_vec_tri_cmp</* GGML_TRI_TYPE_UPPER_DIAG */ 0>(const int i, const int r) {
+ return i >= r;
+}
+
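+// keep the lower/upper triangular part of each matrix (with or without the diagonal, selected by
+// ttype) and zero the rest, using the comparison result as a multiplicative mask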
+template<typename T, int ttype>
+kernel void kernel_tri(
+ constant ggml_metal_kargs_tri & args,
+ device const char * src0,
+ device const char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int i3 = tgpig.z;
+ const int i2 = tgpig.y;
+ const int i1 = tgpig.x;
+
+ if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) {
+ return;
+ }
+
+ device const T * src_row = (device const T *) ((device const char *) src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03);
+ device T * dst_row = (device T *) ((device char *) dst + i1*args.nb1 + i2*args.nb2 + i3*args.nb3);
+
+    // Each thread handles a single element of the row when ne00 fits within one
+    // threadgroup; otherwise the loop covers every index this thread is
+    // responsible for.
+ for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) {
+        // Use the comparison result as a mask for a branchless select between the source value and zero
+ dst_row[i0] = static_cast<T>(_ggml_vec_tri_cmp<ttype>(i0, i1)) * src_row[i0];
+ }
+}
+
+typedef decltype(kernel_tri<float, 0>) kernel_tri_t;
+
+template [[host_name("kernel_tri_f32_0")]] kernel kernel_tri_t kernel_tri<float, 0>;
+template [[host_name("kernel_tri_f32_1")]] kernel kernel_tri_t kernel_tri<float, 1>;
+template [[host_name("kernel_tri_f32_2")]] kernel kernel_tri_t kernel_tri<float, 2>;
+template [[host_name("kernel_tri_f32_3")]] kernel kernel_tri_t kernel_tri<float, 3>;
+template [[host_name("kernel_tri_f16_0")]] kernel kernel_tri_t kernel_tri<half, 0>;
+template [[host_name("kernel_tri_f16_1")]] kernel kernel_tri_t kernel_tri<half, 1>;
+template [[host_name("kernel_tri_f16_2")]] kernel kernel_tri_t kernel_tri<half, 2>;
+template [[host_name("kernel_tri_f16_3")]] kernel kernel_tri_t kernel_tri<half, 3>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_tri_bf16_0")]] kernel kernel_tri_t kernel_tri<bfloat, 0>;
+template [[host_name("kernel_tri_bf16_1")]] kernel kernel_tri_t kernel_tri<bfloat, 1>;
+template [[host_name("kernel_tri_bf16_2")]] kernel kernel_tri_t kernel_tri<bfloat, 2>;
+template [[host_name("kernel_tri_bf16_3")]] kernel kernel_tri_t kernel_tri<bfloat, 3>;
+#endif
+
+template<typename T>
+kernel void kernel_soft_max(
+ constant ggml_metal_kargs_soft_max & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * src2,
+ device char * dst,
+ threadgroup float * buf [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint3 tptg[[threads_per_threadgroup]]) {
+ const int32_t i03 = tgpig.z;
+ const int32_t i02 = tgpig.y;
+ const int32_t i01 = tgpig.x;
+
+ const int32_t i13 = i03%args.ne13;
+ const int32_t i12 = i02%args.ne12;
+ const int32_t i11 = i01;
+
+ device const float * psrc0 = (device const float *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03);
+ device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr;
+ device const float * psrc2 = src2 != src0 ? (device const float *) (src2) : nullptr;
+ device float * pdst = (device float *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3);
+
+ float slope = 1.0f;
+
+ // ALiBi
+ if (args.max_bias > 0.0f) {
+ const int32_t h = i02;
+
+ const float base = h < args.n_head_log2 ? args.m0 : args.m1;
+ const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1;
+
+ slope = pow(base, exp);
+ }
+
+ // parallel max
+ float lmax = psrc2 ? psrc2[i02] : -INFINITY;
+
+ for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) {
+ lmax = MAX(lmax, psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f));
+ }
+
+ // find the max value in the block
+ float max_val = simd_max(lmax);
+ if (tptg.x > N_SIMDWIDTH) {
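+        // cross-simdgroup reduction: each simdgroup writes its partial max to threadgroup memory,
+        // then every lane re-reduces the per-simdgroup values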
+ if (sgitg == 0) {
+ buf[tiisg] = -INFINITY;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ buf[sgitg] = max_val;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ max_val = buf[tiisg];
+ max_val = simd_max(max_val);
+ }
+
+ // parallel sum
+ float lsum = 0.0f;
+ for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) {
+ const float exp_psrc0 = exp((psrc0[i00]*args.scale + (pmask ? slope*pmask[i00] : 0.0f)) - max_val);
+ lsum += exp_psrc0;
+ pdst[i00] = exp_psrc0;
+ }
+
+ // This barrier fixes a failing test
+ // ref: https://github.com/ggml-org/ggml/pull/621#discussion_r1425156335
+ threadgroup_barrier(mem_flags::mem_none);
+
+ float sum = simd_sum(lsum);
+
+ if (tptg.x > N_SIMDWIDTH) {
+ if (sgitg == 0) {
+ buf[tiisg] = 0.0f;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ buf[sgitg] = sum;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sum = buf[tiisg];
+ sum = simd_sum(sum);
+ }
+
+ if (psrc2) {
+ sum += exp(psrc2[i02] - max_val);
+ }
+
+ const float inv_sum = 1.0f/sum;
+
+ for (int i00 = tpitg.x; i00 < args.ne00; i00 += tptg.x) {
+ pdst[i00] *= inv_sum;
+ }
+}
+
+template<typename T>
+kernel void kernel_soft_max_4(
+ constant ggml_metal_kargs_soft_max & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * src2,
+ device char * dst,
+ threadgroup float * buf [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint3 tptg[[threads_per_threadgroup]]) {
+ const int32_t i03 = tgpig.z;
+ const int32_t i02 = tgpig.y;
+ const int32_t i01 = tgpig.x;
+
+ const int32_t i13 = i03%args.ne13;
+ const int32_t i12 = i02%args.ne12;
+ const int32_t i11 = i01;
+
+ device const float4 * psrc4 = (device const float4 *) (src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03);
+ device const T * pmask = src1 != src0 ? (device const T * ) (src1 + i11*args.nb11 + i12*args.nb12 + i13*args.nb13) : nullptr;
+ device const float * psrc2 = src2 != src0 ? (device const float * ) (src2) : nullptr;
+ device float4 * pdst4 = (device float4 *) (dst + i01*args.nb1 + i02*args.nb2 + i03*args.nb3);
+
+ float slope = 1.0f;
+
+ if (args.max_bias > 0.0f) {
+ const int32_t h = i02;
+
+ const float base = h < args.n_head_log2 ? args.m0 : args.m1;
+ const int exp = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1;
+
+ slope = pow(base, exp);
+ }
+
+ // parallel max
+ float4 lmax4 = psrc2 ? psrc2[i02] : -INFINITY;
+
+ for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) {
+ lmax4 = fmax(lmax4, psrc4[i00]*args.scale + (float4)((pmask ? slope*pmask[i00] : 0.0f)));
+ }
+
+ const float lmax = MAX(MAX(lmax4[0], lmax4[1]), MAX(lmax4[2], lmax4[3]));
+
+ float max_val = simd_max(lmax);
+ if (tptg.x > N_SIMDWIDTH) {
+ if (sgitg == 0) {
+ buf[tiisg] = -INFINITY;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ buf[sgitg] = max_val;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ max_val = buf[tiisg];
+ max_val = simd_max(max_val);
+ }
+
+ // parallel sum
+ float4 lsum4 = 0.0f;
+ for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) {
+ const float4 exp_psrc4 = exp((psrc4[i00]*args.scale + (float4)((pmask ? slope*pmask[i00] : 0.0f))) - max_val);
+ lsum4 += exp_psrc4;
+ pdst4[i00] = exp_psrc4;
+ }
+
+ const float lsum = lsum4[0] + lsum4[1] + lsum4[2] + lsum4[3];
+
+ // This barrier fixes a failing test
+ // ref: https://github.com/ggml-org/ggml/pull/621#discussion_r1425156335
+ threadgroup_barrier(mem_flags::mem_none);
+
+ float sum = simd_sum(lsum);
+
+ if (tptg.x > N_SIMDWIDTH) {
+ if (sgitg == 0) {
+ buf[tiisg] = 0.0f;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ buf[sgitg] = sum;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sum = buf[tiisg];
+ sum = simd_sum(sum);
+ }
+
+ if (psrc2) {
+ sum += exp(psrc2[i02] - max_val);
+ }
+
+ const float inv_sum = 1.0f/sum;
+
+ for (int i00 = tpitg.x; i00 < args.ne00/4; i00 += tptg.x) {
+ pdst4[i00] *= inv_sum;
+ }
+}
+
+typedef decltype(kernel_soft_max<float>) kernel_soft_max_t;
+typedef decltype(kernel_soft_max_4<float4>) kernel_soft_max_4_t;
+
+template [[host_name("kernel_soft_max_f16")]] kernel kernel_soft_max_t kernel_soft_max<half>;
+template [[host_name("kernel_soft_max_f32")]] kernel kernel_soft_max_t kernel_soft_max<float>;
+template [[host_name("kernel_soft_max_f16_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4<half4>;
+template [[host_name("kernel_soft_max_f32_4")]] kernel kernel_soft_max_4_t kernel_soft_max_4<float4>;
+
+// ref: ggml.c:ggml_compute_forward_ssm_conv_f32
+kernel void kernel_ssm_conv_f32_f32(
+ constant ggml_metal_kargs_ssm_conv & args,
+ device const void * src0,
+ device const void * src1,
+ device float * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+ const int64_t ir = tgpig.x;
+ const int64_t i2 = tgpig.y;
+ const int64_t i3 = tgpig.z;
+
+ const int64_t nc = args.ne10;
+ //const int64_t ncs = args.ne00;
+ //const int64_t nr = args.ne01;
+ //const int64_t n_t = args.ne1;
+ //const int64_t n_s = args.ne2;
+
+ device const float * s = (device const float *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02);
+ device const float * c = (device const float *) ((device const char *) src1 + ir*args.nb11);
+ device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2);
+
+ float sumf = 0.0f;
+
+ for (int64_t i0 = 0; i0 < nc; ++i0) {
+ sumf += s[i0] * c[i0];
+ }
+
+ x[0] = sumf;
+}
+
+kernel void kernel_ssm_conv_f32_f32_4(
+ constant ggml_metal_kargs_ssm_conv & args,
+ device const void * src0,
+ device const void * src1,
+ device float * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+ const int64_t ir = tgpig.x;
+ const int64_t i2 = tgpig.y;
+ const int64_t i3 = tgpig.z;
+
+ const int64_t nc = args.ne10;
+ //const int64_t ncs = args.ne00;
+ //const int64_t nr = args.ne01;
+ //const int64_t n_t = args.ne1;
+ //const int64_t n_s = args.ne2;
+
+ device const float4 * s = (device const float4 *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02);
+ device const float4 * c = (device const float4 *) ((device const char *) src1 + ir*args.nb11);
+ device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2);
+
+ float sumf = 0.0f;
+
+ for (int64_t i0 = 0; i0 < nc/4; ++i0) {
+ sumf += dot(s[i0], c[i0]);
+ }
+
+ x[0] = sumf;
+}
+
+constant short FC_ssm_conv_bs [[function_constant(FC_SSM_CONV + 0)]];
+
+// Batched version: each threadgroup processes multiple tokens for better efficiency
+// Thread layout: each thread handles one token, threadgroup covers BATCH_SIZE tokens
+kernel void kernel_ssm_conv_f32_f32_batched(
+ constant ggml_metal_kargs_ssm_conv & args,
+ device const void * src0,
+ device const void * src1,
+ device float * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+ // tgpig.x = row index (ir)
+ // tgpig.y = batch of tokens (i2_base / BATCH_SIZE)
+ // tgpig.z = sequence index (i3)
+ // tpitg.x = thread within batch (0..BATCH_SIZE-1)
+ const short BATCH_SIZE = FC_ssm_conv_bs;
+
+ const int64_t ir = tgpig.x;
+ const int64_t i2_base = tgpig.y * BATCH_SIZE;
+ const int64_t i3 = tgpig.z;
+ const int64_t i2_off = tpitg.x;
+ const int64_t i2 = i2_base + i2_off;
+
+ const int64_t nc = args.ne10; // conv kernel size (typically 4)
+ const int64_t n_t = args.ne1; // number of tokens
+
+ // Bounds check for partial batches at the end
+ if (i2 >= n_t) {
+ return;
+ }
+
+ // Load conv weights (shared across all tokens for this row)
+ device const float * c = (device const float *) ((device const char *) src1 + ir*args.nb11);
+
+ // Load source for this specific token
+ device const float * s = (device const float *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02);
+
+ // Output location for this token
+ device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2);
+
+ float sumf = 0.0f;
+ for (int64_t i0 = 0; i0 < nc; ++i0) {
+ sumf += s[i0] * c[i0];
+ }
+
+ x[0] = sumf;
+}
+
+kernel void kernel_ssm_conv_f32_f32_batched_4(
+ constant ggml_metal_kargs_ssm_conv & args,
+ device const void * src0,
+ device const void * src1,
+ device float * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+ // tgpig.x = row index (ir)
+ // tgpig.y = batch of tokens (i2_base / BATCH_SIZE)
+ // tgpig.z = sequence index (i3)
+ // tpitg.x = thread within batch (0..BATCH_SIZE-1)
+ const short BATCH_SIZE = FC_ssm_conv_bs;
+
+ const int64_t ir = tgpig.x;
+ const int64_t i2_base = tgpig.y * BATCH_SIZE;
+ const int64_t i3 = tgpig.z;
+ const int64_t i2_off = tpitg.x;
+ const int64_t i2 = i2_base + i2_off;
+
+ const int64_t nc = args.ne10; // conv kernel size (typically 4)
+ const int64_t n_t = args.ne1; // number of tokens
+
+ // Bounds check for partial batches at the end
+ if (i2 >= n_t) {
+ return;
+ }
+
+ // Load conv weights (shared across all tokens for this row)
+ device const float4 * c = (device const float4 *) ((device const char *) src1 + ir*args.nb11);
+
+ // Load source for this specific token
+ device const float4 * s = (device const float4 *) ((device const char *) src0 + ir*args.nb01 + i2*args.nb00 + i3*args.nb02);
+
+ // Output location for this token
+ device float * x = (device float *) ((device char *) dst + ir*args.nb0 + i2*args.nb1 + i3*args.nb2);
+
+ float sumf = 0.0f;
+ for (int64_t i0 = 0; i0 < nc/4; ++i0) {
+ sumf += dot(s[i0], c[i0]);
+ }
+
+ x[0] = sumf;
+}
+
+// ref: ggml.c:ggml_compute_forward_ssm_scan_f32, Mamba-2 part
+// Optimized version: reduces redundant memory loads by having one thread per token pre-compute the shared values
+kernel void kernel_ssm_scan_f32(
+ constant ggml_metal_kargs_ssm_scan & args,
+ device const void * src0,
+ device const void * src1,
+ device const void * src2,
+ device const void * src3,
+ device const void * src4,
+ device const void * src5,
+ device const void * src6,
+ device float * dst,
+ threadgroup float * shared [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgptg[[simdgroups_per_threadgroup]],
+ uint3 tgpg[[threadgroups_per_grid]]) {
+ constexpr short NW = N_SIMDWIDTH;
+
+ // Shared memory layout:
+ // [0..sgptg*NW-1]: partial sums for reduction (existing)
+ // [sgptg*NW..sgptg*NW+sgptg-1]: pre-computed x_dt values for each token in batch
+ // [sgptg*NW+sgptg..sgptg*NW+2*sgptg-1]: pre-computed dA values for each token in batch
+ threadgroup float * shared_sums = shared;
+ threadgroup float * shared_x_dt = shared + sgptg * NW;
+ threadgroup float * shared_dA = shared + sgptg * NW + sgptg;
+
+ shared_sums[tpitg.x] = 0.0f;
+
+ const int32_t i0 = tpitg.x;
+ const int32_t i1 = tgpig.x;
+ const int32_t ir = tgpig.y; // current head
+ const int32_t i3 = tgpig.z; // current seq
+
+ const int32_t nc = args.d_state;
+ const int32_t nr = args.d_inner;
+ const int32_t nh = args.n_head;
+ const int32_t ng = args.n_group;
+ const int32_t n_t = args.n_seq_tokens;
+
+ const int32_t s_off = args.s_off;
+
+ device const int32_t * ids = (device const int32_t *) src6;
+
+ device const float * s0_buff = (device const float *) ((device const char *) src0 + ir*args.nb02 + ids[i3]*args.nb03);
+ device float * s_buff = (device float *) ((device char *) dst + ir*args.nb02 + i3*args.nb03 + s_off);
+
+ const int32_t i = i0 + i1*nc;
+ const int32_t g = ir / (nh / ng); // repeat_interleave
+
+ float s0 = s0_buff[i];
+ float s = 0.0f;
+
+ device const float * A = (device const float *) ((device const char *) src3 + ir*args.nb31); // {ne30, nh}
+
+ const float A0 = A[i0%args.ne30];
+
+ device const float * x = (device const float *)((device const char *) src1 + i1*args.nb10 + ir*args.nb11 + i3*args.nb13); // {dim, nh, nt, ns}
+ device const float * dt = (device const float *)((device const char *) src2 + ir*args.nb20 + i3*args.nb22); // {nh, nt, ns}
+ device const float * B = (device const float *)((device const char *) src4 + g*args.nb41 + i3*args.nb43); // {d_state, ng, nt, ns}
+ device const float * C = (device const float *)((device const char *) src5 + g*args.nb51 + i3*args.nb53); // {d_state, ng, nt, ns}
+
+ device float * y = dst + (i1 + ir*(nr) + i3*(n_t*nh*nr)); // {dim, nh, nt, ns}
+
+ for (int i2 = 0; i2 < n_t; i2 += sgptg) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // Pre-compute x_dt and dA for this batch of tokens
+        // Only the first sgptg threads (one per token) do the loads and the expensive math
+ if (i0 < sgptg && i2 + i0 < n_t) {
+ // ns12 and ns21 are element strides (nb12/nb10, nb21/nb20)
+ device const float * x_t = x + i0 * args.ns12;
+ device const float * dt_t = dt + i0 * args.ns21;
+
+ const float dt0 = dt_t[0];
+ const float dtsp = dt0 <= 20.0f ? log(1.0f + exp(dt0)) : dt0;
+ shared_x_dt[i0] = x_t[0] * dtsp;
+ shared_dA[i0] = dtsp; // Store dtsp, compute exp(dtsp * A0) per-thread since A0 varies
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ for (int t = 0; t < sgptg && i2 + t < n_t; t++) {
+ const float x_dt = shared_x_dt[t];
+ const float dA = exp(shared_dA[t] * A0);
+
+ s = (s0 * dA) + (B[i0] * x_dt);
+
+ const float sumf = simd_sum(s * C[i0]);
+
+ if (tiisg == 0) {
+ shared_sums[t*NW + sgitg] = sumf;
+ }
+
+            // carry the state forward to the next token
+ s0 = s;
+
+ B += args.ns42;
+ C += args.ns52;
+ }
+
+ // Advance pointers for next batch
+ x += sgptg * args.ns12;
+ dt += sgptg * args.ns21;
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ const float sumf = simd_sum(shared_sums[sgitg*NW + tiisg]);
+
+ if (tiisg == 0 && i2 + sgitg < n_t) {
+ y[sgitg*nh*nr] = sumf;
+ }
+
+ y += sgptg*nh*nr;
+ }
+
+ s_buff[i] = s;
+}
+
+kernel void kernel_rwkv_wkv6_f32(
+ device const float * k,
+ device const float * v,
+ device const float * r,
+ device const float * tf,
+ device const float * td,
+ device const float * state_in,
+ device float * dst,
+ constant uint & B,
+ constant uint & T,
+ constant uint & C,
+ constant uint & H,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
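+    // each threadgroup handles one (batch, head) pair; thread tid owns column tid of the
+    // head_size x head_size state, while the per-token k/r/tf/td vectors are staged in threadgroup memory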
+ const uint head_size = 64; // TODO: support head_size = 128
+ const uint batch_id = tgpig.x / H;
+ const uint head_id = tgpig.x % H;
+ const uint tid = tpitg.x;
+
+ if (batch_id >= B || head_id >= H) {
+ return;
+ }
+
+ const uint state_size = C * head_size;
+ const uint n_seq_tokens = T / B;
+
+ threadgroup float _k[head_size];
+ threadgroup float _r[head_size];
+ threadgroup float _tf[head_size];
+ threadgroup float _td[head_size];
+
+ float state[head_size];
+
+ for (uint i = 0; i < head_size; i++) {
+ state[i] = state_in[batch_id * state_size + head_id * head_size * head_size
+ + i * head_size + tid];
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ _tf[tid] = tf[head_id * head_size + tid];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid;
+ const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid;
+
+ for (uint t = start_t; t < end_t; t += C) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ _k[tid] = k[t];
+ _r[tid] = r[t];
+ _td[tid] = td[t];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ const float v_val = v[t];
+ float y = 0.0;
+
+ for (uint j = 0; j < head_size; j += 4) {
+ float4 k_vec = float4(_k[j], _k[j+1], _k[j+2], _k[j+3]);
+ float4 r_vec = float4(_r[j], _r[j+1], _r[j+2], _r[j+3]);
+ float4 tf_vec = float4(_tf[j], _tf[j+1], _tf[j+2], _tf[j+3]);
+ float4 td_vec = float4(_td[j], _td[j+1], _td[j+2], _td[j+3]);
+ float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]);
+
+ float4 kv = k_vec * v_val;
+
+ float4 temp = tf_vec * kv + s_vec;
+ y += dot(r_vec, temp);
+
+ s_vec = s_vec * td_vec + kv;
+ state[j] = s_vec[0];
+ state[j+1] = s_vec[1];
+ state[j+2] = s_vec[2];
+ state[j+3] = s_vec[3];
+ }
+
+ dst[t] = y;
+ }
+
+ for (uint i = 0; i < head_size; i++) {
+ dst[T * C + batch_id * state_size + head_id * head_size * head_size
+ + i * head_size + tid] = state[i];
+ }
+}
+
+kernel void kernel_rwkv_wkv7_f32(
+ device const float * r,
+ device const float * w,
+ device const float * k,
+ device const float * v,
+ device const float * a,
+ device const float * b,
+ device const float * state_in,
+ device float * dst,
+ constant uint & B,
+ constant uint & T,
+ constant uint & C,
+ constant uint & H,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ const uint head_size = 64; // TODO: support head_size = 128
+ const uint batch_id = tgpig.x / H;
+ const uint head_id = tgpig.x % H;
+ const uint tid = tpitg.x;
+
+ if (batch_id >= B || head_id >= H) {
+ return;
+ }
+
+ const uint state_size = C * head_size;
+ const uint n_seq_tokens = T / B;
+
+ threadgroup float _r[head_size];
+ threadgroup float _w[head_size];
+ threadgroup float _k[head_size];
+ threadgroup float _a[head_size];
+ threadgroup float _b[head_size];
+
+ float state[head_size];
+
+ for (uint i = 0; i < head_size; i++) {
+ state[i] = state_in[batch_id * state_size + head_id * head_size * head_size
+ + tid * head_size + i];
+ }
+
+ const uint start_t = batch_id * n_seq_tokens * C + head_id * head_size + tid;
+ const uint end_t = (batch_id + 1) * n_seq_tokens * C + head_id * head_size + tid;
+
+ for (uint t = start_t; t < end_t; t += C) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ _r[tid] = r[t];
+ _w[tid] = w[t];
+ _k[tid] = k[t];
+ _a[tid] = a[t];
+ _b[tid] = b[t];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ const float v_val = v[t];
+ float y = 0.0, sa = 0.0;
+
+ float4 sa_vec(0.0);
+
+ for (uint j = 0; j < head_size; j += 4) {
+ float4 a_vec = float4(_a[j], _a[j+1], _a[j+2], _a[j+3]);
+ float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]);
+ sa_vec += a_vec * s_vec;
+ }
+ sa = sa_vec[0] + sa_vec[1] + sa_vec[2] + sa_vec[3];
+
+ for (uint j = 0; j < head_size; j += 4) {
+ float4 r_vec = float4(_r[j], _r[j+1], _r[j+2], _r[j+3]);
+ float4 w_vec = float4(_w[j], _w[j+1], _w[j+2], _w[j+3]);
+ float4 k_vec = float4(_k[j], _k[j+1], _k[j+2], _k[j+3]);
+ float4 b_vec = float4(_b[j], _b[j+1], _b[j+2], _b[j+3]);
+ float4 s_vec = float4(state[j], state[j+1], state[j+2], state[j+3]);
+
+ float4 kv = k_vec * v_val;
+
+ s_vec = s_vec * w_vec + kv + sa * b_vec;
+ y += dot(s_vec, r_vec);
+
+ state[j] = s_vec[0];
+ state[j+1] = s_vec[1];
+ state[j+2] = s_vec[2];
+ state[j+3] = s_vec[3];
+ }
+
+ dst[t] = y;
+ }
+
+ for (uint i = 0; i < head_size; i++) {
+ dst[T * C + batch_id * state_size + head_id * head_size * head_size
+ + tid * head_size + i] = state[i];
+ }
+}
+
+constant short FC_solve_tri_nsg [[function_constant(FC_SOLVE_TRI + 0)]];
+constant short FC_solve_tri_n [[function_constant(FC_SOLVE_TRI + 1)]];
+constant short FC_solve_tri_k [[function_constant(FC_SOLVE_TRI + 2)]];
+
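+// forward substitution: each simdgroup handles one column (i01) of src1/dst, while rows of the
+// triangular src0 matrix are staged in threadgroup memory in chunks of NSG and consumed in order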
+kernel void kernel_solve_tri_f32(
+ constant ggml_metal_kargs_solve_tri & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ ushort3 tgpig[[threadgroup_position_in_grid]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ constexpr short NW = N_SIMDWIDTH;
+
+ const short NSG = FC_solve_tri_nsg;
+ const short N = FC_solve_tri_n;
+ const short K = FC_solve_tri_k;
+ const short NP = PAD2(N, NW);
+
+ const int32_t ne02 = args.ne02;
+ const int32_t ne03 = args.ne03;
+
+ const int32_t i03 = tgpig.z;
+ const int32_t i02 = tgpig.y;
+ const int32_t i01 = tgpig.x*NSG + sgitg;
+
+ threadgroup float * sh0 = (threadgroup float *) shmem;
+
+ device const float * src0_ptr = (device const float *)(src0 + i02 * args.nb02 + i03 * args.nb03) + sgitg*N;
+ device const float * src1_ptr = (device const float *)(src1 + i02 * args.nb12 + i03 * args.nb13) + i01;
+ device float * dst_ptr = (device float *)(dst + i02 * args.nb2 + i03 * args.nb3) + i01;
+
+ for (short rr = 0; rr < N; rr += NSG) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ {
+ threadgroup float * sh0_cur = sh0 + sgitg*NP;
+
+ for (short t = 0; t*NW < N; ++t) {
+ const short idx = t*NW + tiisg;
+ sh0_cur[idx] = src0_ptr[idx];
+ }
+
+ src0_ptr += NSG*N;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (i01 >= args.ne10) {
+ continue;
+ }
+
+ for (short ir = 0; ir < NSG && rr + ir < N; ++ir) {
+ const short r = rr + ir;
+
+ threadgroup float * sh0_cur = sh0 + ir*NP;
+
+ float sum = 0.0f;
+
+ for (short t = 0; t*NW < r; ++t) {
+ const short idx = t*NW + tiisg;
+ sum += sh0_cur[idx] * dst_ptr[idx*K] * (idx < r);
+ }
+
+ sum = simd_sum(sum);
+
+ if (tiisg == 0) {
+ const float diag = sh0_cur[r];
+
+ dst_ptr[r*K] = (src1_ptr[r*K] - sum) / diag;
+ }
+ }
+ }
+}
+
+kernel void kernel_argmax_f32(
+ constant ggml_metal_kargs_argmax & args,
+ device const char * src0,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ device const float * x_row = (device const float *) ((device const char *) src0 + tgpig * args.nb01);
+
+ float lmax = -INFINITY;
+ int32_t larg = -1;
+
+ for (int i00 = tpitg; i00 < args.ne00; i00 += ntg) {
+ if (x_row[i00] > lmax) {
+ lmax = x_row[i00];
+ larg = i00;
+ }
+ }
+
+ // find the argmax value in the block
+ float max_val = simd_max(lmax);
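+    // lanes whose local max equals the simdgroup max contribute their index; simd_max resolves ties to the largest index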
+ int32_t arg_val = simd_max(select(-1, larg, lmax == max_val));
+
+ device int32_t * dst_i32 = (device int32_t *) dst;
+
+ threadgroup float * shared_maxval = (threadgroup float *) shmem;
+ threadgroup int32_t * shared_argmax = (threadgroup int32_t *) shmem + N_SIMDWIDTH;
+
+ if (ntg > N_SIMDWIDTH) {
+ if (sgitg == 0) {
+ shared_maxval[tiisg] = -INFINITY;
+ shared_argmax[tiisg] = -1;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ shared_maxval[sgitg] = max_val;
+ shared_argmax[sgitg] = arg_val;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ max_val = shared_maxval[tiisg];
+ arg_val = shared_argmax[tiisg];
+
+ float max_val_reduced = simd_max(max_val);
+ int32_t arg_val_reduced = simd_max(select(-1, arg_val, max_val == max_val_reduced));
+
+ dst_i32[tgpig] = arg_val_reduced;
+
+ return;
+ }
+
+ dst_i32[tgpig] = arg_val;
+}
+
+// F == 1 : norm (no fuse)
+// F == 2 : norm + mul
+// F == 3 : norm + mul + add
+template <typename T, short F>
+kernel void kernel_norm_fuse_impl(
+ constant ggml_metal_kargs_norm & args,
+ device const char * src0,
+ device const char * src1_0,
+ device const char * src1_1,
+ device char * dst,
+ threadgroup float * shmem_f32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ if (sgitg == 0) {
+ shmem_f32[tiisg] = 0.0f;
+ }
+
+ const int i01 = tgpig.x;
+ const int i02 = tgpig.y;
+ const int i03 = tgpig.z;
+
+ device const T * x = (device const T *) (src0 + i03*args.nbf3[0] + i02*args.nbf2[0] + i01*args.nbf1[0]);
+
+ device const T * f0 = (device const T *) (src1_0 + (i03%args.nef3[1])*args.nbf3[1] + (i02%args.nef2[1])*args.nbf2[1] + (i01%args.nef1[1])*args.nbf1[1]);
+ device const T * f1 = (device const T *) (src1_1 + (i03%args.nef3[2])*args.nbf3[2] + (i02%args.nef2[2])*args.nbf2[2] + (i01%args.nef1[2])*args.nbf1[2]);
+
+ T sumft(0.0f);
+
+ float sumf = 0.0f;
+
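+    // first pass: accumulate the row sum to compute the mean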
+ for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) {
+ sumft += x[i00];
+ }
+ sumf = dot(sumft, T(1.0f));
+ sumf = simd_sum(sumf);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ shmem_f32[sgitg] = sumf;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sumf = shmem_f32[tiisg];
+ sumf = simd_sum(sumf);
+
+ const float mean = sumf/args.ne00;
+
+ device T * y = (device T *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1);
+
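+    // second pass: subtract the mean and accumulate the variance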
+ sumf = 0.0f;
+ for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) {
+ y[i00] = x[i00] - mean;
+ sumf += dot(y[i00], y[i00]);
+ }
+ sumf = simd_sum(sumf);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ shmem_f32[sgitg] = sumf;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sumf = shmem_f32[tiisg];
+ sumf = simd_sum(sumf);
+
+ const float variance = sumf/args.ne00;
+
+ const float scale = 1.0f/sqrt(variance + args.eps);
+ for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) {
+ if (F == 1) {
+ y[i00] = (y[i00]*scale);
+ }
+ if (F == 2) {
+ y[i00] = (y[i00]*scale)*f0[i00];
+ }
+ if (F == 3) {
+ y[i00] = (y[i00]*scale)*f0[i00] + f1[i00];
+ }
+ }
+}
+
+typedef decltype(kernel_norm_fuse_impl<float4, 1>) kernel_norm_fuse_t;
+
+template [[host_name("kernel_norm_f32")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl<float, 1>;
+template [[host_name("kernel_norm_mul_f32")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl<float, 2>;
+template [[host_name("kernel_norm_mul_add_f32")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl<float, 3>;
+
+template [[host_name("kernel_norm_f32_4")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl<float4, 1>;
+template [[host_name("kernel_norm_mul_f32_4")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl<float4, 2>;
+template [[host_name("kernel_norm_mul_add_f32_4")]] kernel kernel_norm_fuse_t kernel_norm_fuse_impl<float4, 3>;
+
+// F == 1 : rms_norm (no fuse)
+// F == 2 : rms_norm + mul
+// F == 3 : rms_norm + mul + add
+template <typename T, short F>
+kernel void kernel_rms_norm_fuse_impl(
+ constant ggml_metal_kargs_norm & args,
+ device const char * src0,
+ device const char * src1_0,
+ device const char * src1_1,
+ device char * dst,
+ threadgroup float * shmem_f32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ if (sgitg == 0) {
+ shmem_f32[tiisg] = 0.0f;
+ }
+
+ const int i01 = tgpig.x;
+ const int i02 = tgpig.y;
+ const int i03 = tgpig.z;
+
+ device const T * x = (device const T *) (src0 + i03*args.nbf3[0] + i02*args.nbf2[0] + i01*args.nbf1[0]);
+
+ device const T * f0 = (device const T *) (src1_0 + (i03%args.nef3[1])*args.nbf3[1] + (i02%args.nef2[1])*args.nbf2[1] + (i01%args.nef1[1])*args.nbf1[1]);
+ device const T * f1 = (device const T *) (src1_1 + (i03%args.nef3[2])*args.nbf3[2] + (i02%args.nef2[2])*args.nbf2[2] + (i01%args.nef1[2])*args.nbf1[2]);
+
+ float sumf = 0.0f;
+
+ // parallel sum
+ for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) {
+ sumf += dot(x[i00], x[i00]);
+ }
+ sumf = simd_sum(sumf);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ shmem_f32[sgitg] = sumf;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sumf = shmem_f32[tiisg];
+ sumf = simd_sum(sumf);
+
+ const float mean = sumf/args.ne00;
+ const float scale = 1.0f/sqrt(mean + args.eps);
+
+ device T * y = (device T *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1);
+ for (int i00 = tpitg.x; i00 < args.ne00_t; i00 += ntg.x) {
+ if (F == 1) {
+ y[i00] = (x[i00]*scale);
+ }
+ if (F == 2) {
+ y[i00] = (x[i00]*scale)*f0[i00];
+ }
+ if (F == 3) {
+ y[i00] = (x[i00]*scale)*f0[i00] + f1[i00];
+ }
+ }
+}
+
+typedef decltype(kernel_rms_norm_fuse_impl<float4, 1>) kernel_rms_norm_fuse_t;
+
+template [[host_name("kernel_rms_norm_f32")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<float, 1>;
+template [[host_name("kernel_rms_norm_mul_f32")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<float, 2>;
+template [[host_name("kernel_rms_norm_mul_add_f32")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<float, 3>;
+
+template [[host_name("kernel_rms_norm_f32_4")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<float4, 1>;
+template [[host_name("kernel_rms_norm_mul_f32_4")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<float4, 2>;
+template [[host_name("kernel_rms_norm_mul_add_f32_4")]] kernel kernel_rms_norm_fuse_t kernel_rms_norm_fuse_impl<float4, 3>;
+
+template <typename T0, typename T>
+kernel void kernel_l2_norm_impl(
+ constant ggml_metal_kargs_l2_norm & args,
+ device const char * src0,
+ device char * dst,
+ threadgroup float * shmem_f32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int i03 = tgpig.z;
+ const int i02 = tgpig.y;
+ const int i01 = tgpig.x;
+
+ if (sgitg == 0) {
+ shmem_f32[tiisg] = 0.0f;
+ }
+
+ device const T0 * x = (device const T0 *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01);
+ device T * y = (device T *) (dst + i03*args.nb3 + i02*args.nb2 + i01*args.nb1);
+
+ float sumf = 0.0f;
+
+ // parallel sum
+ for (int i00 = tpitg.x; i00 < args.ne00; i00 += ntg.x) {
+ sumf += dot(x[i00], x[i00]);
+ }
+ sumf = simd_sum(sumf);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ shmem_f32[sgitg] = sumf;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ sumf = shmem_f32[tiisg];
+ sumf = simd_sum(sumf);
+
+ const float scale = 1.0f/sqrt(max(sumf, args.eps));
+
+ for (int i00 = tpitg.x; i00 < args.ne00; i00 += ntg.x) {
+ y[i00] = x[i00] * scale;
+ }
+}
+
+typedef decltype(kernel_l2_norm_impl<float, float>) kernel_l2_norm_t;
+
+template [[host_name("kernel_l2_norm_f32_f32")]] kernel kernel_l2_norm_t kernel_l2_norm_impl<float, float>;
+template [[host_name("kernel_l2_norm_f32_f32_4")]] kernel kernel_l2_norm_t kernel_l2_norm_impl<float4, float4>;
+
+kernel void kernel_group_norm_f32(
+ constant ggml_metal_kargs_group_norm & args,
+ device const float * src0,
+ device float * dst,
+ threadgroup float * buf [[threadgroup(0)]],
+ uint tgpig[[threadgroup_position_in_grid]],
+ uint tpitg[[thread_position_in_threadgroup]],
+ uint sgitg[[simdgroup_index_in_threadgroup]],
+ uint tiisg[[thread_index_in_simdgroup]],
+ uint ntg[[threads_per_threadgroup]]) {
+ const int64_t ne = args.ne00*args.ne01*args.ne02;
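+    // gs: number of elements per group (ceil(ne02/ngrp) channels of ne00*ne01 elements each)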
+ const int64_t gs = args.ne00*args.ne01*((args.ne02 + args.ngrp - 1) / args.ngrp);
+
+ int start = tgpig * gs;
+ int end = start + gs;
+
+ start += tpitg;
+
+ if (end >= ne) {
+ end = ne;
+ }
+
+ float tmp = 0.0f; // partial sum for thread in warp
+
+ for (int j = start; j < end; j += ntg) {
+ tmp += src0[j];
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ tmp = simd_sum(tmp);
+ if (ntg > N_SIMDWIDTH) {
+ if (sgitg == 0) {
+ buf[tiisg] = 0.0f;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ buf[sgitg] = tmp;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ tmp = buf[tiisg];
+ tmp = simd_sum(tmp);
+ }
+
+ const float mean = tmp / gs;
+ tmp = 0.0f;
+
+ for (int j = start; j < end; j += ntg) {
+ float xi = src0[j] - mean;
+ dst[j] = xi;
+ tmp += xi * xi;
+ }
+
+ tmp = simd_sum(tmp);
+ if (ntg > N_SIMDWIDTH) {
+ if (sgitg == 0) {
+ buf[tiisg] = 0.0f;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tiisg == 0) {
+ buf[sgitg] = tmp;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ tmp = buf[tiisg];
+ tmp = simd_sum(tmp);
+ }
+
+ const float variance = tmp / gs;
+ const float scale = 1.0f/sqrt(variance + args.eps);
+ for (int j = start; j < end; j += ntg) {
+ dst[j] *= scale;
+ }
+}
+
+// function to calculate the inner product between half a q4_0 block and 16 floats (yl), sumy is SUM(yl[i])
+// il indicates where the q4 quants begin (0 or QK4_0/4)
+// we assume that the yl's have been multiplied by the appropriate scale factors
+// that correspond to the missing bit shifts (1, 1/16, 1/256, 1/4096)
+inline float block_q_n_dot_y(device const block_q4_0 * qb_curr, float sumy, thread float * yl, int il) {
+ float d = qb_curr->d;
+
+ float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
+
+ device const uint16_t * qs = ((device const uint16_t *) qb_curr + 1 + il/2);
+
+ for (int i = 0; i < 8; i += 2) {
+ acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F);
+ acc[1] += yl[i + 1] * (qs[i / 2] & 0x0F00);
+ acc[2] += yl[i + 8] * (qs[i / 2] & 0x00F0);
+ acc[3] += yl[i + 9] * (qs[i / 2] & 0xF000);
+ }
+
+ return d * (sumy * -8.f + acc[0] + acc[1] + acc[2] + acc[3]);
+}
+
+// function to calculate the inner product between half a q4_1 block and 16 floats (yl), sumy is SUM(yl[i])
+// il indicates where the q4 quants begin (0 or QK4_0/4)
+// we assume that the yl's have been multiplied by the appropriate scale factors
+// that correspond to the missing bit shifts (1, 1/16, 1/256, 1/4096)
+inline float block_q_n_dot_y(device const block_q4_1 * qb_curr, float sumy, thread float * yl, int il) {
+ float d = qb_curr->d;
+ float m = qb_curr->m;
+
+ float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
+
+ device const uint16_t * qs = ((device const uint16_t *) qb_curr + 2 + il/2);
+
+ for (int i = 0; i < 8; i+=2) {
+ acc[0] += yl[i + 0] * (qs[i / 2] & 0x000F);
+ acc[1] += yl[i + 1] * (qs[i / 2] & 0x0F00);
+ acc[2] += yl[i + 8] * (qs[i / 2] & 0x00F0);
+ acc[3] += yl[i + 9] * (qs[i / 2] & 0xF000);
+ }
+
+ return d * (acc[0] + acc[1] + acc[2] + acc[3]) + sumy * m;
+}
+
+// function to calculate the inner product between half a q5_0 block and 16 floats (yl), sumy is SUM(yl[i])
+// il indicates where the q5 quants begin (0 or QK5_0/4)
+// we assume that the yl's have been multiplied by the appropriate scale factors
+// that correspond to the missing bit shifts (1, 1/16, 1/256, 1/4096)
+inline float block_q_n_dot_y(device const block_q5_0 * qb_curr, float sumy, thread float * yl, int il) {
+ float d = qb_curr->d;
+
+ float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
+
+ device const uint16_t * qs = ((device const uint16_t *)qb_curr + 3 + il/2);
+ const uint32_t qh = *((device const uint32_t *)qb_curr->qh);
+
+ for (int i = 0; i < 8; i+=2) {
+ acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010));
+ acc[1] += yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000));
+ acc[2] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100));
+ acc[3] += yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000));
+ }
+
+ return d * (sumy * -16.f + acc[0] + acc[1] + acc[2] + acc[3]);
+}
+
+// function to calculate the inner product between half a q5_1 block and 16 floats (yl), sumy is SUM(yl[i])
+// il indicates where the q5 quants begin (0 or QK5_1/4)
+// we assume that the yl's have been multiplied by the appropriate scale factors
+// that correspond to the missing bit shifts (1, 1/16, 1/256, 1/4096)
+inline float block_q_n_dot_y(device const block_q5_1 * qb_curr, float sumy, thread float * yl, int il) {
+ float d = qb_curr->d;
+ float m = qb_curr->m;
+
+ float acc[4] = { 0.0f, 0.0f, 0.0f, 0.0f };
+
+ device const uint16_t * qs = ((device const uint16_t *)qb_curr + 4 + il/2);
+ const uint32_t qh = *((device const uint32_t *)qb_curr->qh);
+
+ for (int i = 0; i < 8; i+=2) {
+ acc[0] += yl[i + 0] * ((qs[i / 2] & 0x000F) | ((qh >> (i+0+il ) << 4 ) & 0x00010));
+ acc[1] += yl[i + 1] * ((qs[i / 2] & 0x0F00) | ((qh >> (i+1+il ) << 12) & 0x01000));
+ acc[2] += yl[i + 8] * ((qs[i / 2] & 0x00F0) | ((qh >> (i+0+il+QK5_0/2) << 8 ) & 0x00100));
+ acc[3] += yl[i + 9] * ((qs[i / 2] & 0xF000) | ((qh >> (i+1+il+QK5_0/2) << 16) & 0x10000));
+ }
+
+ return d * (acc[0] + acc[1] + acc[2] + acc[3]) + sumy * m;
+}
+
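+// reduce the per-row partial sums across simdgroups via threadgroup memory and write the final
+// NR0 row results from the first thread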
+template<short NR0>
+static inline void helper_mv_reduce_and_write(
+ device float * dst_f32,
+ float sumf[NR0],
+ const int r0,
+ const int ne01,
+ ushort tiisg,
+ ushort sgitg,
+ threadgroup char * shmem) {
+ constexpr short NW = N_SIMDWIDTH;
+
+ threadgroup float * shmem_f32[NR0];
+
+ for (short row = 0; row < NR0; ++row) {
+ shmem_f32[row] = (threadgroup float *) shmem + NW*row;
+
+ if (sgitg == 0) {
+ shmem_f32[row][tiisg] = 0.0f;
+ }
+
+ sumf[row] = simd_sum(sumf[row]);
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ for (short row = 0; row < NR0; ++row) {
+ if (tiisg == 0) {
+ shmem_f32[row][sgitg] = sumf[row];
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ for (short row = 0; row < NR0 && r0 + row < ne01; ++row) {
+ float tot = simd_sum(shmem_f32[row][tiisg]);
+
+ if (tiisg == 0 && sgitg == 0) {
+ dst_f32[r0 + row] = tot;
+ }
+ }
+}
+
+constant short FC_mul_mv_nsg [[function_constant(FC_MUL_MV + 0)]];
+constant short FC_mul_mv_nxpsg [[function_constant(FC_MUL_MV + 1)]];
+
+template<typename block_q_type, short NR0, typename args_t>
+void mul_vec_q_n_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ constexpr short NW = N_SIMDWIDTH;
+ constexpr short NQ = 16;
+
+ const int nb = args.ne00/QK4_0;
+
+ const int r0 = (tgpig.x*NSG + sgitg)*NR0;
+ //const int r0 = tgpig.x*NR0;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ //device const block_q_type * x = (device const block_q_type *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ // pointers to src0 rows
+ device const block_q_type * ax[NR0];
+ FOR_UNROLL (int row = 0; row < NR0; ++row) {
+ const uint64_t offset0 = (r0 + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+
+ ax[row] = (device const block_q_type *) ((device char *) src0 + offset0);
+ }
+
+ float sumf[NR0] = {0.f};
+
+ const short ix = (tiisg/(NW/NQ));
+ const short il = (tiisg%(NW/NQ))*8;
+
+ //const int ib0 = sgitg*NQ + ix;
+ const int ib0 = ix;
+
+ float yl[16]; // src1 vector cache
+
+ //device const float * yb = y + ix*QK4_0 + il;
+ device const float * yb = y + ib0*QK4_0 + il;
+
+ // each thread in a SIMD group deals with half a block.
+ //for (int ib = ib0; ib < nb; ib += NSG*NQ) {
+ for (int ib = ib0; ib < nb; ib += NQ) {
+ float sumy[2] = { 0.f, 0.f };
+
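+        // pre-scale the activations so block_q_n_dot_y can use the packed nibbles directly
+        // (factors 1, 1/16, 1/256, 1/4096 undo the missing bit shifts)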
+ FOR_UNROLL (short i = 0; i < 8; i += 2) {
+ sumy[0] += yb[i + 0] + yb[i + 1];
+ yl[i + 0] = yb[i + 0];
+ yl[i + 1] = yb[i + 1]/256.f;
+
+ sumy[1] += yb[i + 16] + yb[i + 17];
+ yl[i + 8] = yb[i + 16]/16.f;
+ yl[i + 9] = yb[i + 17]/4096.f;
+ }
+
+ FOR_UNROLL (short row = 0; row < NR0; row++) {
+ sumf[row] += block_q_n_dot_y(ax[row] + ib, sumy[0] + sumy[1], yl, il);
+ }
+
+ yb += QK4_0 * 16;
+ //yb += NSG*NQ*QK4_0;
+ }
+
+ device float * dst_f32 = (device float *) dst + im*args.ne0*args.ne1 + r1*args.ne0;
+
+ //helper_mv_reduce_and_write<NR0>(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem);
+
+ for (int row = 0; row < NR0; ++row) {
+ const float tot = simd_sum(sumf[row]);
+
+ if (tiisg == 0 && r0 + row < args.ne01) {
+ dst_f32[r0 + row] = tot;
+ }
+ }
+}
+
+kernel void kernel_mul_mv_q4_0_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ mul_vec_q_n_f32_impl<block_q4_0, N_R0_Q4_0, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+kernel void kernel_mul_mv_q4_1_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ mul_vec_q_n_f32_impl<block_q4_1, N_R0_Q4_1, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+kernel void kernel_mul_mv_q5_0_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ mul_vec_q_n_f32_impl<block_q5_0, N_R0_Q5_0, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+kernel void kernel_mul_mv_q5_1_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ mul_vec_q_n_f32_impl<block_q5_1, N_R0_Q5_1, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<short NR0, typename args_t>
+void kernel_mul_mv_q8_0_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ constexpr short NW = N_SIMDWIDTH;
+ constexpr short NQ = 8;
+
+ const int nb = args.ne00/QK8_0;
+
+ const int r0 = tgpig.x*NR0;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ //device const block_q8_0 * x = (device const block_q8_0 *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ // pointers to src0 rows
+ device const block_q8_0 * ax[NR0];
+ FOR_UNROLL (short row = 0; row < NR0; ++row) {
+ const uint64_t offset0 = (r0 + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+
+ ax[row] = (device const block_q8_0 *) ((device char *) src0 + offset0);
+ }
+
+ float sumf[NR0] = { 0.f };
+
+ const short ix = tiisg/(NW/NQ);
+ const short il = tiisg%(NW/NQ);
+
+ const int ib0 = sgitg*NQ + ix;
+
+ float yl[NQ];
+
+ device const float * yb = y + ib0*QK8_0 + il*NQ;
+
+ // each thread in a SIMD group deals with NQ quants at a time
+ for (int ib = ib0; ib < nb; ib += NSG*NQ) {
+ for (short i = 0; i < NQ; ++i) {
+ yl[i] = yb[i];
+ }
+
+ for (short row = 0; row < NR0; row++) {
+ device const int8_t * qs = ax[row][ib].qs + il*NQ;
+
+ float sumq = 0.f;
+ FOR_UNROLL (short i = 0; i < NQ; ++i) {
+ sumq += qs[i] * yl[i];
+ }
+
+ sumf[row] += sumq*ax[row][ib].d;
+ }
+
+ yb += NSG*NQ*QK8_0;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ helper_mv_reduce_and_write<NR0>(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem);
+}
+
+[[host_name("kernel_mul_mv_q8_0_f32")]]
+kernel void kernel_mul_mv_q8_0_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ kernel_mul_mv_q8_0_f32_impl<N_R0_Q8_0, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+// mat-vec kernel processing in chunks of float4
+// chpb - chunks per quantization block
+template<short r1ptg, typename q_t, short chpb, void (*deq_t4)(device const q_t *, short, thread float4 &) >
+void kernel_mul_mv_ext_q4_f32_impl(
+ constant ggml_metal_kargs_mul_mv_ext & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ const short NSG = FC_mul_mv_nsg;
+ const short nxpsg = FC_mul_mv_nxpsg;
+
+ const short chpt = 4; // chunks per thread
+
+ //const short nxpsg = (32);
+ const short nypsg = (32/nxpsg);
+
+ const short tx = tiisg%nxpsg;
+ const short ty = tiisg/nxpsg;
+
+ const int i01 = tgpig.x*(nypsg*NSG) + nypsg*sgitg + ty;
+ const int i11 = tgpig.y*r1ptg;
+ const int i1m = tgpig.z;
+
+ const int i12 = i1m%args.ne12;
+ const int i13 = i1m/args.ne12;
+
+ const uint64_t offset0 = i01*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = i11*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const q_t * xq = (i01 < args.ne01) ? (device const q_t *) (src0 + offset0) + tx/chpb : (device const q_t *) src0;
+
+ device const float4 * y4[r1ptg];
+
+ for (int ir1 = 0; ir1 < r1ptg; ++ir1) {
+ y4[ir1] = (i11 + ir1 < args.ne11) ? (device const float4 *) (src1 + offset1 + ir1*args.nb11) + tx : (device const float4 *) src1;
+ }
+
+ float sumf[r1ptg] = { [ 0 ... r1ptg - 1 ] = 0.0f };
+
+ short cch = tx%chpb; // current chunk index
+
+ for (int ich = tx; 4*ich < args.ne00; ich += chpt*nxpsg) {
+ float4 lx[chpt];
+
+#pragma unroll(chpt)
+ for (short ch = 0; ch < chpt; ++ch) {
+ deq_t4(xq, cch, lx[ch]);
+
+ cch += nxpsg;
+ if (cch >= chpb) {
+ xq += cch/chpb;
+ cch %= chpb;
+ }
+ }
+
+#pragma unroll(chpt)
+ for (short ch = 0; ch < chpt; ++ch) {
+#pragma unroll(r1ptg)
+ for (short ir1 = 0; ir1 < r1ptg; ++ir1) {
+ sumf[ir1] += dot(lx[ch], y4[ir1][ch*nxpsg]);
+ }
+ }
+
+#pragma unroll(r1ptg)
+ for (short ir1 = 0; ir1 < r1ptg; ++ir1) {
+ y4[ir1] += chpt*nxpsg;
+ }
+ }
+
+    // reduce across only the nxpsg lanes that share a row
+ for (short ir1 = 0; ir1 < r1ptg; ++ir1) {
+ if (nxpsg >= 32) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 16);
+ }
+ if (nxpsg >= 16) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 8);
+ }
+ if (nxpsg >= 8) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 4);
+ }
+ if (nxpsg >= 4) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 2);
+ }
+ if (nxpsg >= 2) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 1);
+ }
+
+ //sumf[ir1] = simd_sum(sumf[ir1]);
+ }
+
+ if (tx == 0) {
+ for (short ir1 = 0; ir1 < r1ptg && i11 + ir1 < args.ne11; ++ir1) {
+ device float * dst_f32 = (device float *) dst + (uint64_t)i1m*args.ne0*args.ne1 + (uint64_t)(i11 + ir1)*args.ne0;
+
+ if (i01 < args.ne01) {
+ dst_f32[i01] = sumf[ir1];
+ }
+ }
+ }
+}
+
+// mat-vec kernel processing in chunks of float4x4
+template<short r1ptg, typename q_t, short chpb, void (*deq_t4x4)(device const q_t *, short, thread float4x4 &) >
+void kernel_mul_mv_ext_q4x4_f32_impl(
+ constant ggml_metal_kargs_mul_mv_ext & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ const short NSG = FC_mul_mv_nsg;
+ const short nxpsg = FC_mul_mv_nxpsg;
+
+ const short chpt = 1;
+
+ //const short nxpsg = (32);
+ const short nypsg = (32/nxpsg);
+
+ const short tx = tiisg%nxpsg;
+ const short ty = tiisg/nxpsg;
+
+ const int i01 = tgpig.x*(nypsg*NSG) + nypsg*sgitg + ty;
+ const int i11 = tgpig.y*r1ptg;
+ const int i1m = tgpig.z;
+
+ const int i12 = i1m%args.ne12;
+ const int i13 = i1m/args.ne12;
+
+ const uint64_t offset0 = i01*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = i11*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const q_t * xq = (i01 < args.ne01) ? (device const q_t *) (src0 + offset0) + tx/chpb : (device const q_t *) src0;
+
+ device const float4x4 * y4x4[r1ptg];
+
+ for (int ir1 = 0; ir1 < r1ptg; ++ir1) {
+ y4x4[ir1] = (i11 + ir1 < args.ne11) ? (device const float4x4 *) (src1 + offset1 + ir1*args.nb11) + tx : (device const float4x4 *) src1;
+ }
+
+ float sumf[r1ptg] = { [ 0 ... r1ptg - 1 ] = 0.0f };
+
+ short cch = tx%chpb;
+
+ for (int ich = tx; 16*ich < args.ne00; ich += chpt*nxpsg) {
+ float4x4 lx[chpt];
+
+#pragma unroll(chpt)
+ for (short ch = 0; ch < chpt; ++ch) {
+ deq_t4x4(xq, cch, lx[ch]);
+
+ cch += nxpsg;
+ if (cch >= chpb) {
+ xq += cch/chpb;
+ cch %= chpb;
+ }
+ }
+
+#pragma unroll(chpt)
+ for (short ch = 0; ch < chpt; ++ch) {
+#pragma unroll(r1ptg)
+ for (short ir1 = 0; ir1 < r1ptg; ++ir1) {
+ sumf[ir1] +=
+ dot(lx[ch][0], y4x4[ir1][ch*nxpsg][0]) +
+ dot(lx[ch][1], y4x4[ir1][ch*nxpsg][1]) +
+ dot(lx[ch][2], y4x4[ir1][ch*nxpsg][2]) +
+ dot(lx[ch][3], y4x4[ir1][ch*nxpsg][3]);
+
+ }
+ }
+
+#pragma unroll(r1ptg)
+ for (short ir1 = 0; ir1 < r1ptg; ++ir1) {
+ y4x4[ir1] += chpt*nxpsg;
+ }
+ }
+
+ for (short ir1 = 0; ir1 < r1ptg; ++ir1) {
+ if (nxpsg >= 32) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 16);
+ }
+ if (nxpsg >= 16) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 8);
+ }
+ if (nxpsg >= 8) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 4);
+ }
+ if (nxpsg >= 4) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 2);
+ }
+ if (nxpsg >= 2) {
+ sumf[ir1] += simd_shuffle_down(sumf[ir1], 1);
+ }
+
+ //sumf[ir1] = simd_sum(sumf[ir1]);
+ }
+
+ if (tx == 0) {
+ for (short ir1 = 0; ir1 < r1ptg && i11 + ir1 < args.ne11; ++ir1) {
+ device float * dst_f32 = (device float *) dst + (uint64_t)i1m*args.ne0*args.ne1 + (uint64_t)(i11 + ir1)*args.ne0;
+
+ if (i01 < args.ne01) {
+ dst_f32[i01] = sumf[ir1];
+ }
+ }
+ }
+}
+
+// dispatchers needed for compile-time nxpsg
+// epb - elements per quantization block
+template<short r1ptg, typename q_t, short epb, void (*deq_t4)(device const q_t *, short, thread float4 &)>
+kernel void kernel_mul_mv_ext_q4_f32_disp(
+ constant ggml_metal_kargs_mul_mv_ext & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ kernel_mul_mv_ext_q4_f32_impl<r1ptg, q_t, epb/4, deq_t4>(args, src0, src1, dst, tgpig, tiisg, sgitg);
+}
+
+template<short r1ptg, typename q_t, short epb, void (*deq_t4x4)(device const q_t *, short, thread float4x4 &)>
+kernel void kernel_mul_mv_ext_q4x4_f32_disp(
+ constant ggml_metal_kargs_mul_mv_ext & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ kernel_mul_mv_ext_q4x4_f32_impl<r1ptg, q_t, epb/16, deq_t4x4>(args, src0, src1, dst, tgpig, tiisg, sgitg);
+}
+
+typedef decltype(kernel_mul_mv_ext_q4_f32_disp <2, block_q8_0, 32, dequantize_q8_0_t4>) mul_mv_ext_q4_f32_t;
+typedef decltype(kernel_mul_mv_ext_q4x4_f32_disp<2, block_q4_K, 256, dequantize_q4_K>) mul_mv_ext_q4x4_f32_t;
+
+template [[host_name("kernel_mul_mv_ext_f32_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, float4, 4, dequantize_f32_t4>;
+template [[host_name("kernel_mul_mv_ext_f32_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, float4, 4, dequantize_f32_t4>;
+template [[host_name("kernel_mul_mv_ext_f32_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, float4, 4, dequantize_f32_t4>;
+template [[host_name("kernel_mul_mv_ext_f32_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, float4, 4, dequantize_f32_t4>;
+
+template [[host_name("kernel_mul_mv_ext_f16_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, half4, 4, dequantize_f16_t4>;
+template [[host_name("kernel_mul_mv_ext_f16_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, half4, 4, dequantize_f16_t4>;
+template [[host_name("kernel_mul_mv_ext_f16_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, half4, 4, dequantize_f16_t4>;
+template [[host_name("kernel_mul_mv_ext_f16_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, half4, 4, dequantize_f16_t4>;
+
+template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q4_0, 32, dequantize_q4_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q4_0, 32, dequantize_q4_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q4_0, 32, dequantize_q4_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q4_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q4_0, 32, dequantize_q4_0_t4>;
+
+template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q4_1, 32, dequantize_q4_1_t4>;
+template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q4_1, 32, dequantize_q4_1_t4>;
+template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q4_1, 32, dequantize_q4_1_t4>;
+template [[host_name("kernel_mul_mv_ext_q4_1_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q4_1, 32, dequantize_q4_1_t4>;
+
+template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q5_0, 32, dequantize_q5_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q5_0, 32, dequantize_q5_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q5_0, 32, dequantize_q5_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q5_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q5_0, 32, dequantize_q5_0_t4>;
+
+template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q5_1, 32, dequantize_q5_1_t4>;
+template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q5_1, 32, dequantize_q5_1_t4>;
+template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q5_1, 32, dequantize_q5_1_t4>;
+template [[host_name("kernel_mul_mv_ext_q5_1_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q5_1, 32, dequantize_q5_1_t4>;
+
+template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_q8_0, 32, dequantize_q8_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_q8_0, 32, dequantize_q8_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_q8_0, 32, dequantize_q8_0_t4>;
+template [[host_name("kernel_mul_mv_ext_q8_0_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_q8_0, 32, dequantize_q8_0_t4>;
+
+template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_mxfp4, 32, dequantize_mxfp4_t4>;
+template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_mxfp4, 32, dequantize_mxfp4_t4>;
+template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_mxfp4, 32, dequantize_mxfp4_t4>;
+template [[host_name("kernel_mul_mv_ext_mxfp4_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_mxfp4, 32, dequantize_mxfp4_t4>;
+
+template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_2")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<2, block_iq4_nl, 32, dequantize_iq4_nl_t4>;
+template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_3")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<3, block_iq4_nl, 32, dequantize_iq4_nl_t4>;
+template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_4")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<4, block_iq4_nl, 32, dequantize_iq4_nl_t4>;
+template [[host_name("kernel_mul_mv_ext_iq4_nl_f32_r1_5")]] kernel mul_mv_ext_q4_f32_t kernel_mul_mv_ext_q4_f32_disp<5, block_iq4_nl, 32, dequantize_iq4_nl_t4>;
+
+template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q4_K, 256, dequantize_q4_K>;
+template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q4_K, 256, dequantize_q4_K>;
+template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q4_K, 256, dequantize_q4_K>;
+template [[host_name("kernel_mul_mv_ext_q4_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q4_K, 256, dequantize_q4_K>;
+
+template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q5_K, 256, dequantize_q5_K>;
+template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q5_K, 256, dequantize_q5_K>;
+template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q5_K, 256, dequantize_q5_K>;
+template [[host_name("kernel_mul_mv_ext_q5_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q5_K, 256, dequantize_q5_K>;
+
+template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_2")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<2, block_q6_K, 256, dequantize_q6_K>;
+template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_3")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<3, block_q6_K, 256, dequantize_q6_K>;
+template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_4")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<4, block_q6_K, 256, dequantize_q6_K>;
+template [[host_name("kernel_mul_mv_ext_q6_K_f32_r1_5")]] kernel mul_mv_ext_q4x4_f32_t kernel_mul_mv_ext_q4x4_f32_disp<5, block_q6_K, 256, dequantize_q6_K>;
+
+template<typename T0, typename T1, short NR0, typename args_t>
+void kernel_mul_mv_t_t_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ constexpr short NW = N_SIMDWIDTH;
+ constexpr short NB = 32;
+ constexpr short NF = 8;
+
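+    // the row (ne00) is split into blocks of NB elements; each thread loads NF contiguous values of y at a time,
+    // so NW/NF threads cover one block and each simdgroup processes NF consecutive blocks per iteration
+    // (the ne00 % NB tail is handled by the scalar loop at the end)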
+ const int nb = args.ne00/NB;
+
+ const int r0 = tgpig.x*NR0;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ //device const T0 * x = (device const T0 *) (src0 + offset0);
+ device const T1 * y = (device const T1 *) (src1 + offset1);
+
+ // pointers to src0 rows
+ device const T0 * ax [NR0];
+ FOR_UNROLL (short row = 0; row < NR0; ++row) {
+ const uint64_t offset0 = (r0 + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+
+ ax[row] = (device const T0 *) ((device char *) src0 + offset0);
+ }
+
+ float sumf[NR0] = { 0.f };
+
+ const short ix = tiisg/(NW/NF);
+ const short il = tiisg%(NW/NF);
+
+ const int ib0 = sgitg*NF + ix;
+
+ T1 yl[NF];
+
+ device const T1 * yb = y + (ib0*NB + il*NF);
+
+ for (int ib = ib0; ib < nb; ib += NSG*NF) {
+ for (short i = 0; i < NF; ++i) {
+ yl[i] = yb[i];
+ }
+
+ for (short row = 0; row < NR0; row++) {
+ device const T0 * xb = ax[row] + (ib*NB + il*NF);
+
+ float sumq = 0.f;
+ FOR_UNROLL (short i = 0; i < NF; ++i) {
+ sumq += xb[i] * yl[i];
+ }
+
+ sumf[row] += sumq;
+ }
+
+ yb += NSG*NF*NW;
+ }
+
+ for (int i = nb*NB + sgitg*NW + tiisg; i < args.ne00; i += NW*NSG) {
+ for (short row = 0; row < NR0; row++) {
+ sumf[row] += ax[row][i] * y[i];
+ }
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ helper_mv_reduce_and_write<NR0>(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem);
+}
+
+template<typename T0, typename T1, typename args_t>
+void kernel_mul_mv_t_t_disp(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ switch (args.nr0) {
+ //case 1: kernel_mul_mv_t_t_impl<T0, T1, 1, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+ case 2: kernel_mul_mv_t_t_impl<T0, T1, 2, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+ //case 3: kernel_mul_mv_t_t_impl<T0, T1, 3, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+ //case 4: kernel_mul_mv_t_t_impl<T0, T1, 4, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+ }
+}
+
+template<typename T0, typename T1>
+kernel void kernel_mul_mv_t_t(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ kernel_mul_mv_t_t_disp<T0, T1, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+typedef decltype(kernel_mul_mv_t_t<half, half>) mul_mv_t_t;
+
+template [[host_name("kernel_mul_mv_f32_f32")]] kernel mul_mv_t_t kernel_mul_mv_t_t<float, float>;
+template [[host_name("kernel_mul_mv_f16_f32")]] kernel mul_mv_t_t kernel_mul_mv_t_t<half, float>;
+template [[host_name("kernel_mul_mv_f16_f16")]] kernel mul_mv_t_t kernel_mul_mv_t_t<half, half>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_mul_mv_bf16_f32")]] kernel mul_mv_t_t kernel_mul_mv_t_t<bfloat, float>;
+template [[host_name("kernel_mul_mv_bf16_bf16")]] kernel mul_mv_t_t kernel_mul_mv_t_t<bfloat, bfloat>;
+#endif
+
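+// same as kernel_mul_mv_t_t_impl, but uses 4-wide vector loads (T04/T14) and dot() for the inner product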
+template<typename T0, typename T04, typename T1, typename T14, short NR0, typename args_t>
+void kernel_mul_mv_t_t_4_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ constexpr short NW = N_SIMDWIDTH;
+ constexpr short NB = 32;
+ constexpr short NF = 16;
+ constexpr short NF4 = NF/4;
+
+ const int nb = args.ne00/NB;
+
+ const int r0 = tgpig.x*NR0;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ //const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const T1 * y = (device const T1 *) (src1 + offset1);
+ device const T14 * y4 = (device const T14 *) (src1 + offset1);
+
+ // pointers to src0 rows
+ device const T0 * ax [NR0];
+ device const T04 * ax4[NR0];
+ FOR_UNROLL (short row = 0; row < NR0; ++row) {
+ const uint64_t offset0 = (r0 + row)*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+
+ ax [row] = (device const T0 *) ((device char *) src0 + offset0);
+ ax4[row] = (device const T04 *) ((device char *) src0 + offset0);
+ }
+
+ float sumf[NR0] = { 0.f };
+
+ const short ix = tiisg/(NW/NF);
+ const short il = tiisg%(NW/NF);
+
+ const int ib0 = sgitg*NF + ix;
+
+ T14 yl4[NF4];
+
+ device const T14 * yb4 = y4 + (ib0*NB + il*NF)/4;
+
+ for (int ib = ib0; ib < nb; ib += NSG*NF) {
+ for (short i = 0; i < NF4; ++i) {
+ yl4[i] = yb4[i];
+ }
+
+ for (short row = 0; row < NR0; row++) {
+ device const T04 * xb4 = ax4[row] + (ib*NB + il*NF)/4;
+
+ float sumq = 0.f;
+ FOR_UNROLL (short i = 0; i < NF4; ++i) {
+ sumq += dot(float4(xb4[i]), float4(yl4[i]));
+ }
+
+ sumf[row] += sumq;
+ }
+
+ yb4 += NSG*NF*NW/4;
+ }
+
+ for (int i = nb*NB + sgitg*NW + tiisg; i < args.ne00; i += NW*NSG) {
+ for (short row = 0; row < NR0; row++) {
+ sumf[row] += ax[row][i] * y[i];
+ }
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ helper_mv_reduce_and_write<NR0>(dst_f32, sumf, r0, args.ne01, tiisg, sgitg, shmem);
+}
+
+template<typename T0, typename T04, typename T1, typename T14, typename args_t>
+void kernel_mul_mv_t_t_4_disp(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ switch (args.nr0) {
+ //case 1: kernel_mul_mv_t_t_4_impl<T0, T04, T1, T14, 1, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+ case 2: kernel_mul_mv_t_t_4_impl<T0, T04, T1, T14, 2, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+ //case 3: kernel_mul_mv_t_t_4_impl<T0, T04, T1, T14, 3, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+ //case 4: kernel_mul_mv_t_t_4_impl<T0, T04, T1, T14, 4, args_t>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg); break;
+    }
+}
+
+template<typename T0, typename T04, typename T1, typename T14>
+kernel void kernel_mul_mv_t_t_4(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ kernel_mul_mv_t_t_4_disp<T0, T04, T1, T14, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+typedef decltype(kernel_mul_mv_t_t_4<half, half4, half, half4>) mul_mv_t_t_4;
+
+template [[host_name("kernel_mul_mv_f32_f32_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4<float, float4, float, float4>;
+template [[host_name("kernel_mul_mv_f16_f32_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4<half, half4, float, float4>;
+template [[host_name("kernel_mul_mv_f16_f16_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4<half, half4, half, half4>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_mul_mv_bf16_f32_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4<bfloat, bfloat4, float, float4>;
+template [[host_name("kernel_mul_mv_bf16_bf16_4")]] kernel mul_mv_t_t_4 kernel_mul_mv_t_t_4<bfloat, bfloat4, bfloat, bfloat4>;
+#endif
+
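+// fallback path: each thread handles one src0 row (r0) with a plain scalar dot product - no simdgroup reduction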
+template<typename T0, typename T1, typename args_t>
+void kernel_mul_mv_t_t_short_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig,
+ ushort tiisg) {
+ const int r0 = tgpig.x*32 + tiisg;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ if (r0 >= args.ne01) {
+ return;
+ }
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = r0*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+
+ device const T0 * x = (device const T0 *) (src0 + offset0);
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1;
+
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const T1 * y = (device const T1 *) (src1 + offset1);
+
+ float res = 0.0f;
+
+ for (int i = 0; i < args.ne00; ++i) {
+ res += (float) x[i] * (float) y[i];
+ }
+
+ dst_f32[(uint64_t)r1*args.ne0 + r0] = res;
+}
+
+template<typename T0, typename T1>
+kernel void kernel_mul_mv_t_t_short(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]]) {
+ kernel_mul_mv_t_t_short_impl<T0, T1, constant ggml_metal_kargs_mul_mv &>(
+ args,
+ src0,
+ src1,
+ dst,
+ tgpig,
+ tiisg);
+}
+
+typedef decltype(kernel_mul_mv_t_t_short<half, half>) mul_mv_t_t_short_t;
+
+template [[host_name("kernel_mul_mv_f32_f32_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short<float, float>;
+template [[host_name("kernel_mul_mv_f16_f32_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short<half, float>;
+template [[host_name("kernel_mul_mv_f16_f16_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short<half, half>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_mul_mv_bf16_f32_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short<bfloat, float>;
+template [[host_name("kernel_mul_mv_bf16_bf16_short")]] kernel mul_mv_t_t_short_t kernel_mul_mv_t_t_short<bfloat, bfloat>;
+#endif
+
+constant bool FC_rope_is_imrope [[function_constant(FC_ROPE + 0)]];
+
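+// ramp that is 1 for i0/2 <= low and 0 for i0/2 >= high; used by rope_yarn to blend interpolated and extrapolated angles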
+static float rope_yarn_ramp(const float low, const float high, const int i0) {
+ const float y = (i0 / 2 - low) / max(0.001f, high - low);
+ return 1.0f - min(1.0f, max(0.0f, y));
+}
+
+// YaRN algorithm based on LlamaYaRNScaledRotaryEmbedding.py from https://github.com/jquesnelle/yarn
+// MIT licensed. Copyright (c) 2023 Jeffrey Quesnelle and Bowen Peng.
+static void rope_yarn(
+ float theta_extrap, float freq_scale, float corr_dims[2], int i0, float ext_factor, float mscale,
+ thread float * cos_theta, thread float * sin_theta) {
+ // Get n-d rotational scaling corrected for extrapolation
+ float theta_interp = freq_scale * theta_extrap;
+ float theta = theta_interp;
+ if (ext_factor != 0.0f) {
+ float ramp_mix = rope_yarn_ramp(corr_dims[0], corr_dims[1], i0) * ext_factor;
+ theta = theta_interp * (1 - ramp_mix) + theta_extrap * ramp_mix;
+
+ // Get n-d magnitude scaling corrected for interpolation
+ mscale *= 1.0f + 0.1f * log(1.0f / freq_scale);
+ }
+ *cos_theta = cos(theta) * mscale;
+ *sin_theta = sin(theta) * mscale;
+}
+
+// Apparently solving `n_rot = 2pi * x * base^((2 * max_pos_emb) / n_dims)` for x, we get
+// `corr_fac(n_rot) = n_dims * log(max_pos_emb / (n_rot * 2pi)) / (2 * log(base))`
+static float rope_yarn_corr_factor(int n_dims, int n_ctx_orig, float n_rot, float base) {
+ return n_dims * log(n_ctx_orig / (n_rot * 2 * M_PI_F)) / (2 * log(base));
+}
+
+static void rope_yarn_corr_dims(
+ int n_dims, int n_ctx_orig, float freq_base, float beta_fast, float beta_slow, float dims[2]
+) {
+ // start and end correction dims
+ dims[0] = max(0.0f, floor(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_fast, freq_base)));
+ dims[1] = min(n_dims - 1.0f, ceil(rope_yarn_corr_factor(n_dims, n_ctx_orig, beta_slow, freq_base)));
+}
+
+template<typename T>
+kernel void kernel_rope_norm(
+ constant ggml_metal_kargs_rope & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * src2,
+ device char * dst,
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 tptg [[threads_per_threadgroup]],
+ uint3 tgpig[[threadgroup_position_in_grid]]) {
+ const int i3 = tgpig[2];
+ const int i2 = tgpig[1];
+ const int i1 = tgpig[0];
+
+ float corr_dims[2];
+ rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims);
+
+ device const int32_t * pos = (device const int32_t *) src1;
+
+ const float theta_base = (float) pos[i2];
+ const float inv_ndims = -1.f/args.n_dims;
+
+ float cos_theta;
+ float sin_theta;
+
+ for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) {
+ if (i0 < args.n_dims) {
+ const int ic = i0/2;
+
+ const float theta = theta_base * pow(args.freq_base, inv_ndims*i0);
+
+ const float freq_factor = args.src2 ? ((device const float *) src2)[ic] : 1.0f;
+
+ rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta);
+
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ const float x0 = src[0];
+ const float x1 = src[1];
+
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[1] = x0*sin_theta + x1*cos_theta;
+ } else {
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
+ }
+ }
+}
+
+template<typename T>
+kernel void kernel_rope_neox(
+ constant ggml_metal_kargs_rope & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * src2,
+ device char * dst,
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 tptg [[threads_per_threadgroup]],
+ uint3 tgpig[[threadgroup_position_in_grid]]) {
+ const int i3 = tgpig[2];
+ const int i2 = tgpig[1];
+ const int i1 = tgpig[0];
+
+ float corr_dims[2];
+ rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims);
+
+ device const int32_t * pos = (device const int32_t *) src1;
+
+ const float theta_base = (float) pos[i2];
+ const float inv_ndims = -1.f/args.n_dims;
+
+ float cos_theta;
+ float sin_theta;
+
+ for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) {
+ if (i0 < args.n_dims) {
+ const int ic = i0/2;
+
+ const float theta = theta_base * pow(args.freq_base, inv_ndims*i0);
+
+ const float freq_factor = args.src2 ? ((device const float *) src2)[ic] : 1.0f;
+
+ rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta);
+
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + ic*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + ic*args.nb0);
+
+ const float x0 = src[0];
+ const float x1 = src[args.n_dims/2];
+
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[args.n_dims/2] = x0*sin_theta + x1*cos_theta;
+ } else {
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
+ }
+ }
+}
+
+template<typename T>
+kernel void kernel_rope_multi(
+ constant ggml_metal_kargs_rope & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * src2,
+ device char * dst,
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 tptg [[threads_per_threadgroup]],
+ uint3 tgpig[[threadgroup_position_in_grid]]) {
+ const int i3 = tgpig[2];
+ const int i2 = tgpig[1];
+ const int i1 = tgpig[0];
+
+ float corr_dims[2];
+ rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims);
+
+ device const int32_t * pos = (device const int32_t *) src1;
+
+ const float inv_ndims = -1.f/args.n_dims;
+
+ float cos_theta;
+ float sin_theta;
+
+ for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) {
+ if (i0 < args.n_dims) {
+ const int ic = i0/2;
+
+ // mrope theta calculations
+ // note: the rest is the same as kernel_rope_neox
+ const int sect_dims = args.sect_0 + args.sect_1 + args.sect_2 + args.sect_3;
+ const int sec_w01 = args.sect_0 + args.sect_1; // end of section 1
+ const int sec_w012 = args.sect_0 + args.sect_1 + args.sect_2; // end of section 2
+ const int sector = ic % sect_dims;
+
+ float theta_base;
+ if (FC_rope_is_imrope) {
+ if (sector % 3 == 1 && sector < 3 * args.sect_1) { // h
+ theta_base = (float) pos[i2 + args.ne02 * 1];
+ } else if (sector % 3 == 2 && sector < 3 * args.sect_2) { // w
+ theta_base = (float) pos[i2 + args.ne02 * 2];
+ } else if (sector % 3 == 0 && sector < 3 * args.sect_0) { // t
+ theta_base = (float) pos[i2 + args.ne02 * 0];
+ } else { // e
+ theta_base = (float) pos[i2 + args.ne02 * 3];
+ }
+ } else {
+ if (sector < args.sect_0) {
+ theta_base = (float) pos[i2];
+ } else if (sector < sec_w01) {
+ theta_base = (float) pos[i2 + args.ne02 * 1];
+ } else if (sector < sec_w012) {
+ theta_base = (float) pos[i2 + args.ne02 * 2];
+ } else {
+ theta_base = (float) pos[i2 + args.ne02 * 3];
+ }
+ }
+ // end of mrope
+
+ const float theta = theta_base * pow(args.freq_base, inv_ndims*i0);
+
+ const float freq_factor = args.src2 ? ((device const float *) src2)[ic] : 1.0f;
+
+ rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta);
+
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + ic*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + ic*args.nb0);
+
+ const float x0 = src[0];
+ const float x1 = src[args.n_dims/2];
+
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[args.n_dims/2] = x0*sin_theta + x1*cos_theta;
+ } else {
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
+ }
+ }
+}
+
+template<typename T>
+kernel void kernel_rope_vision(
+ constant ggml_metal_kargs_rope & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * src2,
+ device char * dst,
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 tptg [[threads_per_threadgroup]],
+ uint3 tgpig[[threadgroup_position_in_grid]]) {
+ const int i3 = tgpig[2];
+ const int i2 = tgpig[1];
+ const int i1 = tgpig[0];
+
+ float corr_dims[2];
+ rope_yarn_corr_dims(args.n_dims, args.n_ctx_orig, args.freq_base, args.beta_fast, args.beta_slow, corr_dims);
+
+ device const int32_t * pos = (device const int32_t *) src1;
+
+ const float inv_ndims = -1.f/args.n_dims;
+
+ float cos_theta;
+ float sin_theta;
+
+ for (int i0 = 2*tiitg; i0 < args.ne0; i0 += 2*tptg.x) {
+ if (i0 < 2*args.n_dims) { // different from kernel_rope_multi
+ const int ic = i0/2;
+
+            // mrope theta calculations (only 2 dimensions are supported)
+ const int sect_dims = args.sect_0 + args.sect_1;
+ const int sector = ic % sect_dims;
+
+ float p;
+ float theta_base;
+ if (sector < args.sect_1) {
+ p = (float) sector;
+ theta_base = (float) pos[i2];
+ } else {
+ p = (float) sector - args.sect_0;
+ theta_base = (float) pos[i2 + args.ne02];
+ }
+
+ const float theta = theta_base * pow(args.freq_base, 2.0f * inv_ndims * p);
+ // end of mrope
+
+ const float freq_factor = args.src2 ? ((device const float *) src2)[ic] : 1.0f;
+
+ rope_yarn(theta/freq_factor, args.freq_scale, corr_dims, i0, args.ext_factor, args.attn_factor, &cos_theta, &sin_theta);
+
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + ic*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + ic*args.nb0);
+
+ const float x0 = src[0];
+ const float x1 = src[args.n_dims]; // different from kernel_rope_multi
+
+ dst_data[0] = x0*cos_theta - x1*sin_theta;
+ dst_data[args.n_dims] = x0*sin_theta + x1*cos_theta; // different from kernel_rope_multi
+ } else {
+ device const T * const src = (device T *)(src0 + i3*args.nb03 + i2*args.nb02 + i1*args.nb01 + i0*args.nb00);
+ device T * dst_data = (device T *)( dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ dst_data[0] = src[0];
+ dst_data[1] = src[1];
+ }
+ }
+}
+
+typedef decltype(kernel_rope_norm<float>) kernel_rope_norm_t;
+typedef decltype(kernel_rope_neox<float>) kernel_rope_neox_t;
+typedef decltype(kernel_rope_multi<float>) kernel_rope_multi_t;
+typedef decltype(kernel_rope_vision<float>) kernel_rope_vision_t;
+
+template [[host_name("kernel_rope_norm_f32")]] kernel kernel_rope_norm_t kernel_rope_norm<float>;
+template [[host_name("kernel_rope_norm_f16")]] kernel kernel_rope_norm_t kernel_rope_norm<half>;
+
+template [[host_name("kernel_rope_neox_f32")]] kernel kernel_rope_neox_t kernel_rope_neox<float>;
+template [[host_name("kernel_rope_neox_f16")]] kernel kernel_rope_neox_t kernel_rope_neox<half>;
+
+template [[host_name("kernel_rope_multi_f32")]] kernel kernel_rope_multi_t kernel_rope_multi<float>;
+template [[host_name("kernel_rope_multi_f16")]] kernel kernel_rope_multi_t kernel_rope_multi<half>;
+
+template [[host_name("kernel_rope_vision_f32")]] kernel kernel_rope_vision_t kernel_rope_vision<float>;
+template [[host_name("kernel_rope_vision_f16")]] kernel kernel_rope_vision_t kernel_rope_vision<half>;
+
+typedef void (im2col_t)(
+ constant ggml_metal_kargs_im2col & args,
+ device const float * x,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]);
+
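+// output layout: [N, OH, OW, CHW] with CHW = IC*KH*KW; taps outside the input are written as 0 (implicit zero padding)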
+template <typename T>
+kernel void kernel_im2col(
+ constant ggml_metal_kargs_im2col & args,
+ device const float * x,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+// const int64_t IC = tgpg[0];
+ const int64_t OH = tgpg[1];
+ const int64_t OW = tgpg[2];
+
+ const int64_t KH = ntg[1];
+ const int64_t KW = ntg[2];
+
+ int64_t in = tpitg[0];
+ const int64_t ikh = tpitg[1];
+ const int64_t ikw = tpitg[2];
+
+ const int64_t iic = tgpig[0];
+ const int64_t ioh = tgpig[1];
+ const int64_t iow = tgpig[2];
+
+ const int64_t iiw = iow*args.s0 + ikw*args.d0 - args.p0;
+ const int64_t iih = ioh*args.s1 + ikh*args.d1 - args.p1;
+
+ int64_t offset_dst = (in*OH*OW + ioh*OW + iow)*args.CHW + (iic*(KH*KW) + ikh*KW + ikw);
+
+ device T * pdst = (device T *) (dst);
+
+ if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) {
+ while (in < args.N) {
+ pdst[offset_dst] = 0.0f;
+ offset_dst += ntg[0]*args.CHW*OH*OW;
+
+ in += ntg[0];
+ }
+ } else {
+ int64_t offset_src = in*args.ofs0 + iic*args.ofs1 + iih*args.IW + iiw;
+
+ while (in < args.N) {
+ pdst[offset_dst] = x[offset_src];
+
+ offset_dst += ntg[0]*args.CHW*OH*OW;
+ offset_src += ntg[0]*args.ofs0;
+
+ in += ntg[0];
+ }
+ }
+}
+
+template [[host_name("kernel_im2col_f32")]] kernel im2col_t kernel_im2col<float>;
+template [[host_name("kernel_im2col_f16")]] kernel im2col_t kernel_im2col<half>;
+
+// TODO: obsolete -- remove
+//typedef void (im2col_ext_t)(
+// constant ggml_metal_kargs_im2col & args,
+// device const float * x,
+// device char * dst,
+// uint3 tgpig[[threadgroup_position_in_grid]],
+// uint3 tgpg[[threadgroups_per_grid]],
+// uint3 tpitg[[thread_position_in_threadgroup]],
+// uint3 ntg[[threads_per_threadgroup]]);
+//
+//template <typename T>
+//kernel void kernel_im2col_ext(
+// constant ggml_metal_kargs_im2col & args,
+// device const float * x,
+// device char * dst,
+// uint3 tgpig[[threadgroup_position_in_grid]],
+// uint3 tgpg[[threadgroups_per_grid]], // tgpg[0] = D x IC x KH x KW, CHW = IC x KH x KW
+// uint3 tpitg[[thread_position_in_threadgroup]],
+// uint3 ntg[[threads_per_threadgroup]]) { // [M, 1, 1]
+// const int64_t KHW = (int64_t)args.KHW;
+//
+// const int64_t d = tgpig[0] / args.CHW;
+// const int64_t chw = tgpig[0] % args.CHW;
+// const int64_t tgpig_0 = chw / KHW; // 0 ~ (IC - 1)
+// const int64_t HW = tgpig[0] % KHW;
+//
+// const int64_t tpitg_0 = (d * ntg[0]) + tpitg[0];
+// if (tpitg_0 >= args.N) {
+// return;
+// }
+//
+// const int64_t tpitg_1 = HW / args.KW;
+// const int64_t tpitg_2 = HW % args.KW;
+//
+// const int64_t iiw = tgpig[2] * args.s0 + tpitg_2 * args.d0 - args.p0;
+// const int64_t iih = tgpig[1] * args.s1 + tpitg_1 * args.d1 - args.p1;
+//
+// const int64_t offset_dst =
+// (tpitg_0 * tgpg[1] * tgpg[2] + tgpig[1] * tgpg[2] + tgpig[2]) * args.CHW +
+// (tgpig_0 * KHW + tpitg_1 * args.KW + tpitg_2);
+//
+// device T * pdst = (device T *) (dst);
+//
+// if (iih < 0 || iih >= args.IH || iiw < 0 || iiw >= args.IW) {
+// pdst[offset_dst] = 0.0f;
+// } else {
+// const int64_t offset_src = tpitg_0 * args.ofs0 + tgpig_0 * args.ofs1;
+// pdst[offset_dst] = x[offset_src + iih * args.IW + iiw];
+// }
+//}
+//
+//template [[host_name("kernel_im2col_ext_f32")]] kernel im2col_ext_t kernel_im2col_ext<float>;
+//template [[host_name("kernel_im2col_ext_f16")]] kernel im2col_ext_t kernel_im2col_ext<half>;
+
+template <typename TK>
+kernel void kernel_conv_2d(
+ constant ggml_metal_kargs_conv_2d & args,
+ device const char * weights,
+ device const char * src,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ const uint threads_per_tg = ntg.x * ntg.y * ntg.z;
+ const uint tg_index = (tgpig.z * tgpg.y + tgpig.y) * tgpg.x + tgpig.x;
+ const uint local_thread = tpitg.z * (ntg.x * ntg.y) + tpitg.y * ntg.x + tpitg.x;
+ const uint thread_index = tg_index * threads_per_tg + local_thread;
+ const uint64_t total_threads = (uint64_t) threads_per_tg * tgpg.x * tgpg.y * tgpg.z;
+ const uint64_t total_outputs = (uint64_t) args.N * args.OC * args.OH * args.OW;
+
+ for (uint64_t index = thread_index; index < total_outputs; index += total_threads) {
+ uint64_t tmp = index;
+
+ const int32_t ow = tmp % args.OW; tmp /= args.OW;
+ const int32_t oh = tmp % args.OH; tmp /= args.OH;
+ const int32_t oc = tmp % args.OC; tmp /= args.OC;
+ const int32_t n = tmp;
+
+ float acc = 0.0f;
+
+ const int32_t base_x = ow*args.s0 - args.p0;
+ const int32_t base_y = oh*args.s1 - args.p1;
+
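+        // clamp the kernel tap ranges so that iy = base_y + ky*d1 and ix = base_x + kx*d0 stay inside the input;
+        // out-of-bounds taps contribute zero (implicit zero padding), so they are skipped entirely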
+ int32_t ky_start = 0;
+ if (base_y < 0) {
+ ky_start = (-base_y + args.d1 - 1)/args.d1;
+ }
+ int32_t ky_end = args.KH;
+ const int32_t y_max = args.IH - 1 - base_y;
+ if (y_max < 0) {
+ ky_end = ky_start;
+ } else if (base_y + (args.KH - 1)*args.d1 >= args.IH) {
+ ky_end = min(ky_end, y_max/args.d1 + 1);
+ }
+
+ int32_t kx_start = 0;
+ if (base_x < 0) {
+ kx_start = (-base_x + args.d0 - 1)/args.d0;
+ }
+ int32_t kx_end = args.KW;
+ const int32_t x_max = args.IW - 1 - base_x;
+ if (x_max < 0) {
+ kx_end = kx_start;
+ } else if (base_x + (args.KW - 1)*args.d0 >= args.IW) {
+ kx_end = min(kx_end, x_max/args.d0 + 1);
+ }
+
+ if (ky_start < ky_end && kx_start < kx_end) {
+ const uint64_t src_base_n = (uint64_t) n * args.nb13;
+ const uint64_t w_base_oc = (uint64_t) oc * args.nb03;
+
+ for (int32_t ic = 0; ic < args.IC; ++ic) {
+ const uint64_t src_base_nc = src_base_n + (uint64_t) ic * args.nb12;
+ const uint64_t w_base_ocic = w_base_oc + (uint64_t) ic * args.nb02;
+
+ for (int32_t ky = ky_start; ky < ky_end; ++ky) {
+ const int32_t iy = base_y + ky*args.d1;
+ const uint64_t src_base_row = src_base_nc + (uint64_t) iy * args.nb11;
+ const uint64_t w_base_row = w_base_ocic + (uint64_t) ky * args.nb01;
+
+ for (int32_t kx = kx_start; kx < kx_end; ++kx) {
+ const int32_t ix = base_x + kx*args.d0;
+ const uint64_t src_offs = src_base_row + (uint64_t) ix * args.nb10;
+ const uint64_t w_offs = w_base_row + (uint64_t) kx * args.nb00;
+
+ const float x = *(device const float *)(src + src_offs);
+ const float w = (float) (*(device const TK *)(weights + w_offs));
+
+ acc += x * w;
+ }
+ }
+ }
+ }
+
+ const uint64_t dst_offs =
+ (uint64_t) n * args.nb3 +
+ (uint64_t) oc * args.nb2 +
+ (uint64_t) oh * args.nb1 +
+ (uint64_t) ow * args.nb0;
+
+ *(device float *)(dst + dst_offs) = acc;
+ }
+}
+
+template [[host_name("kernel_conv_2d_f32_f32")]]
+kernel void kernel_conv_2d<float>(
+ constant ggml_metal_kargs_conv_2d & args,
+ device const char * weights,
+ device const char * src,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]);
+
+template [[host_name("kernel_conv_2d_f16_f32")]]
+kernel void kernel_conv_2d<half>(
+ constant ggml_metal_kargs_conv_2d & args,
+ device const char * weights,
+ device const char * src,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]);
+
+typedef void (conv_transpose_1d_t)(
+ constant ggml_metal_kargs_conv_transpose_1d & args,
+ device const float * src0,
+ device const float * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]]);
+
+template <typename T>
+kernel void kernel_conv_transpose_1d(
+ constant ggml_metal_kargs_conv_transpose_1d & args,
+ device const T * src0,
+ device const float * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]]) {
+
+ float v = 0.0f;
+
+ for (int64_t c = 0; c < args.IC; c++) {
+ const int32_t kernel_offset = c * tgpg[1] * args.K + args.K * tgpig[1];
+ const int32_t input_offset = c * args.IL;
+
+ for (int64_t i = 0; i < args.IL; i++) {
+ if (tgpig[0] >= i * args.s0 && tgpig[0] < i * args.s0 + args.K) {
+ v += src0[kernel_offset + tgpig[0] - i * args.s0] * src1[input_offset + i];
+ }
+ }
+ }
+
+ device float * dst_ptr = (device float *) (dst + tgpig[0] * args.nb0 + tgpig[1] * args.nb1);
+
+ dst_ptr[0] = v;
+}
+
+template [[host_name("kernel_conv_transpose_1d_f32_f32")]]
+kernel void kernel_conv_transpose_1d<float>(
+ constant ggml_metal_kargs_conv_transpose_1d & args,
+ device const float * src0,
+ device const float * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]]);
+
+template [[host_name("kernel_conv_transpose_1d_f16_f32")]]
+kernel void kernel_conv_transpose_1d<half>(
+ constant ggml_metal_kargs_conv_transpose_1d & args,
+ device const half * src0,
+ device const float * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]]);
+
+
+typedef void (conv_transpose_2d_t)(
+ constant ggml_metal_kargs_conv_transpose_2d & args,
+ device const float * src0,
+ device const float * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]]);
+
+template <typename T>
+kernel void kernel_conv_transpose_2d(
+ constant ggml_metal_kargs_conv_transpose_2d & args,
+ device const T * src0,
+ device const float * src1,
+ device char * dst,
+ threadgroup float * shared_sum [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ const int64_t out_x = tgpig[0];
+ const int64_t out_y = tgpig[1];
+ const int64_t out_c = tgpig[2];
+
+ const int64_t kw = tpitg[0];
+ const int64_t kh = tpitg[1];
+
+ float v = 0.0f;
+
+ for (int64_t in_c = 0; in_c < args.IC; in_c++) {
+ int64_t in_y = out_y - kh;
+
+ if (in_y < 0 || in_y % args.s0) continue;
+
+ in_y /= args.s0;
+
+ if (in_y >= args.IH) continue;
+
+ int64_t in_x = out_x - kw;
+
+ if (in_x < 0 || in_x % args.s0) continue;
+
+ in_x /= args.s0;
+
+ if (in_x >= args.IW) continue;
+
+ const int64_t input_idx = (args.IW * args.IH) * in_c + (args.IW) * in_y + in_x;
+ const int64_t kernel_idx = (args.KH * args.KW * args.OC) * in_c + (args.KH * args.KW) * out_c + (args.KW) * kh + kw;
+
+ v += (float)src0[kernel_idx] * src1[input_idx];
+ }
+
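+    // reduce the per-thread partial sums across the threadgroup; thread 0 writes the final value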
+ const uint tid = tpitg.y * ntg.x + tpitg.x;
+ shared_sum[tid] = v;
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (tid == 0) {
+ float total = 0.0f;
+ const uint num_threads = ntg.x * ntg.y;
+ for (uint i = 0; i < num_threads; i++) {
+ total += shared_sum[i];
+ }
+
+ device float * dst_ptr = (device float *) (dst + out_x*args.nb0 + out_y * args.nb1 + out_c*args.nb2);
+ dst_ptr[0] = total;
+ }
+}
+
+template [[host_name("kernel_conv_transpose_2d_f32_f32")]]
+kernel void kernel_conv_transpose_2d<float>(
+ constant ggml_metal_kargs_conv_transpose_2d & args,
+ device const float * src0,
+ device const float * src1,
+ device char * dst,
+ threadgroup float * shared_sum [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]);
+
+template [[host_name("kernel_conv_transpose_2d_f16_f32")]]
+kernel void kernel_conv_transpose_2d<half>(
+ constant ggml_metal_kargs_conv_transpose_2d & args,
+ device const half * src0,
+ device const float * src1,
+ device char * dst,
+ threadgroup float * shared_sum [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]);
+
+kernel void kernel_upscale_f32(
+ constant ggml_metal_kargs_upscale & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ const int64_t i3 = tgpig.z;
+ const int64_t i2 = tgpig.y;
+ const int64_t i1 = tgpig.x;
+
+ const int64_t i03 = i3/args.sf3;
+ const int64_t i02 = i2/args.sf2;
+ const int64_t i01 = i1/args.sf1;
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ const int64_t i00 = i0/args.sf0;
+
+ device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00);
+ device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ dst_ptr[0] = src0_ptr[0];
+ }
+}
+
+kernel void kernel_pad_f32(
+ constant ggml_metal_kargs_pad & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ const int64_t i3 = tgpig.z;
+ const int64_t i2 = tgpig.y;
+ const int64_t i1 = tgpig.x;
+
+ const int64_t i03 = i3;
+ const int64_t i02 = i2;
+ const int64_t i01 = i1;
+
+ device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01);
+ device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1);
+
+ if (i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) {
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ if (i0 < args.ne00) {
+ dst_ptr[i0] = src0_ptr[i0];
+ } else {
+ dst_ptr[i0] = 0.0f;
+ }
+ }
+
+ return;
+ }
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ dst_ptr[i0] = 0.0f;
+ }
+}
+
+kernel void kernel_pad_reflect_1d_f32(
+ constant ggml_metal_kargs_pad_reflect_1d & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tgpg[[threadgroups_per_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ const int64_t i3 = tgpig.z;
+ const int64_t i2 = tgpig.y;
+ const int64_t i1 = tgpig.x;
+
+ const int64_t i03 = i3;
+ const int64_t i02 = i2;
+ const int64_t i01 = i1;
+
+ device const float * src0_ptr = (device const float *) (src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01);
+ device float * dst_ptr = (device float *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1);
+
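+    // reflect padding: the left pad mirrors around the first source element and the right pad around the last one, without repeating the edge values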
+ if (i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) {
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ if (i0 < args.p0) {
+ dst_ptr[i0] = src0_ptr[args.p0 - i0];
+ } else if (i0 < args.ne0 - args.p1) {
+ dst_ptr[i0] = src0_ptr[i0 - args.p0];
+ } else {
+ dst_ptr[i0] = src0_ptr[(args.ne0 - args.p1 - args.p0) - (args.p1 + 1 - (args.ne0 - i0)) - 1];
+ }
+ }
+ }
+}
+
+kernel void kernel_arange_f32(
+ constant ggml_metal_kargs_arange & args,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ device float * dst_ptr = (device float *) dst;
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ dst_ptr[i0] = args.start + args.step * i0;
+ }
+}
+
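+// writes cos(t*freq_j) into the first half of each row and sin(t*freq_j) into the second half; for odd dims the last element is set to 0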
+kernel void kernel_timestep_embedding_f32(
+ constant ggml_metal_kargs_timestep_embedding & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint3 tpitg[[thread_position_in_threadgroup]],
+ uint3 ntg[[threads_per_threadgroup]]) {
+
+ int i = tgpig.x;
+ device float * embed_data = (device float *)(dst + i*args.nb1);
+
+ int half_ = args.dim / 2;
+ for (int j = tpitg.x; j < half_; j += ntg.x) {
+ float timestep = ((device float *)src0)[i];
+ float freq = (float)exp(-log((float)args.max_period) * j / half_);
+ float arg = timestep * freq;
+ embed_data[j ] = cos(arg);
+ embed_data[j + half_] = sin(arg);
+ }
+
+ if (args.dim % 2 != 0 && tpitg.x == 0) {
+ embed_data[2 * half_] = 0.f;
+ }
+}
+
+// bitonic sort implementation following the CUDA kernels as reference
+typedef void (argsort_t)(
+ constant ggml_metal_kargs_argsort & args,
+ device const char * src0,
+ device int32_t * dst,
+ threadgroup int32_t * shmem_i32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]);
+
+template<ggml_sort_order order>
+kernel void kernel_argsort_f32_i32(
+ constant ggml_metal_kargs_argsort & args,
+ device const char * src0,
+ device int32_t * dst,
+ threadgroup int32_t * shmem_i32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ // bitonic sort
+ const int col = tpitg[0];
+ const int ib = tgpig[0] / args.ne01;
+
+ const int i00 = ib*ntg.x;
+ const int i01 = tgpig[0] % args.ne01;
+ const int i02 = tgpig[1];
+ const int i03 = tgpig[2];
+
+ device const float * src0_row = (device const float *) (src0 + args.nb01*i01 + args.nb02*i02 + args.nb03*i03);
+
+ // initialize indices
+ shmem_i32[col] = i00 + col;
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
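+    // indices >= ne00 are padding: they compare as +INF for ascending and -INF for descending order, so they always end up at the tail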
+ for (int k = 2; k <= ntg.x; k *= 2) {
+ for (int j = k / 2; j > 0; j /= 2) {
+ int ixj = col ^ j;
+ if (ixj > col) {
+ if ((col & k) == 0) {
+ if (shmem_i32[col] >= args.ne00 ||
+ (shmem_i32[ixj] < args.ne00 && (order == GGML_SORT_ORDER_ASC ?
+ src0_row[shmem_i32[col]] > src0_row[shmem_i32[ixj]] :
+ src0_row[shmem_i32[col]] < src0_row[shmem_i32[ixj]]))
+ ) {
+ SWAP(shmem_i32[col], shmem_i32[ixj]);
+ }
+ } else {
+ if (shmem_i32[ixj] >= args.ne00 ||
+ (shmem_i32[col] < args.ne00 && (order == GGML_SORT_ORDER_ASC ?
+ src0_row[shmem_i32[col]] < src0_row[shmem_i32[ixj]] :
+ src0_row[shmem_i32[col]] > src0_row[shmem_i32[ixj]]))
+ ) {
+ SWAP(shmem_i32[col], shmem_i32[ixj]);
+ }
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+ }
+
+ const int64_t i0 = ib*args.top_k;
+
+ // copy the result to dst without the padding
+ if (i0 + col < args.ne0 && col < args.top_k) {
+ dst += i0 + args.ne0*i01 + args.ne0*args.ne1*i02 + args.ne0*args.ne1*args.ne2*i03;
+
+ dst[col] = shmem_i32[col];
+ }
+}
+
+template [[host_name("kernel_argsort_f32_i32_asc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_ORDER_ASC>;
+template [[host_name("kernel_argsort_f32_i32_desc")]] kernel argsort_t kernel_argsort_f32_i32<GGML_SORT_ORDER_DESC>;
+
+typedef void (argsort_merge_t)(
+ constant ggml_metal_kargs_argsort_merge & args,
+ device const char * src0,
+ device const int32_t * tmp,
+ device int32_t * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]);
+
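+// merge pairs of sorted index runs of length args.len: each thread finds its output range [k0, k1) with a
+// merge-path binary search and then merges the two runs sequentially, keeping at most args.top_k results per row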
+template<ggml_sort_order order>
+kernel void kernel_argsort_merge_f32_i32(
+ constant ggml_metal_kargs_argsort_merge & args,
+ device const char * src0,
+ device const int32_t * tmp,
+ device int32_t * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+
+ const int im = tgpig[0] / args.ne01;
+ const int i01 = tgpig[0] % args.ne01;
+ const int i02 = tgpig[1];
+ const int i03 = tgpig[2];
+
+ const int start = im * (2 * args.len);
+
+ const int len0 = MIN(args.len, MAX(0, args.ne0 - (int)(start)));
+ const int len1 = MIN(args.len, MAX(0, args.ne0 - (int)(start + args.len)));
+
+ const int total = len0 + len1;
+
+ device const int32_t * tmp0 = tmp + start
+ + i01*args.ne0
+ + i02*args.ne0*args.ne01
+ + i03*args.ne0*args.ne01*args.ne02;
+
+ device const int32_t * tmp1 = tmp0 + args.len;
+
+ dst += start
+ + i01*args.top_k
+ + i02*args.top_k*args.ne01
+ + i03*args.top_k*args.ne01*args.ne02;
+
+ device const float * src0_row = (device const float *)(src0
+ + args.nb01*i01
+ + args.nb02*i02
+ + args.nb03*i03);
+
+ if (total == 0) {
+ return;
+ }
+
+ const int chunk = (total + ntg.x - 1) / ntg.x;
+
+ const int k0 = tpitg.x * chunk;
+ const int k1 = MIN(MIN(k0 + chunk, total), args.top_k);
+
+ if (k0 >= args.top_k) {
+ return;
+ }
+
+ if (k0 >= total) {
+ return;
+ }
+
+ int low = k0 > len1 ? k0 - len1 : 0;
+ int high = MIN(k0, len0);
+
+ // binary-search partition (i, j) such that i + j = k
+ while (low < high) {
+ const int mid = (low + high) >> 1;
+
+ const int32_t idx0 = tmp0[mid];
+ const int32_t idx1 = tmp1[k0 - mid - 1];
+
+ const float val0 = src0_row[idx0];
+ const float val1 = src0_row[idx1];
+
+ bool take_left;
+ if (order == GGML_SORT_ORDER_ASC) {
+ take_left = (val0 <= val1);
+ } else {
+ take_left = (val0 >= val1);
+ }
+
+ if (take_left) {
+ low = mid + 1;
+ } else {
+ high = mid;
+ }
+ }
+
+ int i = low;
+ int j = k0 - i;
+
+    // keep the merge fronts in registers
+ int32_t idx0 = 0;
+ float val0 = 0.0f;
+ if (i < len0) {
+ idx0 = tmp0[i];
+ val0 = src0_row[idx0];
+ }
+
+ int32_t idx1 = 0;
+ float val1 = 0.0f;
+ if (j < len1) {
+ idx1 = tmp1[j];
+ val1 = src0_row[idx1];
+ }
+
+ for (int k = k0; k < k1; ++k) {
+ int32_t out_idx;
+
+ if (i >= len0) {
+ while (k < k1) {
+ dst[k++] = tmp1[j++];
+ }
+ break;
+ } else if (j >= len1) {
+ while (k < k1) {
+ dst[k++] = tmp0[i++];
+ }
+ break;
+ } else {
+ bool take_left;
+
+ if (order == GGML_SORT_ORDER_ASC) {
+ take_left = (val0 <= val1);
+ } else {
+ take_left = (val0 >= val1);
+ }
+
+ if (take_left) {
+ out_idx = idx0;
+ ++i;
+ if (i < len0) {
+ idx0 = tmp0[i];
+ val0 = src0_row[idx0];
+ }
+ } else {
+ out_idx = idx1;
+ ++j;
+ if (j < len1) {
+ idx1 = tmp1[j];
+ val1 = src0_row[idx1];
+ }
+ }
+ }
+
+ dst[k] = out_idx;
+ }
+}
+
+template [[host_name("kernel_argsort_merge_f32_i32_asc")]] kernel argsort_merge_t kernel_argsort_merge_f32_i32<GGML_SORT_ORDER_ASC>;
+template [[host_name("kernel_argsort_merge_f32_i32_desc")]] kernel argsort_merge_t kernel_argsort_merge_f32_i32<GGML_SORT_ORDER_DESC>;
+
+constant bool FC_flash_attn_ext_pad_has_mask [[function_constant(FC_FLASH_ATTN_EXT_PAD + 0)]];
+
+constant int32_t FC_flash_attn_ext_pad_ncpsg [[function_constant(FC_FLASH_ATTN_EXT_PAD + 25)]];
+
+// pad the last chunk of C elements of k and v into an extra pad buffer
+kernel void kernel_flash_attn_ext_pad(
+ constant ggml_metal_kargs_flash_attn_ext_pad & args,
+ device const char * k,
+ device const char * v,
+ device const char * mask,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int32_t C = FC_flash_attn_ext_pad_ncpsg;
+
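+    // dst is partitioned into three consecutive regions: the padded K rows, the padded V rows and the padded mask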
+ device char * k_pad = dst;
+ device char * v_pad = k_pad + args.nb11*C*args.ne_12_2*args.ne_12_3;
+ device char * mask_pad = v_pad + args.nb21*C*args.ne_12_2*args.ne_12_3;
+
+ const int32_t icp = args.ne11 % C;
+ const int32_t ic0 = args.ne11 - icp;
+
+ const int32_t i1 = tgpig[0];
+ const int32_t i2 = tgpig[1];
+ const int32_t i3 = tgpig[2];
+
+ if (i2 < args.ne_12_2 && i3 < args.ne_12_3) {
+ device const char * k_src = k + args.nb11*(ic0 + i1) + args.nb12*i2 + args.nb13*i3;
+ device const char * v_src = v + args.nb21*(ic0 + i1) + args.nb22*i2 + args.nb23*i3;
+
+ device char * k_dst = k_pad + args.nb11*i1 + args.nb11*C*i2 + args.nb11*C*args.ne_12_2*i3;
+ device char * v_dst = v_pad + args.nb21*i1 + args.nb21*C*i2 + args.nb21*C*args.ne_12_2*i3;
+
+ if (i1 >= icp) {
+            // the exact value used here is not important, as we rely on masking out the scores in the attention
+ for (uint64_t i = tiitg; i < args.nb11; i += ntg.x) {
+ k_dst[i] = 0;
+ }
+ for (uint64_t i = tiitg; i < args.nb21; i += ntg.x) {
+ v_dst[i] = 0;
+ }
+ } else {
+ for (uint64_t i = tiitg; i < args.nb11; i += ntg.x) {
+ k_dst[i] = k_src[i];
+ }
+ for (uint64_t i = tiitg; i < args.nb21; i += ntg.x) {
+ v_dst[i] = v_src[i];
+ }
+ }
+ }
+
+ if (FC_flash_attn_ext_pad_has_mask) {
+ if (i2 < args.ne32 && i3 < args.ne33) {
+ for (int ib = i1; ib < args.ne31; ib += C) {
+ device const half * mask_src = (device const half *)(mask + args.nb31*ib + args.nb32*i2 + args.nb33*i3) + ic0;
+ device half * mask_dst = (device half *)(mask_pad) + C*ib + C*args.ne31*i2 + C*args.ne31*args.ne32*i3;
+
+ for (int i = tiitg; i < C; i += ntg.x) {
+ if (i >= icp) {
+ mask_dst[i] = -MAXHALF;
+ } else {
+ mask_dst[i] = mask_src[i];
+ }
+ }
+ }
+ }
+ }
+}
+
+constant int32_t FC_flash_attn_ext_blk_nqptg [[function_constant(FC_FLASH_ATTN_EXT_BLK + 24)]];
+constant int32_t FC_flash_attn_ext_blk_ncpsg [[function_constant(FC_FLASH_ATTN_EXT_BLK + 25)]];
+
+// scan the mask and classify each block:
+// 0 - masked (i.e. full of -INF, skip)
+// 1 - not masked (i.e. at least one element of the mask is not -INF)
+// 2 - all zero
+kernel void kernel_flash_attn_ext_blk(
+ constant ggml_metal_kargs_flash_attn_ext_blk & args,
+ device const char * mask,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]]) {
+ // block size C x Q
+ const int32_t Q = FC_flash_attn_ext_blk_nqptg;
+ const int32_t C = FC_flash_attn_ext_blk_ncpsg;
+
+ constexpr short NW = N_SIMDWIDTH;
+
+ const int32_t i3 = tgpig[2]/args.ne32;
+ const int32_t i2 = tgpig[2]%args.ne32;
+ const int32_t i1 = tgpig[1];
+ const int32_t i0 = tgpig[0];
+
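+    // blocks that extend past the end of the mask (ne30) are conservatively marked as not masked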
+ char res = i0*C + C > args.ne30 ? 1 : 0;
+
+ device const half * mask_src = (device const half *) (mask + (i1*Q)*args.nb31 + i2*args.nb32 + i3*args.nb33) + i0*C + tiisg;
+
+ // detailed check of the elements of the block
+ if ((C > NW || Q > 1) && res == 0) {
+ half mmin = MAXHALF;
+ half mmax = -MAXHALF;
+
+ FOR_UNROLL (short j = 0; j < Q; ++j) {
+ FOR_UNROLL (short ii = 0; ii < C/NW; ++ii) {
+ mmin = min(mmin, mask_src[ii*NW]);
+ mmax = max(mmax, mask_src[ii*NW]);
+ }
+
+ mask_src += args.nb31/2;
+ }
+
+ mmin = simd_min(mmin);
+ mmax = simd_max(mmax);
+
+ if (mmax > -MAXHALF) {
+ if (mmin == 0.0 && mmax == 0.0) {
+ res = 2;
+ } else {
+ res = 1;
+ }
+ }
+ }
+
+ const int32_t nblk1 = ((args.ne01 + Q - 1)/Q);
+ const int32_t nblk0 = ((args.ne30 + C - 1)/C);
+
+ if (tiisg == 0) {
+ dst[((i3*args.ne32 + i2)*nblk1 + i1)*nblk0 + i0] = res;
+ }
+}
+
+constant bool FC_flash_attn_ext_has_mask [[function_constant(FC_FLASH_ATTN_EXT + 0)]];
+constant bool FC_flash_attn_ext_has_sinks [[function_constant(FC_FLASH_ATTN_EXT + 1)]];
+constant bool FC_flash_attn_ext_has_bias [[function_constant(FC_FLASH_ATTN_EXT + 2)]];
+constant bool FC_flash_attn_ext_has_scap [[function_constant(FC_FLASH_ATTN_EXT + 3)]];
+constant bool FC_flash_attn_ext_has_kvpad [[function_constant(FC_FLASH_ATTN_EXT + 4)]];
+
+constant bool FC_flash_attn_ext_bc_mask [[function_constant(FC_FLASH_ATTN_EXT + 10)]];
+
+//constant float FC_flash_attn_ext_scale [[function_constant(FC_FLASH_ATTN_EXT + 10)]];
+//constant float FC_flash_attn_ext_max_bias [[function_constant(FC_FLASH_ATTN_EXT + 11)]];
+//constant float FC_flash_attn_ext_logit_softcap [[function_constant(FC_FLASH_ATTN_EXT + 12)]];
+
+constant int32_t FC_flash_attn_ext_ns10 [[function_constant(FC_FLASH_ATTN_EXT + 20)]];
+constant int32_t FC_flash_attn_ext_ns20 [[function_constant(FC_FLASH_ATTN_EXT + 21)]];
+constant int32_t FC_flash_attn_ext_nsg [[function_constant(FC_FLASH_ATTN_EXT + 22)]];
+
+// ref: https://arxiv.org/pdf/2307.08691.pdf
+template<
+ typename q_t, // query types in shared memory
+ typename q4_t,
+ typename q8x8_t,
+ typename k_t, // key types in shared memory
+ typename k4x4_t,
+ typename k8x8_t,
+ typename v_t, // value types in shared memory
+ typename v4x4_t,
+ typename v8x8_t,
+ typename qk_t, // Q*K types
+ typename qk8x8_t,
+ typename s_t, // soft-max types
+ typename s2_t,
+ typename s8x8_t,
+ typename o_t, // attention accumulation types
+ typename o4_t,
+ typename o8x8_t,
+ typename kd4x4_t, // key type in device memory
+ short nl_k,
+ void (*deq_k)(device const kd4x4_t *, short, thread k4x4_t &),
+ typename vd4x4_t, // value type in device memory
+ short nl_v,
+ void (*deq_v)(device const vd4x4_t *, short, thread v4x4_t &),
+ short DK, // K head size
+ short DV, // V head size
+ short Q, // queries per threadgroup
+ short C, // cache items per threadgroup
+ short NSG> // number of simd groups
+void kernel_flash_attn_ext_impl(
+ constant ggml_metal_kargs_flash_attn_ext & args,
+ device const char * q,
+ device const char * k,
+ device const char * v,
+ device const char * mask,
+ device const char * sinks,
+ device const char * pad,
+ device const char * blk,
+ device char * dst,
+ threadgroup half * shmem_f16,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const ushort iq3 = tgpig[2];
+ const ushort iq2 = tgpig[1];
+ const ushort iq1 = tgpig[0]*Q;
+
+#define NS10 (FC_flash_attn_ext_ns10)
+#define NS20 (FC_flash_attn_ext_ns20)
+
+    // note: I had some concerns that using this instead of the ugly macros above was affecting performance
+    // need to re-check carefully and if no regressions are observed - remove the macros
+    // the concern is that using const variables might require extra registers, but it is not clear whether the compiler
+    // is clever enough to avoid this. unfortunately, using constexpr is not possible with FC
+ //const short NS10 = FC_flash_attn_ext_ns10;
+ //const short NS20 = FC_flash_attn_ext_ns20;
+
+ constexpr short KV = 8;
+
+ constexpr short DK4 = DK/4;
+ constexpr short DK8 = DK/8;
+ constexpr short DK16 = DK/16;
+ constexpr short DV4 = DV/4;
+ //constexpr short DV8 = DV/8;
+ constexpr short DV16 = DV/16;
+
+ constexpr short PV = PAD2(DV, 64);
+ constexpr short PV4 = PV/4;
+ constexpr short PV8 = PV/8;
+ //constexpr short PV16 = PV/16;
+
+ constexpr short NW = N_SIMDWIDTH;
+ constexpr short NQ = Q/NSG;
+ constexpr short SH = 2*C; // shared memory per simdgroup (s_t == float)
+
+ constexpr short TS = 2*SH;
+ constexpr short T = DK + 2*PV; // shared memory size per query in (half)
+
+ threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*T); // holds the query data
+ threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*T); // same as above but in q4_t
+ threadgroup o_t * so = (threadgroup o_t *) (shmem_f16 + 0*T + Q*DK); // the result for all queries in 8x8 matrices (the O matrix from the paper)
+ threadgroup o4_t * so4 = (threadgroup o4_t *) (shmem_f16 + 0*T + Q*DK);
+ threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + Q*T); // scratch buffer for attention, mask and diagonal matrix
+ threadgroup s2_t * ss2 = (threadgroup s2_t *) (shmem_f16 + Q*T); // same as above but in s2_t
+
+ threadgroup k_t * sk = (threadgroup k_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // scratch buffer to load K in shared memory
+ threadgroup k4x4_t * sk4x4 = (threadgroup k4x4_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // same as above but in k4x4_t
+
+ threadgroup v_t * sv = (threadgroup v_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // scratch buffer to load V in shared memory
+ threadgroup v4x4_t * sv4x4 = (threadgroup v4x4_t *) (shmem_f16 + sgitg*(4*16*KV) + Q*T + Q*TS); // same as above but in v4x4_t
+
+ // mask storage in shared mem
+ threadgroup half2 * sm2 = (threadgroup half2 *) (shmem_f16 + Q*T + 2*C);
+
+ // per-query mask pointers
+ device const half2 * pm2[NQ];
+
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ pm2[jj] = (device const half2 *) ((device const char *) mask + (iq1 + j)*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33);
+ }
+
+ {
+ const int32_t nblk1 = ((args.ne01 + Q - 1)/Q);
+ const int32_t nblk0 = ((args.ne11 + C - 1)/C);
+
+ blk += (((iq3%args.ne33)*args.ne32 + (iq2%args.ne32))*nblk1 + iq1/Q)*nblk0;
+ }
+
+ {
+ q += iq1*args.nb01 + iq2*args.nb02 + iq3*args.nb03;
+
+ const short ikv2 = iq2/(args.ne02/args.ne_12_2);
+ const short ikv3 = iq3/(args.ne03/args.ne_12_3);
+
+ k += ikv2*args.nb12 + ikv3*args.nb13;
+ v += ikv2*args.nb22 + ikv3*args.nb23;
+ }
+
+ // load heads from Q to shared memory
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ device const float4 * q4 = (device const float4 *) ((device const char *) q + j*args.nb01);
+
+ for (short i = tiisg; i < DK4; i += NW) {
+ if (iq1 + j < args.ne01) {
+ sq4[j*DK4 + i] = (q4_t) q4[i];
+ } else {
+ sq4[j*DK4 + i] = 0;
+ }
+ }
+ }
+
+    // zero out the output accumulator and the attention scratch
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ for (short i = tiisg; i < DV4; i += NW) {
+ so4[j*PV4 + i] = 0;
+ }
+
+ for (short i = tiisg; i < SH; i += NW) {
+ ss[j*SH + i] = 0.0f;
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ float S[NQ] = { [0 ... NQ-1] = 0.0f };
+
+ {
+ float M[NQ] = { [0 ... NQ-1] = -FLT_MAX/2 };
+
+ float slope = 1.0f;
+
+ // ALiBi
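+        //   slope = m0^(h + 1)                    for h <  n_head_log2
+        //           m1^(2*(h - n_head_log2) + 1)  for h >= n_head_log2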
+ if (FC_flash_attn_ext_has_bias) {
+ const short h = iq2;
+
+ const float base = h < args.n_head_log2 ? args.m0 : args.m1;
+ const short exph = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1;
+
+ slope = pow(base, exph);
+ }
+
+ // loop over the KV cache
+ // each simdgroup handles blocks of Q rows and C columns
+ for (int ic0 = 0; ; ++ic0) {
+ int ic = ic0*C;
+ if (ic >= args.ne11) {
+ break;
+ }
+
+ // the last partial chunk uses the pad buffer as source
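+            // pad layout: [ padded K (C rows per KV head) | padded V | padded mask ]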
+ if (FC_flash_attn_ext_has_kvpad && ic + C > args.ne11) {
+ k = pad;
+ v = k + args.nb11*C*args.ne_12_2*args.ne_12_3;
+ mask = v + args.nb21*C*args.ne_12_2*args.ne_12_3;
+
+ const short ikv2 = iq2/(args.ne02/args.ne_12_2);
+ const short ikv3 = iq3/(args.ne03/args.ne_12_3);
+
+ k += (ikv2 + ikv3*args.ne_12_2)*args.nb11*C;
+ v += (ikv2 + ikv3*args.ne_12_2)*args.nb21*C;
+
+ if (!FC_flash_attn_ext_has_mask) {
+ threadgroup half * sm = (threadgroup half *) (sm2);
+
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ for (short i = tiisg; i < C; i += NW) {
+ if (ic + i >= args.ne11) {
+ sm[2*j*SH + i] = -MAXHALF;
+ }
+ }
+ }
+ } else {
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ pm2[jj] = (device const half2 *) ((device const half *) mask +
+ (iq1 + j)*C +
+ (iq2%args.ne32)*(C*args.ne31) +
+ (iq3%args.ne33)*(C*args.ne31*args.ne32));
+ }
+ }
+
+ ic = 0;
+ }
+
+ char blk_cur = 1;
+
+ // read the mask into shared mem
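+            // blk holds a precomputed classification of each (Q x C) block of the mask:
+            //   0 - fully masked     -> skip the block entirely
+            //   1 - partially masked -> load the mask and apply it
+            //   2 - not masked       -> no mask load or add needed (see blk_cur != 2 below)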
+ if (FC_flash_attn_ext_has_mask) {
+ blk_cur = blk[ic0];
+
+ if (blk_cur == 0) {
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ pm2[jj] += NW;
+ }
+
+ continue;
+ }
+
+ if (blk_cur == 1) {
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ if (FC_flash_attn_ext_bc_mask) {
+ sm2[j*SH + tiisg] = (iq1 + j) < args.ne31 ? pm2[jj][tiisg] : half2(-MAXHALF, -MAXHALF);
+ } else {
+ sm2[j*SH + tiisg] = pm2[jj][tiisg];
+ }
+
+ pm2[jj] += NW;
+ }
+ } else if (blk_cur == 2) {
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ pm2[jj] += NW;
+ }
+ }
+
+#if 0
+ // note: old -INF block optimization - obsoleted by pre-computing non-masked blocks
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // used to detect blocks full of -INF
+ // skip only when the entire threadgroup is masked
+ half2 smax2(-MAXHALF/2, -MAXHALF/2);
+
+ FOR_UNROLL (short j = 0; j < Q; ++j) {
+ smax2 = max(smax2, sm2[j*SH + tiisg]);
+ }
+
+ smax2 = simd_max(smax2);
+
+ if (max(smax2[0], smax2[1]) <= -MAXHALF/2) {
+ // this barrier is important
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ continue;
+ }
+#endif
+ }
+
+ // Q*K^T
+            // this is a compile-time check, so it has no runtime overhead
+ if (is_same<kd4x4_t, k4x4_t>::value) {
+ // we can read directly from global memory
+ device const k_t * pk = (device const k_t *) (k + ic*args.nb11);
+ threadgroup const q_t * pq = sq;
+ threadgroup s_t * ps = ss;
+
+ pk += sgitg*(8*NS10);
+ ps += sgitg*(8*1);
+
+ static_assert((C/8) % NSG == 0, "");
+
+ constexpr short NC = (C/8)/NSG;
+
+ FOR_UNROLL (short cc = 0; cc < NC; ++cc) {
+ qk8x8_t mqk = make_filled_simdgroup_matrix<qk_t, 8>((qk_t) 0.0f);
+
+ if (DK % 16 != 0) {
+ k8x8_t mk;
+ q8x8_t mq;
+
+ FOR_UNROLL (short i = 0; i < DK8; ++i) {
+ simdgroup_barrier(mem_flags::mem_none);
+
+ simdgroup_load(mk, pk + 8*i, NS10, 0, true);
+ simdgroup_load(mq, pq + 8*i, DK);
+
+ simdgroup_barrier(mem_flags::mem_none);
+
+ simdgroup_multiply_accumulate(mqk, mq, mk, mqk);
+ }
+ } else {
+ k8x8_t mk[2];
+ q8x8_t mq[2];
+
+ // note: too much unroll can tank the performance for large heads
+ #pragma unroll (MIN(DK8/2, 4*NSG))
+ for (short i = 0; i < DK8/2; ++i) {
+ simdgroup_barrier(mem_flags::mem_none);
+
+ simdgroup_load(mq[0], pq + 0*8 + 16*i, DK);
+ simdgroup_load(mq[1], pq + 1*8 + 16*i, DK);
+
+ simdgroup_load(mk[0], pk + 0*8 + 16*i, NS10, 0, true);
+ simdgroup_load(mk[1], pk + 1*8 + 16*i, NS10, 0, true);
+
+ simdgroup_barrier(mem_flags::mem_none);
+
+ simdgroup_multiply_accumulate(mqk, mq[0], mk[0], mqk);
+ simdgroup_multiply_accumulate(mqk, mq[1], mk[1], mqk);
+ }
+ }
+
+ simdgroup_store(mqk, ps, SH, 0, false);
+
+ pk += 8*(NSG*NS10);
+ ps += 8*(NSG);
+ }
+ } else {
+ // TODO: this is the quantized K cache branch - not optimized yet
+ for (short ccc = 0; ccc < (C/8)/NSG; ++ccc) {
+ const short cc = ccc*NSG + sgitg;
+
+ const short tx = tiisg%4;
+ const short ty = tiisg/4;
+
+ qk8x8_t mqk = make_filled_simdgroup_matrix<qk_t, 8>((qk_t) 0.0f);
+
+ for (short ii = 0; ii < DK16; ii += 4) {
+ device const kd4x4_t * pk4x4 = (device const kd4x4_t *) (k + ((ic + 8*cc + ty)*args.nb11));
+
+ if (DK16%4 == 0) {
+                    // the head size is evenly divisible by 4*16 = 64, so no bound checks are needed
+ {
+ k4x4_t tmp;
+ deq_k(pk4x4 + (ii + tx)/nl_k, (ii + tx)%nl_k, tmp);
+ sk4x4[4*ty + tx] = tmp;
+ }
+
+ simdgroup_barrier(mem_flags::mem_threadgroup);
+
+ FOR_UNROLL (short k = 0; k < 4; ++k) {
+ k8x8_t mk;
+ q8x8_t mq;
+
+ simdgroup_load(mk, sk + 16*k + 0*8, 4*16, 0, true); // transpose
+ simdgroup_load(mq, sq + (2*(ii + k) + 0)*8, DK);
+ simdgroup_multiply_accumulate(mqk, mq, mk, mqk);
+
+ simdgroup_load(mk, sk + 16*k + 1*8, 4*16, 0, true); // transpose
+ simdgroup_load(mq, sq + (2*(ii + k) + 1)*8, DK);
+ simdgroup_multiply_accumulate(mqk, mq, mk, mqk);
+ }
+ } else {
+ if (ii + tx < DK16) {
+ k4x4_t tmp;
+ deq_k(pk4x4 + (ii + tx)/nl_k, (ii + tx)%nl_k, tmp);
+ sk4x4[4*ty + tx] = tmp;
+ }
+
+ simdgroup_barrier(mem_flags::mem_threadgroup);
+
+ for (short k = 0; k < 4 && ii + k < DK16; ++k) {
+ k8x8_t mk;
+ q8x8_t mq;
+
+ simdgroup_load(mk, sk + 16*k + 0*8, 4*16, 0, true); // transpose
+ simdgroup_load(mq, sq + (2*(ii + k) + 0)*8, DK);
+ simdgroup_multiply_accumulate(mqk, mq, mk, mqk);
+
+ simdgroup_load(mk, sk + 16*k + 1*8, 4*16, 0, true); // transpose
+ simdgroup_load(mq, sq + (2*(ii + k) + 1)*8, DK);
+ simdgroup_multiply_accumulate(mqk, mq, mk, mqk);
+ }
+ }
+ }
+
+ simdgroup_store(mqk, ss + 8*cc, SH, 0, false);
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // online softmax
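+            // streaming softmax update per query row j:
+            //   M_new = max(M_old, max_c s_c)
+            //   P_c   = exp(s_c - M_new)
+            //   S_new = S_old*exp(M_old - M_new) + sum_c P_c
+            //   O     = O*exp(M_old - M_new)   (the P*V contribution is accumulated in the next block)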
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ const float m = M[jj];
+
+            // scale and apply the logit softcap / mask
+ float2 s2 = ss2[j*SH/2 + tiisg]*args.scale;
+
+ if (FC_flash_attn_ext_has_scap) {
+ s2 = args.logit_softcap*precise::tanh(s2);
+ }
+
+ // mqk = mqk + slope*mask
+ if (blk_cur != 2) {
+ if (FC_flash_attn_ext_has_bias) {
+ s2 += s2_t(sm2[j*SH + tiisg])*slope;
+ } else {
+ s2 += s2_t(sm2[j*SH + tiisg]);
+ }
+ }
+
+ M[jj] = simd_max(max(M[jj], max(s2[0], s2[1])));
+
+ const float ms = exp(m - M[jj]);
+ const float2 vs2 = exp(s2 - M[jj]);
+
+ S[jj] = S[jj]*ms + simd_sum(vs2[0] + vs2[1]);
+
+ // the P matrix from the paper (Q rows, C columns)
+ ss2[j*SH/2 + tiisg] = vs2;
+
+ if (DV4 % NW == 0) {
+ FOR_UNROLL (short ii = 0; ii < DV4/NW; ++ii) {
+ const short i = ii*NW + tiisg;
+
+ so4[j*PV4 + i] *= ms;
+ }
+ } else {
+ for (short i = tiisg; i < DV4; i += NW) {
+ so4[j*PV4 + i] *= ms;
+ }
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // O = O + (Q*K^T)*V
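+            // accumulate P*V into O: the probabilities P stored in ss are multiplied with the V tiles;
+            // small heads (DV <= 64) consume one 8-wide slab of P per step, larger heads consume two slabs per step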
+ {
+ // we can read directly from global memory
+ if (is_same<vd4x4_t, v4x4_t>::value) {
+ static_assert(PV8 % NSG == 0, "");
+
+ constexpr short NO = PV8/NSG;
+
+ o8x8_t lo[NO];
+
+ {
+ auto sot = so + 8*sgitg;
+
+ FOR_UNROLL (short ii = 0; ii < NO; ++ii) {
+ simdgroup_load(lo[ii], sot, PV, 0, false);
+
+ sot += 8*NSG;
+ }
+ }
+
+ {
+ device const v_t * pv = (device const v_t *) (v + ic*args.nb21);
+
+ pv += 8*sgitg;
+
+ if (DV <= 64) {
+ FOR_UNROLL (short cc = 0; cc < C/8; ++cc) {
+ s8x8_t vs;
+ simdgroup_load(vs, ss + 8*cc, SH, 0, false);
+
+ FOR_UNROLL (short ii = 0; ii < NO/2; ++ii) {
+ v8x8_t mv[2];
+
+ simdgroup_load(mv[0], pv + 0*NSG + 16*ii*NSG, NS20, 0, false);
+ simdgroup_load(mv[1], pv + 8*NSG + 16*ii*NSG, NS20, 0, false);
+
+ simdgroup_multiply_accumulate(lo[2*ii + 0], vs, mv[0], lo[2*ii + 0]);
+ simdgroup_multiply_accumulate(lo[2*ii + 1], vs, mv[1], lo[2*ii + 1]);
+ }
+
+ pv += 8*NS20;
+ }
+ } else {
+ constexpr short NC = (C/8)/2;
+
+ FOR_UNROLL (short cc = 0; cc < NC; ++cc) {
+ s8x8_t vs[2];
+
+ simdgroup_load(vs[0], ss + 16*cc + 0, SH, 0, false);
+ simdgroup_load(vs[1], ss + 16*cc + 8, SH, 0, false);
+
+ FOR_UNROLL (short ii = 0; ii < NO/2; ++ii) {
+ v8x8_t mv[4];
+
+ simdgroup_load(mv[0], pv + 0*NSG + 16*ii*NSG + 0*8*NS20, NS20, 0, false);
+ simdgroup_load(mv[1], pv + 8*NSG + 16*ii*NSG + 0*8*NS20, NS20, 0, false);
+ simdgroup_load(mv[2], pv + 0*NSG + 16*ii*NSG + 1*8*NS20, NS20, 0, false);
+ simdgroup_load(mv[3], pv + 8*NSG + 16*ii*NSG + 1*8*NS20, NS20, 0, false);
+
+ simdgroup_multiply_accumulate(lo[2*ii + 0], vs[0], mv[0], lo[2*ii + 0]);
+ simdgroup_multiply_accumulate(lo[2*ii + 1], vs[0], mv[1], lo[2*ii + 1]);
+ simdgroup_multiply_accumulate(lo[2*ii + 0], vs[1], mv[2], lo[2*ii + 0]);
+ simdgroup_multiply_accumulate(lo[2*ii + 1], vs[1], mv[3], lo[2*ii + 1]);
+ }
+
+ pv += 2*8*NS20;
+ }
+ }
+ }
+
+ {
+ auto sot = so + 8*sgitg;
+
+ FOR_UNROLL (short ii = 0; ii < NO; ++ii) {
+ simdgroup_store(lo[ii], sot, PV, 0, false);
+
+ sot += 8*NSG;
+ }
+ }
+ } else {
+ // TODO: this is the quantized V cache branch - not optimized yet
+
+ const short tx = tiisg%4;
+ const short ty = tiisg/4;
+
+ for (short cc = 0; cc < C/8; ++cc) {
+ s8x8_t vs;
+ simdgroup_load(vs, ss + 8*cc, SH, 0, false);
+
+ for (short ii = 4*sgitg; ii < DV16; ii += 4*NSG) {
+ device const vd4x4_t * pv4x4 = (device const vd4x4_t *) (v + ((ic + 8*cc + ty)*args.nb21));
+
+ if (DV16%4 == 0) {
+ // no need for bound checks
+ {
+ v4x4_t tmp;
+ deq_v(pv4x4 + (ii + tx)/nl_v, (ii + tx)%nl_v, tmp);
+ sv4x4[4*ty + tx] = tmp;
+ }
+
+ simdgroup_barrier(mem_flags::mem_threadgroup);
+
+ FOR_UNROLL (short k = 0; k < 4; ++k) {
+ v8x8_t mv[2];
+ o8x8_t lo[2];
+
+ simdgroup_load(mv[0], sv + 16*k + 0*8, 4*16, 0, false);
+ simdgroup_load(mv[1], sv + 16*k + 1*8, 4*16, 0, false);
+ simdgroup_load(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false);
+ simdgroup_load(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false);
+
+ simdgroup_multiply_accumulate(lo[0], vs, mv[0], lo[0]);
+ simdgroup_multiply_accumulate(lo[1], vs, mv[1], lo[1]);
+
+ simdgroup_store(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false);
+ simdgroup_store(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false);
+ }
+ } else {
+ if (ii + tx < DV16) {
+ v4x4_t tmp;
+ deq_v(pv4x4 + (ii + tx)/nl_v, (ii + tx)%nl_v, tmp);
+ sv4x4[4*ty + tx] = tmp;
+ }
+
+ simdgroup_barrier(mem_flags::mem_threadgroup);
+
+ for (short k = 0; k < 4 && ii + k < DV16; ++k) {
+ v8x8_t mv[2];
+ o8x8_t lo[2];
+
+ simdgroup_load(mv[0], sv + 16*k + 0*8, 4*16, 0, false);
+ simdgroup_load(mv[1], sv + 16*k + 1*8, 4*16, 0, false);
+ simdgroup_load(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false);
+ simdgroup_load(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false);
+
+ simdgroup_multiply_accumulate(lo[0], vs, mv[0], lo[0]);
+ simdgroup_multiply_accumulate(lo[1], vs, mv[1], lo[1]);
+
+ simdgroup_store(lo[0], so + 8*(2*(ii + k) + 0), PV, 0, false);
+ simdgroup_store(lo[1], so + 8*(2*(ii + k) + 1), PV, 0, false);
+ }
+ }
+ }
+ }
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
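+    // attention sinks: one extra per-head logit that joins the softmax normalization
+    // (it can raise M and adds to S) but contributes nothing to the O accumulator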
+ if (FC_flash_attn_ext_has_sinks) {
+ FOR_UNROLL (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+
+ const float m = M[jj];
+ const float s = tiisg == 0 ? ((device const float *) sinks)[iq2] : -FLT_MAX/2;
+
+ M[jj] = simd_max(max(M[jj], s));
+
+ const float ms = exp(m - M[jj]);
+ const float vs = exp(s - M[jj]);
+
+ S[jj] = S[jj]*ms + simd_sum(vs);
+
+ for (short i = tiisg; i < DV4; i += NW) {
+ so4[j*PV4 + i] *= ms;
+ }
+ }
+ }
+ }
+
+ // store to global memory
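+    // each output row is O_j / S_j = softmax(Q_j*K^T)*V, written as float4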
+ for (short jj = 0; jj < NQ; ++jj) {
+ const short j = jj*NSG + sgitg;
+ if (iq1 + j >= args.ne01) {
+ break;
+ }
+
+ device float4 * dst4 = (device float4 *) dst + ((uint64_t)iq3*args.ne2*args.ne1 + iq2 + (uint64_t)(iq1 + j)*args.ne1)*DV4;
+
+ const float scale = S[jj] == 0.0 ? 0.0f : 1.0f/S[jj];
+
+ if (DV4 % NW == 0) {
+ FOR_UNROLL (short ii = 0; ii < DV4/NW; ++ii) {
+ const short i = ii*NW + tiisg;
+
+ dst4[i] = (float4) so4[j*PV4 + i]*scale;
+ }
+ } else {
+ for (short i = tiisg; i < DV4; i += NW) {
+ dst4[i] = (float4) so4[j*PV4 + i]*scale;
+ }
+ }
+ }
+
+#undef NS10
+#undef NS20
+}
+
+template<
+ typename q_t, // query types in shared memory
+ typename q4_t,
+ typename q8x8_t,
+ typename k_t, // key types in shared memory
+ typename k4x4_t,
+ typename k8x8_t,
+ typename v_t, // value types in shared memory
+ typename v4x4_t,
+ typename v8x8_t,
+ typename qk_t, // Q*K types
+ typename qk8x8_t,
+ typename s_t, // soft-max types
+ typename s2_t,
+ typename s8x8_t,
+ typename o_t, // attention accumulation types
+ typename o4_t,
+ typename o8x8_t,
+ typename kd4x4_t, // key type in device memory
+ short nl_k,
+ void (*deq_k)(device const kd4x4_t *, short, thread k4x4_t &),
+ typename vd4x4_t, // value type in device memory
+ short nl_v,
+ void (*deq_v)(device const vd4x4_t *, short, thread v4x4_t &),
+ short DK, // K head size
+ short DV, // V head size
+ short Q = OP_FLASH_ATTN_EXT_NQPSG, // queries per threadgroup
+ short C = OP_FLASH_ATTN_EXT_NCPSG> // cache items per threadgroup
+kernel void kernel_flash_attn_ext(
+ constant ggml_metal_kargs_flash_attn_ext & args,
+ device const char * q,
+ device const char * k,
+ device const char * v,
+ device const char * mask,
+ device const char * sinks,
+ device const char * pad,
+ device const char * blk,
+ device char * dst,
+ threadgroup half * shmem_f16 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+#define FWD_TMPL q_t, q4_t, q8x8_t, k_t, k4x4_t, k8x8_t, v_t, v4x4_t, v8x8_t, qk_t, qk8x8_t, s_t, s2_t, s8x8_t, o_t, o4_t, o8x8_t, kd4x4_t, nl_k, deq_k, vd4x4_t, nl_v, deq_v, DK, DV, Q, C
+#define FWD_ARGS args, q, k, v, mask, sinks, pad, blk, dst, shmem_f16, tgpig, tiisg, sgitg
+ switch (FC_flash_attn_ext_nsg) {
+ // note: disabled cases to reduce library load time
+ //case 1: kernel_flash_attn_ext_impl<FWD_TMPL, 1>(FWD_ARGS); break;
+ //case 2: kernel_flash_attn_ext_impl<FWD_TMPL, 2>(FWD_ARGS); break;
+ case 4: kernel_flash_attn_ext_impl<FWD_TMPL, 4>(FWD_ARGS); break;
+ case 8: kernel_flash_attn_ext_impl<FWD_TMPL, 8>(FWD_ARGS); break;
+ }
+#undef FWD_TMPL
+#undef FWD_ARGS
+}
+
+// TODO: this is quite ugly. in the future these types will be hardcoded in the kernel, but for now keep them as
+// template parameters to be able to explore different combinations
+//
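+// each row below maps to one group of the template parameters:
+//   Q types, K types, V types, Q*K types, soft-max types, output (O) types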
+#define FA_TYPES \
+ half, half4, simdgroup_half8x8, \
+ half, half4x4, simdgroup_half8x8, \
+ half, half4x4, simdgroup_half8x8, \
+ float, simdgroup_float8x8, \
+ float, float2, simdgroup_float8x8, \
+ float, float4, simdgroup_float8x8
+ //half, half4, simdgroup_half8x8
+
+#define FA_TYPES_BF \
+ bfloat, bfloat4, simdgroup_bfloat8x8, \
+ bfloat, bfloat4x4, simdgroup_bfloat8x8, \
+ bfloat, bfloat4x4, simdgroup_bfloat8x8, \
+ float, simdgroup_float8x8, \
+ float, float2, simdgroup_float8x8, \
+ half, half4, simdgroup_half8x8
+ //float, float4, simdgroup_float8x8
+
+#define FA_TYPES_F32 \
+ half, half4, simdgroup_half8x8, \
+ float, float4x4, simdgroup_float8x8, \
+ float, float4x4, simdgroup_float8x8, \
+ float, simdgroup_float8x8, \
+ float, float2, simdgroup_float8x8, \
+ float, float4, simdgroup_float8x8
+ //half, half4, simdgroup_half8x8
+
+typedef decltype(kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 64, 64>) flash_attn_ext_t;
+
+template [[host_name("kernel_flash_attn_ext_f32_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_f32_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_f32_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_f32_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_f32_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_f32_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_f32_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_f32_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_f32_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_f32_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_f32_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_f32_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_f32_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_F32, float4x4, 1, dequantize_f32, float4x4, 1, dequantize_f32, 576, 512>;
+
+template [[host_name("kernel_flash_attn_ext_f16_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_f16_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_f16_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_f16_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_f16_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_f16_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_f16_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_f16_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_f16_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_f16_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_f16_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_f16_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_f16_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, half4x4, 1, dequantize_f16, half4x4, 1, dequantize_f16, 576, 512>;
+
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_bf16_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_bf16_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES_BF, bfloat4x4, 1, dequantize_bf16, bfloat4x4, 1, dequantize_bf16, 576, 512>;
+#endif
+
+template [[host_name("kernel_flash_attn_ext_q4_0_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_q4_0_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_0, 2, dequantize_q4_0, block_q4_0, 2, dequantize_q4_0, 576, 512>;
+
+template [[host_name("kernel_flash_attn_ext_q4_1_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_q4_1_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q4_1, 2, dequantize_q4_1, block_q4_1, 2, dequantize_q4_1, 576, 512>;
+
+template [[host_name("kernel_flash_attn_ext_q5_0_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_q5_0_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_0, 2, dequantize_q5_0, block_q5_0, 2, dequantize_q5_0, 576, 512>;
+
+template [[host_name("kernel_flash_attn_ext_q5_1_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_q5_1_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q5_1, 2, dequantize_q5_1, block_q5_1, 2, dequantize_q5_1, 576, 512>;
+
+template [[host_name("kernel_flash_attn_ext_q8_0_dk32_dv32" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 32, 32>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk40_dv40" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 40, 40>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk48_dv48" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 48, 48>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk64_dv64" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 64, 64>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk72_dv72" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 72, 72>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk80_dv80" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 80, 80>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk96_dv96" )]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 96, 96>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk112_dv112")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 112, 112>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk128_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 128, 128>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk192_dv192")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 192, 192>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk192_dv128")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 192, 128>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk256_dv256")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 256, 256>;
+template [[host_name("kernel_flash_attn_ext_q8_0_dk576_dv512")]] kernel flash_attn_ext_t kernel_flash_attn_ext<FA_TYPES, block_q8_0, 2, dequantize_q8_0, block_q8_0, 2, dequantize_q8_0, 576, 512>;
+
+#undef FA_TYPES
+#undef FA_TYPES_BF
+#undef FA_TYPES_F32
+
+constant bool FC_flash_attn_ext_vec_has_mask [[function_constant(FC_FLASH_ATTN_EXT_VEC + 0)]];
+constant bool FC_flash_attn_ext_vec_has_sinks [[function_constant(FC_FLASH_ATTN_EXT_VEC + 1)]];
+constant bool FC_flash_attn_ext_vec_has_bias [[function_constant(FC_FLASH_ATTN_EXT_VEC + 2)]];
+constant bool FC_flash_attn_ext_vec_has_scap [[function_constant(FC_FLASH_ATTN_EXT_VEC + 3)]];
+constant bool FC_flash_attn_ext_vec_has_kvpad [[function_constant(FC_FLASH_ATTN_EXT_VEC + 4)]];
+
+//constant float FC_flash_attn_ext_vec_scale [[function_constant(FC_FLASH_ATTN_EXT_VEC + 10)]];
+//constant float FC_flash_attn_ext_vec_max_bias [[function_constant(FC_FLASH_ATTN_EXT_VEC + 11)]];
+//constant float FC_flash_attn_ext_vec_logit_softcap [[function_constant(FC_FLASH_ATTN_EXT_VEC + 12)]];
+
+constant int32_t FC_flash_attn_ext_vec_ns10 [[function_constant(FC_FLASH_ATTN_EXT_VEC + 20)]];
+constant int32_t FC_flash_attn_ext_vec_ns20 [[function_constant(FC_FLASH_ATTN_EXT_VEC + 21)]];
+constant int32_t FC_flash_attn_ext_vec_nsg [[function_constant(FC_FLASH_ATTN_EXT_VEC + 22)]];
+constant int32_t FC_flash_attn_ext_vec_nwg [[function_constant(FC_FLASH_ATTN_EXT_VEC + 23)]];
+
+template<
+ typename q4_t, // query types in shared memory
+ typename k4_t, // key types in shared memory
+ typename v4_t, // value types in shared memory
+ typename qk_t, // Q*K types
+ typename s_t, // soft-max types
+ typename s4_t,
+ typename o4_t, // attention accumulation types
+ typename kd4_t, // key type in device memory
+ short nl_k,
+ void (*deq_k_t4)(device const kd4_t *, short, thread k4_t &),
+ typename vd4_t, // value type in device memory
+ short nl_v,
+ void (*deq_v_t4)(device const vd4_t *, short, thread v4_t &),
+ short DK, // K head size
+ short DV, // V head size
+ short NE = 4, // head elements per thread
+ short Q = OP_FLASH_ATTN_EXT_VEC_NQPSG, // queries per threadgroup
+ short C = OP_FLASH_ATTN_EXT_VEC_NCPSG> // cache items per threadgroup
+kernel void kernel_flash_attn_ext_vec(
+ constant ggml_metal_kargs_flash_attn_ext_vec & args,
+ device const char * q,
+ device const char * k,
+ device const char * v,
+ device const char * mask,
+ device const char * sinks,
+ device const char * pad,
+ device char * dst,
+ threadgroup half * shmem_f16 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ static_assert(DK % 32 == 0, "DK must be divisible by 32");
+ static_assert(DV % 32 == 0, "DV must be divisible by 32");
+
+#define NWG (FC_flash_attn_ext_vec_nwg)
+#define NSG (FC_flash_attn_ext_vec_nsg)
+
+#define NS10 (FC_flash_attn_ext_vec_ns10)
+#define NS20 (FC_flash_attn_ext_vec_ns20)
+
+ const short iwg = tgpig[2]%NWG;
+
+ const ushort iq3 = tgpig[2]/NWG;
+ const ushort iq2 = tgpig[1];
+ const ushort iq1 = tgpig[0];
+
+ constexpr short DK4 = DK/4;
+ constexpr short DV4 = DV/4;
+
+ constexpr short PK = PAD2(DK, 128);
+ constexpr short PK4 = PK/4;
+
+ constexpr short PV = PAD2(DV, 128);
+ constexpr short PV4 = PV/4;
+
+ constexpr short NW = N_SIMDWIDTH;
+    constexpr short NL = NW/NE; // note: this can be adjusted to support different head sizes and simdgroup workloads
+ constexpr short SH = 4*C; // shared memory per simdgroup
+
+ static_assert(DK4 % NL == 0, "DK4 must be divisible by NL");
+ static_assert(DV4 % NL == 0, "DV4 must be divisible by NL");
+
+    const short T = PK + NSG*SH; // shared memory size per query (in half elements)
+
+ //threadgroup q_t * sq = (threadgroup q_t *) (shmem_f16 + 0*PK); // holds the query data
+ threadgroup q4_t * sq4 = (threadgroup q4_t *) (shmem_f16 + 0*PK); // same as above but in q4_t
+ threadgroup s_t * ss = (threadgroup s_t *) (shmem_f16 + sgitg*SH + NSG*PK); // scratch buffer for attention
+ threadgroup s4_t * ss4 = (threadgroup s4_t *) (shmem_f16 + sgitg*SH + NSG*PK); // same as above but in s4_t
+ threadgroup half * sm = (threadgroup half *) (shmem_f16 + sgitg*SH + 2*C + NSG*PK); // scratch buffer for mask
+ threadgroup o4_t * so4 = (threadgroup o4_t *) (shmem_f16 + 2*sgitg*PV + NSG*PK + NSG*SH); // scratch buffer for the results
+
+ // store the result for all queries in shared memory (the O matrix from the paper)
+ so4 += tiisg;
+
+ {
+ q += iq1*args.nb01 + iq2*args.nb02 + iq3*args.nb03;
+
+ const short ikv2 = iq2/(args.ne02/args.ne_12_2);
+ const short ikv3 = iq3/(args.ne03/args.ne_12_3);
+
+ k += ikv2*args.nb12 + ikv3*args.nb13;
+ v += ikv2*args.nb22 + ikv3*args.nb23;
+ }
+
+ // load heads from Q to shared memory
+ device const float4 * q4 = (device const float4 *) ((device const char *) q);
+
+ if (iq1 < args.ne01) {
+ for (short i = tiisg; i < PK4; i += NW) {
+ if (i < DK4) {
+ sq4[i] = (q4_t) q4[i];
+ } else {
+ sq4[i] = (q4_t) 0.0f;
+ }
+ }
+ }
+
+ // zero out so
+ for (short i = 0; i < DV4/NL; ++i) {
+ so4[i*NL] = (o4_t) 0.0f;
+ }
+
+ // zero out shared memory SH
+ for (short i = tiisg; i < SH/4; i += NW) {
+ ss4[i] = (s4_t) 0.0f;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ {
+ float S = 0.0f;
+ float M = -FLT_MAX/2;
+
+ // thread indices inside the simdgroup
+ const short tx = tiisg%NL;
+ const short ty = tiisg/NL;
+
+ // pointer to the mask
+ device const half * pm = (device const half *) (mask + iq1*args.nb31 + (iq2%args.ne32)*args.nb32 + (iq3%args.ne33)*args.nb33);
+
+ float slope = 1.0f;
+
+ // ALiBi
+ if (FC_flash_attn_ext_vec_has_bias) {
+ const short h = iq2;
+
+ const float base = h < args.n_head_log2 ? args.m0 : args.m1;
+ const short exph = h < args.n_head_log2 ? h + 1 : 2*(h - args.n_head_log2) + 1;
+
+ slope = pow(base, exph);
+ }
+
+ // loop over the KV cache
+ // each simdgroup handles blocks of Q rows and C columns
+ for (int ic0 = iwg*NSG + sgitg; ; ic0 += NWG*NSG) {
+ int ic = ic0*C;
+ if (ic >= args.ne11) {
+ break;
+ }
+
+ // the last partial chunk uses the pad buffer as source
+ if (FC_flash_attn_ext_vec_has_kvpad && ic + C > args.ne11) {
+ k = pad;
+ v = k + args.nb11*C*args.ne_12_2*args.ne_12_3;
+ mask = v + args.nb21*C*args.ne_12_2*args.ne_12_3;
+
+ const short ikv2 = iq2/(args.ne02/args.ne_12_2);
+ const short ikv3 = iq3/(args.ne03/args.ne_12_3);
+
+ k += (ikv2 + ikv3*args.ne_12_2)*args.nb11*C;
+ v += (ikv2 + ikv3*args.ne_12_2)*args.nb21*C;
+
+ if (!FC_flash_attn_ext_vec_has_mask) {
+ if (ic + tiisg >= args.ne11) {
+ sm[tiisg] = -MAXHALF;
+ }
+ } else {
+ pm = (device const half *) (mask) +
+ iq1*C +
+ (iq2%args.ne32)*(C*args.ne31) +
+ (iq3%args.ne33)*(C*args.ne31*args.ne32);
+ }
+
+ ic = 0;
+ }
+
+ if (FC_flash_attn_ext_vec_has_mask) {
+ sm[tiisg] = pm[ic + tiisg];
+ }
+
+ // skip -INF blocks
+ if (simd_max(sm[tiisg]) <= -MAXHALF) {
+ continue;
+ }
+
+ // Q*K^T
+ {
+ device const k4_t * pk4 = (device const k4_t *) (k + ic*args.nb11);
+ threadgroup const q4_t * pq4 = sq4;
+
+ pk4 += ty*NS10/4 + tx;
+ pq4 += tx;
+
+ qk_t mqk[C/NE] = { [ 0 ... C/NE - 1] = 0.0f };
+
+ // each simdgroup processes 1 query and NE (NW/NL) cache elements
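+                // e.g. with NW = 32 and NE = 4: NL = 8 threads cooperate on each cache element,
+                // and each thread covers DK4/NL four-element chunks of the head dimension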
+ FOR_UNROLL (short cc = 0; cc < C/NE; ++cc) {
+ if (is_same<kd4_t, k4_t>::value) {
+ FOR_UNROLL (short ii = 0; ii < DK4/NL; ++ii) {
+ mqk[cc] += dot((float4) pk4[cc*NE*NS10/4 + ii*NL], (float4) pq4[ii*NL]);
+ }
+ } else {
+ device const kd4_t * pk = (device const kd4_t *) (k + ((ic + NE*cc + ty)*args.nb11));
+
+ k4_t mk;
+
+ FOR_UNROLL (short ii = 0; ii < DK4/NL; ++ii) {
+ const short i = ii*NL + tx;
+
+ deq_k_t4(pk + i/nl_k, i%nl_k, mk);
+
+ mqk[cc] += dot((float4) mk, (float4) sq4[i]);
+ }
+ }
+
+ if (NE == 1) {
+ mqk[cc] = simd_sum(mqk[cc]);
+ } else {
+ // simdgroup reduce (NE = 4)
+ // [ 0 .. 7] -> [ 0]
+ // [ 8 .. 15] -> [ 8]
+ // [16 .. 23] -> [16]
+ // [24 .. 31] -> [24]
+ if (NE <= 1) {
+ mqk[cc] += simd_shuffle_down(mqk[cc], 16);
+ }
+ if (NE <= 2) {
+ mqk[cc] += simd_shuffle_down(mqk[cc], 8);
+ }
+ if (NE <= 4) {
+ mqk[cc] += simd_shuffle_down(mqk[cc], 4);
+ }
+ if (NE <= 8) {
+ mqk[cc] += simd_shuffle_down(mqk[cc], 2);
+ }
+ if (NE <= 16) {
+ mqk[cc] += simd_shuffle_down(mqk[cc], 1);
+ }
+
+ // broadcast
+ mqk[cc] = simd_shuffle(mqk[cc], NL*ty);
+ }
+ }
+
+ if (FC_flash_attn_ext_vec_has_mask &&
+ !FC_flash_attn_ext_vec_has_scap &&
+ !FC_flash_attn_ext_vec_has_bias) {
+ ss[NE*tx + ty] = fma(mqk[tx], args.scale, (qk_t) sm[NE*tx + ty]);
+ } else {
+ mqk[tx] *= args.scale;
+
+ if (FC_flash_attn_ext_vec_has_scap) {
+ mqk[tx] = args.logit_softcap*precise::tanh(mqk[tx]);
+ }
+
+ if (FC_flash_attn_ext_vec_has_bias) {
+ mqk[tx] += (qk_t) sm[NE*tx + ty]*slope;
+ } else {
+ mqk[tx] += (qk_t) sm[NE*tx + ty];
+ }
+
+ ss[NE*tx + ty] = mqk[tx];
+ }
+ }
+
+ simdgroup_barrier(mem_flags::mem_threadgroup);
+
+ // online softmax
+ {
+ const float m = M;
+ const float s = ss[tiisg];
+
+ M = simd_max(max(M, s));
+
+ const float ms = exp(m - M);
+ const float vs = exp(s - M);
+
+ S = S*ms + simd_sum(vs);
+
+ // the P matrix from the paper (Q rows, C columns)
+ ss[tiisg] = vs;
+
+ // O = diag(ms)*O
+ if ((DV4/NL % NW == 0) || ty == 0) {
+ FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) {
+ so4[ii*NL] *= ms;
+ }
+ }
+ }
+
+ simdgroup_barrier(mem_flags::mem_threadgroup);
+
+ // O = O + (Q*K^T)*V
+ {
+ o4_t lo[DV4/NL];
+ FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) {
+ lo[ii] = 0.0f;
+ }
+
+ if (is_same<vd4_t, v4_t>::value) {
+ device const v4_t * pv4 = (device const v4_t *) (v + ic*args.nb21);
+
+ pv4 += ty*NS20/4 + tx;
+
+ const auto sst = ss + ty;
+
+ FOR_UNROLL (short cc = 0; cc < C/NE; ++cc) {
+ FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) {
+ lo[ii] += o4_t(float4(pv4[cc*NE*NS20/4 + ii*NL])*float4(sst[cc*NE]));
+ }
+ }
+ } else {
+ FOR_UNROLL (short cc = 0; cc < C/NE; ++cc) {
+ device const vd4_t * pv4 = (device const vd4_t *) (v + ((ic + NE*cc + ty)*args.nb21));
+
+ FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) {
+ const short i = ii*NL + tx;
+
+ v4_t mv;
+ deq_v_t4(pv4 + i/nl_v, i%nl_v, mv);
+
+ lo[ii] += o4_t(float4(mv)*float4(ss[NE*cc + ty]));
+ }
+ }
+ }
+
+ FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) {
+ if (NE > 1) {
+ lo[ii][0] += simd_shuffle_down(lo[ii][0], 16);
+ lo[ii][1] += simd_shuffle_down(lo[ii][1], 16);
+ lo[ii][2] += simd_shuffle_down(lo[ii][2], 16);
+ lo[ii][3] += simd_shuffle_down(lo[ii][3], 16);
+ }
+
+ if (NE > 2) {
+ lo[ii][0] += simd_shuffle_down(lo[ii][0], 8);
+ lo[ii][1] += simd_shuffle_down(lo[ii][1], 8);
+ lo[ii][2] += simd_shuffle_down(lo[ii][2], 8);
+ lo[ii][3] += simd_shuffle_down(lo[ii][3], 8);
+ }
+
+ if (NE > 4) {
+ lo[ii][0] += simd_shuffle_down(lo[ii][0], 4);
+ lo[ii][1] += simd_shuffle_down(lo[ii][1], 4);
+ lo[ii][2] += simd_shuffle_down(lo[ii][2], 4);
+ lo[ii][3] += simd_shuffle_down(lo[ii][3], 4);
+ }
+
+ if (NE > 8) {
+ lo[ii][0] += simd_shuffle_down(lo[ii][0], 2);
+ lo[ii][1] += simd_shuffle_down(lo[ii][1], 2);
+ lo[ii][2] += simd_shuffle_down(lo[ii][2], 2);
+ lo[ii][3] += simd_shuffle_down(lo[ii][3], 2);
+ }
+
+ if (NE > 16) {
+ lo[ii][0] += simd_shuffle_down(lo[ii][0], 1);
+ lo[ii][1] += simd_shuffle_down(lo[ii][1], 1);
+ lo[ii][2] += simd_shuffle_down(lo[ii][2], 1);
+ lo[ii][3] += simd_shuffle_down(lo[ii][3], 1);
+ }
+ }
+
+ if ((DV4/NL % NW == 0) || ty == 0) {
+ FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) {
+ so4[ii*NL] += lo[ii];
+ }
+ }
+ }
+ }
+
+ if (FC_flash_attn_ext_vec_has_sinks && sgitg == 0 && iwg == 0) {
+ const float m = M;
+ const float s = tiisg == 0 ? ((device const float *) sinks)[iq2] : -FLT_MAX/2;
+
+ M = simd_max(max(M, s));
+
+ const float ms = exp(m - M);
+ const float vs = exp(s - M);
+
+ S = S*ms + simd_sum(vs);
+
+ if ((DV4/NL % NW == 0) || ty == 0) {
+ FOR_UNROLL (short ii = 0; ii < DV4/NL; ++ii) {
+ so4[ii*NL] *= ms;
+ }
+ }
+ }
+
+ // these are needed for reducing the results from the simdgroups (reuse the ss buffer)
+ if (tiisg == 0) {
+ ss[0] = (s_t) S;
+ ss[1] = (s_t) M;
+ }
+ }
+
+ so4 -= tiisg;
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // parallel reduce
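+    // pairwise merge of the per-simdgroup partials (S_i, M_i, O_i):
+    //   M = max(M0, M1);  S = S0*exp(M0 - M) + S1*exp(M1 - M);  O = O0*exp(M0 - M) + O1*exp(M1 - M)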
+ for (short r = NSG/2; r > 0; r >>= 1) {
+ if (sgitg < r) {
+ const float S0 = ss[ 0];
+ const float S1 = ss[r*(SH/2) + 0];
+
+ const float M0 = ss[ 1];
+ const float M1 = ss[r*(SH/2) + 1];
+
+ const float M = max(M0, M1);
+
+ const float ms0 = exp(M0 - M);
+ const float ms1 = exp(M1 - M);
+
+ const float S = S0*ms0 + S1*ms1;
+
+ if (tiisg == 0) {
+ ss[0] = S;
+ ss[1] = M;
+ }
+
+ // O_0 = diag(ms0)*O_0 + diag(ms1)*O_1
+ for (short i = tiisg; i < DV4; i += NW) {
+ so4[i] = so4[i]*ms0 + so4[i + r*PV4]*ms1;
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
+ // final rescale with 1/S and store to global memory
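+    // with NWG == 1 the result is normalized here; with NWG > 1 the unnormalized partials are stored
+    // interleaved per workgroup together with their (S, M), to be combined by a later reduction pass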
+ if (sgitg == 0) {
+ const int64_t nrows = args.ne3*args.ne2*args.ne1;
+ const int64_t rid = iq3*args.ne2*args.ne1 + iq2 + iq1*args.ne1;
+
+ device float4 * dst4 = (device float4 *) dst;
+ device float * dst1 = (device float *) dst + nrows*DV*NWG; // the S and M are stored after the results
+
+ const float S = NWG == 1 ? (ss[0] == 0.0f ? 0.0f : 1.0f/ss[0]) : 1.0f;
+
+ // interleave the workgroup data
+ for (short i = tiisg; i < DV4; i += NW) {
+ dst4[rid*DV4*NWG + NWG*i + iwg] = (float4) so4[i]*S;
+ }
+
+ // store S and M
+ if (NWG > 1) {
+ if (tiisg == 0) {
+ dst1[rid*(2*NWG) + 2*iwg + 0] = ss[0];
+ dst1[rid*(2*NWG) + 2*iwg + 1] = ss[1];
+ }
+ }
+ }
+
+#undef NWG
+#undef NSG
+#undef NS10
+#undef NS20
+}
+
+// note: I think the s_t can be half instead of float, because the Q*K scaling is done before storing to shared mem
+// in the other (non-vec) kernel, we need s_t to also be float because we scale during the soft_max
+//
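+// rows: Q type, K type, V type, Q*K type, soft-max types, output accumulator type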
+#define FA_TYPES \
+ half4, \
+ half4, \
+ half4, \
+ float, \
+ float, float4, \
+ float4
+
+#define FA_TYPES_F32 \
+ half4, \
+ float4, \
+ float4, \
+ float, \
+ float, float4, \
+ float4
+
+typedef decltype(kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 128, 128, 4>) flash_attn_ext_vec_t;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 32, 32, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 32, 32, 4>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 32, 32, 4>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 32, 32, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 32, 32, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 32, 32, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 32, 32, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk32_dv32")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 32, 32, 4>;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 64, 64, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 64, 64, 2>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 64, 64, 2>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 64, 64, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 64, 64, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 64, 64, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 64, 64, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk64_dv64")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 64, 64, 2>;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 96, 96, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 96, 96, 4>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 96, 96, 4>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 96, 96, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 96, 96, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 96, 96, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 96, 96, 4>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk96_dv96")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 96, 96, 4>;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 128, 128, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 128, 128, 1>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 128, 128, 1>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 128, 128, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 128, 128, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 128, 128, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 128, 128, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk128_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 128, 128, 1>;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 192, 192, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 192, 192, 2>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 192, 192, 2>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 192, 192, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 192, 192, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 192, 192, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 192, 192, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk192_dv192")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 192, 192, 2>;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 192, 128, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 192, 128, 2>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 192, 128, 2>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 192, 128, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 192, 128, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 192, 128, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 192, 128, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk192_dv128")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 192, 128, 2>;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 256, 256, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 256, 256, 1>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 256, 256, 1>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 256, 256, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 256, 256, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 256, 256, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 256, 256, 1>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk256_dv256")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 256, 256, 1>;
+
+template [[host_name("kernel_flash_attn_ext_vec_f32_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES_F32, float4, 1, dequantize_f32_t4, float4, 1, dequantize_f32_t4, 576, 512, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_f16_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, half4, 1, dequantize_f16_t4, half4, 1, dequantize_f16_t4, 576, 512, 2>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_flash_attn_ext_vec_bf16_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, bfloat4, 1, dequantize_bf16_t4, bfloat4, 1, dequantize_bf16_t4, 576, 512, 2>;
+#endif
+template [[host_name("kernel_flash_attn_ext_vec_q4_0_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_0, 8, dequantize_q4_0_t4, block_q4_0, 8, dequantize_q4_0_t4, 576, 512, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q4_1_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q4_1, 8, dequantize_q4_1_t4, block_q4_1, 8, dequantize_q4_1_t4, 576, 512, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_0_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_0, 8, dequantize_q5_0_t4, block_q5_0, 8, dequantize_q5_0_t4, 576, 512, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q5_1_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q5_1, 8, dequantize_q5_1_t4, block_q5_1, 8, dequantize_q5_1_t4, 576, 512, 2>;
+template [[host_name("kernel_flash_attn_ext_vec_q8_0_dk576_dv512")]] kernel flash_attn_ext_vec_t kernel_flash_attn_ext_vec<FA_TYPES, block_q8_0, 8, dequantize_q8_0_t4, block_q8_0, 8, dequantize_q8_0_t4, 576, 512, 2>;
+
+#undef FA_TYPES
+#undef FA_TYPES_F32
+
+constant int32_t FC_flash_attn_ext_vec_reduce_DV [[function_constant(FC_FLASH_ATTN_EXT_VEC_REDUCE + 0)]];
+constant int32_t FC_flash_attn_ext_vec_reduce_NWG [[function_constant(FC_FLASH_ATTN_EXT_VEC_REDUCE + 1)]];
+
+kernel void kernel_flash_attn_ext_vec_reduce(
+ constant ggml_metal_kargs_flash_attn_ext_vec_reduce & args,
+ device const char * htmp,
+ device char * dst,
+ uint tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+#define NWG (FC_flash_attn_ext_vec_reduce_NWG)
+#define DV (FC_flash_attn_ext_vec_reduce_DV)
+
+ const uint64_t rid = tgpig;
+
+ const short iwg = tiisg;
+
+ device const float * ss = (device const float *) htmp + (uint64_t)args.nrows*DV*NWG;
+
+ float S = ss[rid*(2*NWG) + 2*iwg + 0];
+ float M = ss[rid*(2*NWG) + 2*iwg + 1];
+
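+ // each lane holds one workgroup's partial (S, M); combine them with a max-rescaled sum and invert the total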
+ const float m = simd_max(M);
+ const float ms = exp(M - m);
+
+ S = simd_sum(S*ms);
+ S = S == 0.0f ? 0.0f : 1.0f/S;
+
+ const short DV4 = DV/4;
+
+ device const float4 * htmp4 = (device const float4 *) htmp + rid*DV4*NWG;
+ device float4 * dst4 = (device float4 *) dst + rid*DV4;
+
+ for (short i = sgitg; i < DV4; i += NWG) {
+ const float4 v = simd_sum(htmp4[i*NWG + iwg]*ms);
+
+ if (iwg == 0) {
+ dst4[i] = v*S;
+ }
+ }
+
+#undef NWG
+#undef DV
+}
+
+template<typename T0, typename T1>
+kernel void kernel_cpy_t_t(
+ constant ggml_metal_kargs_cpy & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int i03 = tgpig[2];
+ const int i02 = tgpig[1];
+ const int i01 = ntg[1] == 1 ? tgpig[0]%args.ne01 : tgpig[0]*ntg[1] + tiitg/ntg[0];
+ const int iw0 = ntg[1] == 1 ? tgpig[0]/args.ne01 : 0;
+
+ const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00;
+
+ const int64_t i3 = n/(args.ne2*args.ne1*args.ne0);
+ const int64_t i2 = (n - i3*args.ne2*args.ne1*args.ne0)/(args.ne1*args.ne0);
+ const int64_t i1 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0)/args.ne0;
+ const int64_t i0 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0 - i1*args.ne0);
+
+ device T1 * dst_data = (device T1 *) (dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
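+ // the loop body runs at most once per thread (note the break); the loop condition only guards i00 against ne00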
+ for (int64_t i00 = iw0*ntg[0] + tiitg%ntg[0]; i00 < args.ne00; ) {
+ device const T0 * src = (device T0 *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + i00*args.nb00);
+ dst_data[i00] = (T1) src[0];
+ break;
+ }
+}
+
+typedef decltype(kernel_cpy_t_t<float, float>) kernel_cpy_t;
+
+template [[host_name("kernel_cpy_f32_f32")]] kernel kernel_cpy_t kernel_cpy_t_t<float, float>;
+template [[host_name("kernel_cpy_f32_f16")]] kernel kernel_cpy_t kernel_cpy_t_t<float, half>;
+template [[host_name("kernel_cpy_f32_i32")]] kernel kernel_cpy_t kernel_cpy_t_t<float, int32_t>;
+template [[host_name("kernel_cpy_i32_f32")]] kernel kernel_cpy_t kernel_cpy_t_t<int32_t, float>;
+template [[host_name("kernel_cpy_i32_i32")]] kernel kernel_cpy_t kernel_cpy_t_t<int32_t, int32_t>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_cpy_f32_bf16")]] kernel kernel_cpy_t kernel_cpy_t_t<float, bfloat>;
+#endif
+template [[host_name("kernel_cpy_f16_f32")]] kernel kernel_cpy_t kernel_cpy_t_t<half, float>;
+template [[host_name("kernel_cpy_f16_f16")]] kernel kernel_cpy_t kernel_cpy_t_t<half, half>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_cpy_bf16_f32")]] kernel kernel_cpy_t kernel_cpy_t_t<bfloat, float>;
+template [[host_name("kernel_cpy_bf16_bf16")]] kernel kernel_cpy_t kernel_cpy_t_t<bfloat, bfloat>;
+#endif
+
+template<short QK,
+ typename block_q,
+ void (*quantize_func)(device const float *, device block_q &)>
+kernel void kernel_cpy_f32_q(
+ constant ggml_metal_kargs_cpy & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int i03 = tgpig[2];
+ const int i02 = tgpig[1];
+ const int i01 = ntg[1] == 1 ? tgpig[0]%args.ne01 : tgpig[0]*ntg[1] + tiitg/ntg[0];
+ const int iw0 = ntg[1] == 1 ? tgpig[0]/args.ne01 : 0;
+
+ const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00;
+
+ const int64_t i3 = n / (args.ne2*args.ne1*args.ne0);
+ const int64_t i2 = (n - i3*args.ne2*args.ne1*args.ne0) / (args.ne1*args.ne0);
+ const int64_t i1 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0) / args.ne0;
+ const int64_t i0 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0 - i1*args.ne0)/QK;
+
+ device block_q * dst_data = (device block_q *)(dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ for (int64_t i00 = iw0*ntg[0] + tiitg%ntg[0]; i00 < args.nk0; ) {
+ device const float * src = (device const float *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01 + (i00*QK)*args.nb00);
+
+ quantize_func(src, dst_data[i00]);
+
+ break;
+ }
+}
+
+typedef decltype(kernel_cpy_f32_q<QK8_0, block_q8_0, quantize_q8_0>) cpy_f_q_t;
+
+template [[host_name("kernel_cpy_f32_q8_0")]] kernel cpy_f_q_t kernel_cpy_f32_q<QK8_0, block_q8_0, quantize_q8_0>;
+template [[host_name("kernel_cpy_f32_q4_0")]] kernel cpy_f_q_t kernel_cpy_f32_q<QK4_0, block_q4_0, quantize_q4_0>;
+template [[host_name("kernel_cpy_f32_q4_1")]] kernel cpy_f_q_t kernel_cpy_f32_q<QK4_1, block_q4_1, quantize_q4_1>;
+template [[host_name("kernel_cpy_f32_q5_0")]] kernel cpy_f_q_t kernel_cpy_f32_q<QK5_0, block_q5_0, quantize_q5_0>;
+template [[host_name("kernel_cpy_f32_q5_1")]] kernel cpy_f_q_t kernel_cpy_f32_q<QK5_1, block_q5_1, quantize_q5_1>;
+template [[host_name("kernel_cpy_f32_iq4_nl")]] kernel cpy_f_q_t kernel_cpy_f32_q<QK4_NL, block_iq4_nl, quantize_iq4_nl>;
+
+template<typename T4x4, typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread T4x4 &)>
+kernel void kernel_cpy_q_f32(
+ constant ggml_metal_kargs_cpy & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const int i03 = tgpig[2];
+ const int i02 = tgpig[1];
+ const int i01 = ntg[1] == 1 ? tgpig[0]%args.ne01 : tgpig[0]*ntg[1] + tiitg/ntg[0];
+ const int iw0 = ntg[1] == 1 ? tgpig[0]/args.ne01 : 0;
+
+ const int64_t n = i03*args.ne02*args.ne01*args.ne00 + i02*args.ne01*args.ne00 + i01*args.ne00;
+
+ const int64_t i3 = n/(args.ne2*args.ne1*args.ne0);
+ const int64_t i2 = (n - i3*args.ne2*args.ne1*args.ne0)/(args.ne1*args.ne0);
+ const int64_t i1 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0)/args.ne0;
+ const int64_t i0 = (n - i3*args.ne2*args.ne1*args.ne0 - i2*args.ne1*args.ne0 - i1*args.ne0);
+
+ device const block_q * src_data = (device const block_q *)(src0 + i03*args.nb03 + i02*args.nb02 + i01*args.nb01);
+ device T4x4 * dst_data = (device T4x4 *)(dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ for (int64_t i00 = iw0*ntg[0] + tiitg%ntg[0]; i00 < args.nk0; ) {
+ T4x4 temp;
+ dequantize_func(src_data + i00/nl, i00%nl, temp);
+ dst_data[i00] = temp;
+
+ break;
+ }
+}
+
+typedef decltype(kernel_cpy_q_f32<float4x4, block_q4_0, 2, dequantize_q4_0>) cpy_q_f_t;
+
+template [[host_name("kernel_cpy_q4_0_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32<float4x4, block_q4_0, 2, dequantize_q4_0>;
+template [[host_name("kernel_cpy_q4_1_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32<float4x4, block_q4_1, 2, dequantize_q4_1>;
+template [[host_name("kernel_cpy_q5_0_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32<float4x4, block_q5_0, 2, dequantize_q5_0>;
+template [[host_name("kernel_cpy_q5_1_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32<float4x4, block_q5_1, 2, dequantize_q5_1>;
+template [[host_name("kernel_cpy_q8_0_f32")]] kernel cpy_q_f_t kernel_cpy_q_f32<float4x4, block_q8_0, 2, dequantize_q8_0>;
+
+template [[host_name("kernel_cpy_q4_0_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32<half4x4, block_q4_0, 2, dequantize_q4_0>;
+template [[host_name("kernel_cpy_q4_1_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32<half4x4, block_q4_1, 2, dequantize_q4_1>;
+template [[host_name("kernel_cpy_q5_0_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32<half4x4, block_q5_0, 2, dequantize_q5_0>;
+template [[host_name("kernel_cpy_q5_1_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32<half4x4, block_q5_1, 2, dequantize_q5_1>;
+template [[host_name("kernel_cpy_q8_0_f16")]] kernel cpy_q_f_t kernel_cpy_q_f32<half4x4, block_q8_0, 2, dequantize_q8_0>;
+
+kernel void kernel_concat(
+ constant ggml_metal_kargs_concat & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+
+ const int i3 = tgpig.z;
+ const int i2 = tgpig.y;
+ const int i1 = tgpig.x;
+
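+ // o[dim] holds src0's extent along the concat dim; indices beyond it are fetched from src1 with that offset removed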
+ int o[4] = {0, 0, 0, 0};
+ o[args.dim] = args.dim == 0 ? args.ne00 : (args.dim == 1 ? args.ne01 : (args.dim == 2 ? args.ne02 : args.ne03));
+
+ device const float * x;
+
+ for (int i0 = tpitg.x; i0 < args.ne0; i0 += ntg.x) {
+ if (i0 < args.ne00 && i1 < args.ne01 && i2 < args.ne02 && i3 < args.ne03) {
+ x = (device const float *)(src0 + (i3 )*args.nb03 + (i2 )*args.nb02 + (i1 )*args.nb01 + (i0 )*args.nb00);
+ } else {
+ x = (device const float *)(src1 + (i3 - o[3])*args.nb13 + (i2 - o[2])*args.nb12 + (i1 - o[1])*args.nb11 + (i0 - o[0])*args.nb10);
+ }
+
+ device float * y = (device float *)(dst + i3*args.nb3 + i2*args.nb2 + i1*args.nb1 + i0*args.nb0);
+
+ *y = *x;
+ }
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_q2_K_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_q2_K * x = (device const block_q2_K *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const short ix = tiisg/8; // 0...3
+ const short it = tiisg%8; // 0...7
+ const short iq = it/4; // 0 or 1
+ const short ir = it%4; // 0...3
+ const short is = (8*ir)/16;// 0 or 1
+
+ device const float * y4 = y + ix * QK_K + 128 * iq + 8 * ir;
+
+ for (int ib = ix; ib < nb; ib += 4) {
+ float4 sumy = {0.f, 0.f, 0.f, 0.f};
+ for (short i = 0; i < 8; ++i) {
+ yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
+ yl[i+ 8] = y4[i+32]; sumy[1] += yl[i+ 8];
+ yl[i+16] = y4[i+64]; sumy[2] += yl[i+16];
+ yl[i+24] = y4[i+96]; sumy[3] += yl[i+24];
+ }
+
+ device const uint8_t * sc = (device const uint8_t *)x[ib].scales + 8*iq + is;
+ device const uint16_t * qs = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir;
+ device const half * dh = &x[ib].d;
+
+ for (short row = 0; row < nr0; row++) {
+ float4 acc1 = {0.f, 0.f, 0.f, 0.f};
+ float4 acc2 = {0.f, 0.f, 0.f, 0.f};
+ for (int i = 0; i < 8; i += 2) {
+ acc1[0] += yl[i+ 0] * (qs[i/2] & 0x0003);
+ acc2[0] += yl[i+ 1] * (qs[i/2] & 0x0300);
+ acc1[1] += yl[i+ 8] * (qs[i/2] & 0x000c);
+ acc2[1] += yl[i+ 9] * (qs[i/2] & 0x0c00);
+ acc1[2] += yl[i+16] * (qs[i/2] & 0x0030);
+ acc2[2] += yl[i+17] * (qs[i/2] & 0x3000);
+ acc1[3] += yl[i+24] * (qs[i/2] & 0x00c0);
+ acc2[3] += yl[i+25] * (qs[i/2] & 0xc000);
+ }
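+ // dh[1] is the block dmin; the 1/16 factor compensates for using the unshifted high nibbles of the scales below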
+ float dall = dh[0];
+ float dmin = dh[1] * 1.f/16.f;
+ sumf[row] += dall * ((acc1[0] + 1.f/256.f * acc2[0]) * (sc[0] & 0xF) * 1.f/ 1.f +
+ (acc1[1] + 1.f/256.f * acc2[1]) * (sc[2] & 0xF) * 1.f/ 4.f +
+ (acc1[2] + 1.f/256.f * acc2[2]) * (sc[4] & 0xF) * 1.f/16.f +
+ (acc1[3] + 1.f/256.f * acc2[3]) * (sc[6] & 0xF) * 1.f/64.f) -
+ dmin * (sumy[0] * (sc[0] & 0xF0) + sumy[1] * (sc[2] & 0xF0) + sumy[2] * (sc[4] & 0xF0) + sumy[3] * (sc[6] & 0xF0));
+
+ qs += args.nb01/2;
+ sc += args.nb01;
+ dh += args.nb01/2;
+ }
+
+ y4 += 4 * QK_K;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_q2_K_f32")]]
+kernel void kernel_mul_mv_q2_K_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_q2_K_f32_impl<N_R0_Q2_K, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_q3_K_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_q3_K * x = (device const block_q3_K *) (src0 + offset0);
+ device const float * yy = (device const float *) (src1 + offset1);
+
+ float yl[32];
+
+ //const uint16_t kmask1 = 0x3030;
+ //const uint16_t kmask2 = 0x0f0f;
+
+ const short tid = tiisg/4;
+ const short ix = tiisg%4;
+ const short ip = tid/4; // 0 or 1
+ const short il = 2*((tid%4)/2); // 0 or 2
+ const short ir = tid%2;
+ const short l0 = 8*ir;
+
+ // One would think that the Metal compiler would figure out that ip and il can only have
+ // 4 possible states, and optimize accordingly. Well, no. It needs help, and we do it
+ // with these two tables.
+ //
+ // Possible masks for the high bit
+ const ushort4 mm[4] = {{0x0001, 0x0100, 0x0002, 0x0200}, // ip = 0, il = 0
+ {0x0004, 0x0400, 0x0008, 0x0800}, // ip = 0, il = 2
+ {0x0010, 0x1000, 0x0020, 0x2000}, // ip = 1, il = 0
+ {0x0040, 0x4000, 0x0080, 0x8000}}; // ip = 1, il = 2
+
+ // Possible masks for the low 2 bits
+ const int4 qm[2] = {{0x0003, 0x0300, 0x000c, 0x0c00}, {0x0030, 0x3000, 0x00c0, 0xc000}};
+
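+ // e.g. ip == 1, il == 2 gives 2*ip + il/2 == 3, i.e. mm[3] = {0x0040, 0x4000, 0x0080, 0x8000}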
+ const ushort4 hm = mm[2*ip + il/2];
+
+ const short shift = 2*il;
+
+ const float v1 = il == 0 ? 4.f : 64.f;
+ const float v2 = 4.f * v1;
+
+ const uint16_t s_shift1 = 4*ip;
+ const uint16_t s_shift2 = s_shift1 + il;
+
+ const short q_offset = 32*ip + l0;
+ const short y_offset = 128*ip + 32*il + l0;
+
+ device const float * y1 = yy + ix*QK_K + y_offset;
+
+ uint32_t scales32, aux32;
+ thread uint16_t * scales16 = (thread uint16_t *)&scales32;
+ thread const int8_t * scales = (thread const int8_t *)&scales32;
+
+ float sumf1[nr0] = {0.f};
+ float sumf2[nr0] = {0.f};
+
+ for (int i = ix; i < nb; i += 4) {
+ for (short l = 0; l < 8; ++l) {
+ yl[l+ 0] = y1[l+ 0];
+ yl[l+ 8] = y1[l+16];
+ yl[l+16] = y1[l+32];
+ yl[l+24] = y1[l+48];
+ }
+
+ device const uint16_t * q = (device const uint16_t *)(x[i].qs + q_offset);
+ device const uint16_t * h = (device const uint16_t *)(x[i].hmask + l0);
+ device const uint16_t * a = (device const uint16_t *)(x[i].scales);
+ device const half * dh = &x[i].d;
+
+ for (short row = 0; row < nr0; ++row) {
+ const float d_all = (float)dh[0];
+
+ scales16[0] = a[4];
+ scales16[1] = a[5];
+ aux32 = ((scales32 >> s_shift2) << 4) & 0x30303030;
+ scales16[0] = a[il+0];
+ scales16[1] = a[il+1];
+ scales32 = ((scales32 >> s_shift1) & 0x0f0f0f0f) | aux32;
+
+ float s1 = 0, s2 = 0, s3 = 0, s4 = 0, s5 = 0, s6 = 0;
+ for (short l = 0; l < 8; l += 2) {
+ const int32_t qs = q[l/2];
+ s1 += yl[l+0] * (qs & qm[il/2][0]);
+ s2 += yl[l+1] * (qs & qm[il/2][1]);
+ s3 += ((h[l/2] & hm[0]) ? 0.f : yl[l+0]) + ((h[l/2] & hm[1]) ? 0.f : yl[l+1]);
+ s4 += yl[l+16] * (qs & qm[il/2][2]);
+ s5 += yl[l+17] * (qs & qm[il/2][3]);
+ s6 += ((h[l/2] & hm[2]) ? 0.f : yl[l+16]) + ((h[l/2] & hm[3]) ? 0.f : yl[l+17]);
+ }
+ float d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1);
+ float d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2);
+ sumf1[row] += d1 * (scales[0] - 32);
+ sumf2[row] += d2 * (scales[2] - 32);
+
+ s1 = s2 = s3 = s4 = s5 = s6 = 0;
+ for (short l = 0; l < 8; l += 2) {
+ const int32_t qs = q[l/2+8];
+ s1 += yl[l+8] * (qs & qm[il/2][0]);
+ s2 += yl[l+9] * (qs & qm[il/2][1]);
+ s3 += ((h[l/2+8] & hm[0]) ? 0.f : yl[l+8]) + ((h[l/2+8] & hm[1]) ? 0.f : yl[l+9]);
+ s4 += yl[l+24] * (qs & qm[il/2][2]);
+ s5 += yl[l+25] * (qs & qm[il/2][3]);
+ s6 += ((h[l/2+8] & hm[2]) ? 0.f : yl[l+24]) + ((h[l/2+8] & hm[3]) ? 0.f : yl[l+25]);
+ }
+ d1 = d_all * (s1 + 1.f/256.f * s2 - s3*v1);
+ d2 = d_all * (s4 + 1.f/256.f * s5 - s6*v2);
+ sumf1[row] += d1 * (scales[1] - 32);
+ sumf2[row] += d2 * (scales[3] - 32);
+
+ q += args.nb01/2;
+ h += args.nb01/2;
+ a += args.nb01/2;
+ dh += args.nb01/2;
+ }
+
+ y1 += 4 * QK_K;
+ }
+
+ for (int row = 0; row < nr0; ++row) {
+ const float sumf = (sumf1[row] + 0.25f * sumf2[row]) / (1 << shift);
+ sumf1[row] = simd_sum(sumf);
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ if (tiisg == 0) {
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ dst_f32[first_row + row] = sumf1[row];
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_q3_K_f32")]]
+kernel void kernel_mul_mv_q3_K_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_q3_K_f32_impl<N_R0_Q3_K, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_q4_K_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ constexpr uint16_t kmask1 = 0x3f3f;
+ constexpr uint16_t kmask2 = 0x0f0f;
+ constexpr uint16_t kmask3 = 0xc0c0;
+
+ const short ix = tiisg/8; // 0...3
+ const short it = tiisg%8; // 0...7
+ const short iq = it/4; // 0 or 1
+ const short ir = it%4; // 0...3
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_q4_K * x = (device const block_q4_K *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[16];
+ float yh[16];
+
+ float sumf[nr0]={0.f};
+
+ device const float * y4 = y + ix * QK_K + 64 * iq + 8 * ir;
+
+ uint16_t sc16[4];
+ thread const uint8_t * sc8 = (thread const uint8_t *)sc16;
+
+ for (int ib = ix; ib < nb; ib += 4) {
+ float4 sumy = {0.f, 0.f, 0.f, 0.f};
+
+ for (short i = 0; i < 8; ++i) {
+ yl[i+0] = y4[i+ 0]; sumy[0] += yl[i+0];
+ yl[i+8] = y4[i+ 32]; sumy[1] += yl[i+8];
+ yh[i+0] = y4[i+128]; sumy[2] += yh[i+0];
+ yh[i+8] = y4[i+160]; sumy[3] += yh[i+8];
+ }
+
+ device const uint16_t * sc = (device const uint16_t *)x[ib].scales + iq;
+ device const uint16_t * q1 = (device const uint16_t *)x[ib].qs + 16 * iq + 4 * ir;
+ device const half * dh = &x[ib].d;
+
+ for (short row = 0; row < nr0; row++) {
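+ // unpack the 6-bit scales (sc8[0,1,4,5]) and mins (sc8[2,3,6,7]) for the sub-blocks being processed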
+ sc16[0] = sc[0] & kmask1;
+ sc16[1] = sc[2] & kmask1;
+ sc16[2] = ((sc[4] >> 0) & kmask2) | ((sc[0] & kmask3) >> 2);
+ sc16[3] = ((sc[4] >> 4) & kmask2) | ((sc[2] & kmask3) >> 2);
+
+ device const uint16_t * q2 = q1 + 32;
+
+ float4 acc1 = {0.f, 0.f, 0.f, 0.f};
+ float4 acc2 = {0.f, 0.f, 0.f, 0.f};
+
+ FOR_UNROLL (short i = 0; i < 4; ++i) {
+ acc1[0] += yl[2*i + 0] * (q1[i] & 0x000F);
+ acc1[1] += yl[2*i + 1] * (q1[i] & 0x0F00);
+ acc1[2] += yl[2*i + 8] * (q1[i] & 0x00F0);
+ acc1[3] += yl[2*i + 9] * (q1[i] & 0xF000);
+ acc2[0] += yh[2*i + 0] * (q2[i] & 0x000F);
+ acc2[1] += yh[2*i + 1] * (q2[i] & 0x0F00);
+ acc2[2] += yh[2*i + 8] * (q2[i] & 0x00F0);
+ acc2[3] += yh[2*i + 9] * (q2[i] & 0xF000);
+ }
+
+ sumf[row] += dh[0] * ((acc1[0] + 1.f/256.f * acc1[1]) * sc8[0] +
+ (acc1[2] + 1.f/256.f * acc1[3]) * sc8[1] * 1.f/16.f +
+ (acc2[0] + 1.f/256.f * acc2[1]) * sc8[4] +
+ (acc2[2] + 1.f/256.f * acc2[3]) * sc8[5] * 1.f/16.f) -
+ dh[1] * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]);
+
+ q1 += args.nb01/2;
+ sc += args.nb01/2;
+ dh += args.nb01/2;
+ }
+
+ y4 += 4 * QK_K;
+ }
+
+ device float * dst_f32 = (device float *) dst + (int64_t)im*args.ne0*args.ne1 + (int64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_q4_K_f32")]]
+kernel void kernel_mul_mv_q4_K_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_q4_K_f32_impl<N_R0_Q4_K, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_q5_K_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_q5_K * x = (device const block_q5_K *) (src0 + offset0);
+ device const float * yy = (device const float *) (src1 + offset1);
+
+ float sumf[nr0]={0.f};
+
+ float yl[16], yh[16];
+
+ constexpr uint16_t kmask1 = 0x3f3f;
+ constexpr uint16_t kmask2 = 0x0f0f;
+ constexpr uint16_t kmask3 = 0xc0c0;
+
+ const short tid = tiisg/4;
+ const short ix = tiisg%4;
+ const short iq = tid/4;
+ const short ir = tid%4;
+
+ const short l0 = 8*ir;
+ const short q_offset = 32*iq + l0;
+ const short y_offset = 64*iq + l0;
+
+ const uint8_t hm1 = 1u << (2*iq);
+ const uint8_t hm2 = hm1 << 1;
+ const uint8_t hm3 = hm1 << 4;
+ const uint8_t hm4 = hm2 << 4;
+
+ uint16_t sc16[4];
+ thread const uint8_t * sc8 = (thread const uint8_t *)sc16;
+
+ device const float * y1 = yy + ix*QK_K + y_offset;
+
+ for (int i = ix; i < nb; i += 4) {
+ device const uint8_t * q1 = x[i].qs + q_offset;
+ device const uint8_t * qh = x[i].qh + l0;
+ device const half * dh = &x[i].d;
+ device const uint16_t * a = (device const uint16_t *)x[i].scales + iq;
+
+ device const float * y2 = y1 + 128;
+ float4 sumy = {0.f, 0.f, 0.f, 0.f};
+ for (short l = 0; l < 8; ++l) {
+ yl[l+0] = y1[l+ 0]; sumy[0] += yl[l+0];
+ yl[l+8] = y1[l+32]; sumy[1] += yl[l+8];
+ yh[l+0] = y2[l+ 0]; sumy[2] += yh[l+0];
+ yh[l+8] = y2[l+32]; sumy[3] += yh[l+8];
+ }
+
+ for (short row = 0; row < nr0; ++row) {
+ device const uint8_t * q2 = q1 + 64;
+
+ sc16[0] = a[0] & kmask1;
+ sc16[1] = a[2] & kmask1;
+ sc16[2] = ((a[4] >> 0) & kmask2) | ((a[0] & kmask3) >> 2);
+ sc16[3] = ((a[4] >> 4) & kmask2) | ((a[2] & kmask3) >> 2);
+
+ float4 acc1 = {0.f};
+ float4 acc2 = {0.f};
+ FOR_UNROLL (short l = 0; l < 8; ++l) {
+ uint8_t h = qh[l];
+ acc1[0] += yl[l+0] * (q1[l] & 0x0F);
+ acc1[1] += yl[l+8] * (q1[l] & 0xF0);
+ acc1[2] += yh[l+0] * (q2[l] & 0x0F);
+ acc1[3] += yh[l+8] * (q2[l] & 0xF0);
+ acc2[0] += h & hm1 ? yl[l+0] : 0.f;
+ acc2[1] += h & hm2 ? yl[l+8] : 0.f;
+ acc2[2] += h & hm3 ? yh[l+0] : 0.f;
+ acc2[3] += h & hm4 ? yh[l+8] : 0.f;
+ }
+
+ sumf[row] += dh[0] * (sc8[0] * (acc1[0] + 16.f*acc2[0]) +
+ sc8[1] * (acc1[1]/16.f + 16.f*acc2[1]) +
+ sc8[4] * (acc1[2] + 16.f*acc2[2]) +
+ sc8[5] * (acc1[3]/16.f + 16.f*acc2[3])) -
+ dh[1] * (sumy[0] * sc8[2] + sumy[1] * sc8[3] + sumy[2] * sc8[6] + sumy[3] * sc8[7]);
+
+ q1 += args.nb01;
+ qh += args.nb01;
+ dh += args.nb01/2;
+ a += args.nb01/2;
+ }
+
+ y1 += 4 * QK_K;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ const float tot = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = tot;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_q5_K_f32")]]
+kernel void kernel_mul_mv_q5_K_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_q5_K_f32_impl<N_R0_Q5_K, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_q6_K_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ constexpr uint8_t kmask1 = 0x03;
+ constexpr uint8_t kmask2 = 0x0C;
+ constexpr uint8_t kmask3 = 0x30;
+ constexpr uint8_t kmask4 = 0xC0;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_q6_K * x = (device const block_q6_K *) (src0 + offset0);
+ device const float * yy = (device const float *) (src1 + offset1);
+
+ float sumf[nr0] = { 0.f };
+
+ float yl[16];
+
+ const short tid = tiisg/2;
+ const short ix = tiisg%2;
+ const short ip = tid/8; // 0 or 1
+ const short il = tid%8;
+ const short l0 = 4*il;
+ const short is = 8*ip + l0/16;
+
+ const short y_offset = 128*ip + l0;
+ const short q_offset_l = 64*ip + l0;
+ const short q_offset_h = 32*ip + l0;
+
+ for (int i = ix; i < nb; i += 2) {
+ device const uint8_t * q1 = x[i].ql + q_offset_l;
+ device const uint8_t * q2 = q1 + 32;
+ device const uint8_t * qh = x[i].qh + q_offset_h;
+ device const int8_t * sc = x[i].scales + is;
+ device const half * dh = &x[i].d;
+
+ device const float * y = yy + i * QK_K + y_offset;
+
+ for (short l = 0; l < 4; ++l) {
+ yl[4*l + 0] = y[l + 0];
+ yl[4*l + 1] = y[l + 32];
+ yl[4*l + 2] = y[l + 64];
+ yl[4*l + 3] = y[l + 96];
+ }
+
+ for (short row = 0; row < nr0; ++row) {
+ float4 sums = {0.f, 0.f, 0.f, 0.f};
+
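+ // reassemble each 6-bit value from 4 low bits (ql) and 2 high bits (qh), then recenter by -32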
+ FOR_UNROLL (short l = 0; l < 4; ++l) {
+ sums[0] += yl[4*l + 0] * ((int8_t)((q1[l] & 0xF) | ((qh[l] & kmask1) << 4)) - 32);
+ sums[1] += yl[4*l + 1] * ((int8_t)((q2[l] & 0xF) | ((qh[l] & kmask2) << 2)) - 32);
+ sums[2] += yl[4*l + 2] * ((int8_t)((q1[l] >> 4) | ((qh[l] & kmask3) << 0)) - 32);
+ sums[3] += yl[4*l + 3] * ((int8_t)((q2[l] >> 4) | ((qh[l] & kmask4) >> 2)) - 32);
+ }
+
+ sumf[row] += dh[0] * (sums[0] * sc[0] + sums[1] * sc[2] + sums[2] * sc[4] + sums[3] * sc[6]);
+
+ q1 += args.nb01;
+ q2 += args.nb01;
+ qh += args.nb01;
+ sc += args.nb01;
+ dh += args.nb01/2;
+ }
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_q6_K_f32")]]
+kernel void kernel_mul_mv_q6_K_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_q6_K_f32_impl<N_R0_Q6_K, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg);
+}
+
+// ======================= "True" 2-bit
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_iq2_xxs_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq2_xxs * x = (device const block_iq2_xxs *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const int nb32 = nb * (QK_K / 32);
+
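+ // stage the iq2xxs grid and sign tables in threadgroup memory; each thread copies a few entries before the barrier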
+ threadgroup uint64_t * svalues = (threadgroup uint64_t *)(shmem);
+ threadgroup uint8_t * ssigns = (threadgroup uint8_t *)(svalues + 256);
+ {
+ int nval = 4;
+ int pos = (32*sgitg + tiisg)*nval;
+ for (int i = 0; i < nval; ++i) svalues[pos + i] = iq2xxs_grid[pos + i];
+ nval = 2;
+ pos = (32*sgitg + tiisg)*nval;
+ for (int i = 0; i < nval; ++i) ssigns[pos+i] = ksigns_iq2xs[pos+i];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
+ const int ix = tiisg;
+
+ device const float * y4 = y + 32 * ix;
+
+ for (int ib32 = ix; ib32 < nb32; ib32 += 32) {
+ for (short i = 0; i < 32; ++i) {
+ yl[i] = y4[i];
+ }
+
+ const int ibl = ib32 / (QK_K / 32);
+ const int ib = ib32 % (QK_K / 32);
+
+ device const block_iq2_xxs * xr = x + ibl;
+ device const uint16_t * q2 = xr->qs + 4 * ib;
+ device const half * dh = &xr->d;
+
+ for (short row = 0; row < nr0; row++) {
+ const float db = dh[0];
+ device const uint8_t * aux8 = (device const uint8_t *)q2;
+ const uint32_t aux32 = q2[2] | (q2[3] << 16);
+ const float d = db * (0.5f + (aux32 >> 28));
+
+ float sum = 0;
+ for (short l = 0; l < 4; ++l) {
+ const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(svalues + aux8[l]);
+ const uint8_t signs = ssigns[(aux32 >> 7*l) & 127];
+ for (short j = 0; j < 8; ++j) {
+ sum += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
+ }
+ }
+ sumf[row] += d * sum;
+
+ dh += args.nb01/2;
+ q2 += args.nb01/2;
+ }
+
+ y4 += 32 * 32;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all * 0.25f;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq2_xxs_f32")]]
+kernel void kernel_mul_mv_iq2_xxs_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ kernel_mul_mv_iq2_xxs_f32_impl<N_R0_IQ2_XXS, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_iq2_xs_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq2_xs * x = (device const block_iq2_xs *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const int nb32 = nb * (QK_K / 32);
+
+ threadgroup uint64_t * svalues = (threadgroup uint64_t *)(shmem);
+ threadgroup uint8_t * ssigns = (threadgroup uint8_t *)(svalues + 512);
+ {
+ int nval = 8;
+ int pos = (32*sgitg + tiisg)*nval;
+ for (int i = 0; i < nval; ++i) svalues[pos + i] = iq2xs_grid[pos + i];
+ nval = 2;
+ pos = (32*sgitg + tiisg)*nval;
+ for (int i = 0; i < nval; ++i) ssigns[pos+i] = ksigns_iq2xs[pos+i];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
+ const int ix = tiisg;
+
+ device const float * y4 = y + 32 * ix;
+
+ for (int ib32 = ix; ib32 < nb32; ib32 += 32) {
+ for (short i = 0; i < 32; ++i) {
+ yl[i] = y4[i];
+ }
+
+ const int ibl = ib32 / (QK_K / 32);
+ const int ib = ib32 % (QK_K / 32);
+
+ device const block_iq2_xs * xr = x + ibl;
+ device const uint16_t * q2 = xr->qs + 4 * ib;
+ device const uint8_t * sc = xr->scales + ib;
+ device const half * dh = &xr->d;
+
+ for (short row = 0; row < nr0; row++) {
+ const float db = dh[0];
+ const uint8_t ls1 = sc[0] & 0xf;
+ const uint8_t ls2 = sc[0] >> 4;
+ const float d1 = db * (0.5f + ls1);
+ const float d2 = db * (0.5f + ls2);
+
+ float sum1 = 0, sum2 = 0;
+ for (short l = 0; l < 2; ++l) {
+ const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(svalues + (q2[l] & 511));
+ const uint8_t signs = ssigns[(q2[l] >> 9)];
+ for (short j = 0; j < 8; ++j) {
+ sum1 += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
+ }
+ }
+ for (short l = 2; l < 4; ++l) {
+ const threadgroup uint8_t * grid = (const threadgroup uint8_t *)(svalues + (q2[l] & 511));
+ const uint8_t signs = ssigns[(q2[l] >> 9)];
+ for (short j = 0; j < 8; ++j) {
+ sum2 += yl[8*l + j] * grid[j] * (signs & kmask_iq2xs[j] ? -1.f : 1.f);
+ }
+ }
+ sumf[row] += d1 * sum1 + d2 * sum2;
+
+ dh += args.nb01/2;
+ q2 += args.nb01/2;
+ sc += args.nb01;
+ }
+
+ y4 += 32 * 32;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all * 0.25f;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq2_xs_f32")]]
+kernel void kernel_mul_mv_iq2_xs_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq2_xs_f32_impl<N_R0_IQ2_XS, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_iq3_xxs_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq3_xxs * x = (device const block_iq3_xxs *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const int nb32 = nb * (QK_K / 32);
+
+ threadgroup uint32_t * svalues = (threadgroup uint32_t *)(shmem);
+ threadgroup uint8_t * ssigns = (threadgroup uint8_t *)(svalues + 256);
+ {
+ int nval = 4;
+ int pos = (32*sgitg + tiisg)*nval;
+ for (int i = 0; i < nval; ++i) svalues[pos + i] = iq3xxs_grid[pos + i];
+ nval = 2;
+ pos = (32*sgitg + tiisg)*nval;
+ for (int i = 0; i < nval; ++i) ssigns[pos+i] = ksigns_iq2xs[pos+i];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
+ const int ix = tiisg;
+
+ device const float * y4 = y + 32 * ix;
+
+ for (int ib32 = ix; ib32 < nb32; ib32 += 32) {
+ for (short i = 0; i < 32; ++i) {
+ yl[i] = y4[i];
+ }
+
+ const int ibl = ib32 / (QK_K / 32);
+ const int ib = ib32 % (QK_K / 32);
+
+ device const block_iq3_xxs * xr = x + ibl;
+ device const uint8_t * q3 = xr->qs + 8 * ib;
+ device const uint16_t * gas = (device const uint16_t *)(xr->qs + QK_K/4) + 2 * ib;
+ device const half * dh = &xr->d;
+
+ for (short row = 0; row < nr0; row++) {
+ const float db = dh[0];
+ const uint32_t aux32 = gas[0] | (gas[1] << 16);
+ const float d = db * (0.5f + (aux32 >> 28));
+
+ float2 sum = {0};
+ for (short l = 0; l < 4; ++l) {
+ const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(svalues + q3[2*l+0]);
+ const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(svalues + q3[2*l+1]);
+ const uint8_t signs = ssigns[(aux32 >> 7*l) & 127];
+ for (short j = 0; j < 4; ++j) {
+ sum[0] += yl[8*l + j + 0] * grid1[j] * (signs & kmask_iq2xs[j+0] ? -1.f : 1.f);
+ sum[1] += yl[8*l + j + 4] * grid2[j] * (signs & kmask_iq2xs[j+4] ? -1.f : 1.f);
+ }
+ }
+ sumf[row] += d * (sum[0] + sum[1]);
+
+ dh += args.nb01/2;
+ q3 += args.nb01;
+ gas += args.nb01/2;
+ }
+
+ y4 += 32 * 32;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all * 0.5f;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq3_xxs_f32")]]
+kernel void kernel_mul_mv_iq3_xxs_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq3_xxs_f32_impl<N_R0_IQ3_XXS, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_iq3_s_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq3_s * x = (device const block_iq3_s *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const int nb32 = nb * (QK_K / 32);
+
+ threadgroup uint32_t * svalues = (threadgroup uint32_t *) shmem;
+ {
+ int nval = 8;
+ int pos = (32*sgitg + tiisg)*nval;
+ for (int i = 0; i < nval; ++i) svalues[pos + i] = iq3s_grid[pos + i];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
+ const int ix = tiisg;
+
+ device const float * y4 = y + 32 * ix;
+
+ for (int ib32 = ix; ib32 < nb32; ib32 += 32) {
+ for (short i = 0; i < 32; ++i) {
+ yl[i] = y4[i];
+ }
+
+ const int ibl = ib32 / (QK_K / 32);
+ const int ib = ib32 % (QK_K / 32);
+
+ device const block_iq3_s * xr = x + ibl;
+ device const uint8_t * qs = xr->qs + 8 * ib;
+ device const uint8_t * qh = xr->qh + ib;
+ device const uint8_t * sc = xr->scales + (ib/2);
+ device const uint8_t * signs = xr->signs + 4 * ib;
+ device const half * dh = &xr->d;
+
+ for (short row = 0; row < nr0; row++) {
+ const float db = dh[0];
+ const float d = db * (1 + 2*((sc[0] >> 4*(ib%2)) & 0xf));
+
+ float2 sum = {0};
+ for (short l = 0; l < 4; ++l) {
+ const threadgroup uint32_t * table1 = qh[0] & kmask_iq2xs[2*l+0] ? svalues + 256 : svalues;
+ const threadgroup uint32_t * table2 = qh[0] & kmask_iq2xs[2*l+1] ? svalues + 256 : svalues;
+ const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(table1 + qs[2*l+0]);
+ const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(table2 + qs[2*l+1]);
+ for (short j = 0; j < 4; ++j) {
+ sum[0] += yl[8*l + j + 0] * grid1[j] * select(1, -1, signs[l] & kmask_iq2xs[j+0]);
+ sum[1] += yl[8*l + j + 4] * grid2[j] * select(1, -1, signs[l] & kmask_iq2xs[j+4]);
+ }
+ }
+ sumf[row] += d * (sum[0] + sum[1]);
+
+ dh += args.nb01/2;
+ qs += args.nb01;
+ qh += args.nb01;
+ sc += args.nb01;
+ signs += args.nb01;
+ }
+
+ y4 += 32 * 32;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq3_s_f32")]]
+kernel void kernel_mul_mv_iq3_s_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq3_s_f32_impl<N_R0_IQ3_S, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_iq2_s_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq2_s * x = (device const block_iq2_s *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const int nb32 = nb * (QK_K / 32);
+
+ //threadgroup uint64_t * svalues = (threadgroup uint64_t *) shmem;
+ //{
+ // int nval = 32;
+ // int pos = (32*sgitg + tiisg)*nval;
+ // for (int i = 0; i < nval; ++i) svalues[pos + i] = iq2s_grid[pos + i];
+ // threadgroup_barrier(mem_flags::mem_threadgroup);
+ //}
+
+ const short ix = tiisg;
+
+ device const float * y4 = y + 32 * ix;
+
+ for (int ib32 = ix; ib32 < nb32; ib32 += 32) {
+ for (short i = 0; i < 32; ++i) {
+ yl[i] = y4[i];
+ }
+
+ const int ibl = ib32 / (QK_K / 32);
+ const int ib = ib32 % (QK_K / 32);
+
+ device const block_iq2_s * xr = x + ibl;
+ device const uint8_t * qs = xr->qs + 4 * ib;
+ device const uint8_t * qh = xr->qh + ib;
+ device const uint8_t * sc = xr->scales + ib;
+ device const uint8_t * signs = qs + QK_K/8;
+ device const half * dh = &xr->d;
+
+ for (short row = 0; row < nr0; row++) {
+ const float db = dh[0];
+ const float d1 = db * (0.5f + (sc[0] & 0xf));
+ const float d2 = db * (0.5f + (sc[0] >> 4));
+
+ float2 sum = {0};
+ for (short l = 0; l < 2; ++l) {
+ //const threadgroup uint8_t * grid1 = (const threadgroup uint8_t *)(svalues + (qs[l+0] | ((qh[0] << (8-2*l)) & 0x300)));
+ //const threadgroup uint8_t * grid2 = (const threadgroup uint8_t *)(svalues + (qs[l+2] | ((qh[0] << (4-2*l)) & 0x300)));
+ constant uint8_t * grid1 = (constant uint8_t *)(iq2s_grid + (qs[l+0] | ((qh[0] << (8-2*l)) & 0x300)));
+ constant uint8_t * grid2 = (constant uint8_t *)(iq2s_grid + (qs[l+2] | ((qh[0] << (4-2*l)) & 0x300)));
+ for (short j = 0; j < 8; ++j) {
+ sum[0] += yl[8*l + j + 0] * grid1[j] * select(1, -1, signs[l+0] & kmask_iq2xs[j]);
+ sum[1] += yl[8*l + j + 16] * grid2[j] * select(1, -1, signs[l+2] & kmask_iq2xs[j]);
+ }
+ }
+ sumf[row] += d1 * sum[0] + d2 * sum[1];
+
+ dh += args.nb01/2;
+ qs += args.nb01;
+ qh += args.nb01;
+ sc += args.nb01;
+ signs += args.nb01;
+ }
+
+ y4 += 32 * 32;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all * 0.25f;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq2_s_f32")]]
+kernel void kernel_mul_mv_iq2_s_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq2_s_f32_impl<N_R0_IQ2_S, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_iq1_s_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq1_s * x = (device const block_iq1_s *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const int nb32 = nb * (QK_K / 32);
+
+ const short ix = tiisg;
+
+ device const float * y4 = y + 32 * ix;
+
+ for (int ib32 = ix; ib32 < nb32; ib32 += 32) {
+ float sumy = 0;
+ for (short i = 0; i < 32; ++i) {
+ yl[i] = y4[i];
+ sumy += yl[i];
+ }
+
+ const int ibl = ib32 / (QK_K / 32);
+ const int ib = ib32 % (QK_K / 32);
+
+ device const block_iq1_s * xr = x + ibl;
+ device const uint8_t * qs = xr->qs + 4 * ib;
+ device const uint16_t * qh = xr->qh + ib;
+ device const half * dh = &xr->d;
+
+ for (short row = 0; row < nr0; row++) {
+ constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700)));
+ constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 5) & 0x700)));
+ constant uint8_t * grid3 = (constant uint8_t *)(iq1s_grid_gpu + (qs[2] | ((qh[0] << 2) & 0x700)));
+ constant uint8_t * grid4 = (constant uint8_t *)(iq1s_grid_gpu + (qs[3] | ((qh[0] >> 1) & 0x700)));
+
+ float sum = 0;
+ for (short j = 0; j < 4; ++j) {
+ sum += yl[j+ 0] * (grid1[j] & 0xf) + yl[j+ 4] * (grid1[j] >> 4)
+ + yl[j+ 8] * (grid2[j] & 0xf) + yl[j+12] * (grid2[j] >> 4)
+ + yl[j+16] * (grid3[j] & 0xf) + yl[j+20] * (grid3[j] >> 4)
+ + yl[j+24] * (grid4[j] & 0xf) + yl[j+28] * (grid4[j] >> 4);
+ }
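+ // qh bits 12..14 hold the 3-bit block scale; bit 15 selects the sign of the IQ1S_DELTA shift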
+ sumf[row] += (float)dh[0] * (sum + sumy * (qh[0] & 0x8000 ? -1 - IQ1S_DELTA : -1 + IQ1S_DELTA)) * (2*((qh[0] >> 12) & 7) + 1);
+
+ dh += args.nb01/2;
+ qs += args.nb01;
+ qh += args.nb01/2;
+ }
+
+ y4 += 32 * 32;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq1_s_f32")]]
+kernel void kernel_mul_mv_iq1_s_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq1_s_f32_impl<N_R0_IQ1_S, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg);
+}
+
+template<int nr0, typename args_t>
+void kernel_mul_mv_iq1_m_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ const int nb = args.ne00/QK_K;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * nr0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq1_m * x = (device const block_iq1_m *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ float yl[32];
+ float sumf[nr0]={0.f};
+
+ const int nb32 = nb * (QK_K / 32);
+
+ const short ix = tiisg;
+
+ device const float * y4 = y + 32 * ix;
+
+ iq1m_scale_t scale;
+
+ for (int ib32 = ix; ib32 < nb32; ib32 += 32) {
+ float4 sumy = {0.f};
+ for (short i = 0; i < 8; ++i) {
+ yl[i+ 0] = y4[i+ 0]; sumy[0] += yl[i+ 0];
+ yl[i+ 8] = y4[i+ 8]; sumy[1] += yl[i+ 8];
+ yl[i+16] = y4[i+16]; sumy[2] += yl[i+16];
+ yl[i+24] = y4[i+24]; sumy[3] += yl[i+24];
+ }
+
+ const int ibl = ib32 / (QK_K / 32);
+ const int ib = ib32 % (QK_K / 32);
+
+ device const block_iq1_m * xr = x + ibl;
+ device const uint8_t * qs = xr->qs + 4 * ib;
+ device const uint8_t * qh = xr->qh + 2 * ib;
+ device const uint16_t * sc = (device const uint16_t *)xr->scales;
+
+ for (short row = 0; row < nr0; row++) {
+ scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
+
+ constant uint8_t * grid1 = (constant uint8_t *)(iq1s_grid_gpu + (qs[0] | ((qh[0] << 8) & 0x700)));
+ constant uint8_t * grid2 = (constant uint8_t *)(iq1s_grid_gpu + (qs[1] | ((qh[0] << 4) & 0x700)));
+ constant uint8_t * grid3 = (constant uint8_t *)(iq1s_grid_gpu + (qs[2] | ((qh[1] << 8) & 0x700)));
+ constant uint8_t * grid4 = (constant uint8_t *)(iq1s_grid_gpu + (qs[3] | ((qh[1] << 4) & 0x700)));
+
+ float2 sum = {0.f};
+ for (short j = 0; j < 4; ++j) {
+ sum[0] += yl[j+ 0] * (grid1[j] & 0xf) + yl[j+ 4] * (grid1[j] >> 4)
+ + yl[j+ 8] * (grid2[j] & 0xf) + yl[j+12] * (grid2[j] >> 4);
+ sum[1] += yl[j+16] * (grid3[j] & 0xf) + yl[j+20] * (grid3[j] >> 4)
+ + yl[j+24] * (grid4[j] & 0xf) + yl[j+28] * (grid4[j] >> 4);
+ }
+ const float delta1 = sumy[0] * (qh[0] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA) + sumy[1] * (qh[0] & 0x80 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA);
+ const float delta2 = sumy[2] * (qh[1] & 0x08 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA) + sumy[3] * (qh[1] & 0x80 ? -1 - IQ1M_DELTA : -1 + IQ1M_DELTA);
+
+ sumf[row] += (float)scale.f16 * ((sum[0] + delta1) * (2*((sc[ib/2] >> (6*(ib%2)+0)) & 7) + 1) +
+ (sum[1] + delta2) * (2*((sc[ib/2] >> (6*(ib%2)+3)) & 7) + 1));
+
+ sc += args.nb01/2;
+ qs += args.nb01;
+ qh += args.nb01;
+ }
+
+ y4 += 32 * 32;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < nr0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq1_m_f32")]]
+kernel void kernel_mul_mv_iq1_m_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq1_m_f32_impl<N_R0_IQ1_M, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, nullptr, tgpig, tiisg, sgitg);
+}
+
+template<int NR0, typename args_t>
+void kernel_mul_mv_iq4_nl_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ threadgroup float * shmem_f32 = (threadgroup float *) shmem;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * NR0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq4_nl * x = (device const block_iq4_nl *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ const int nb = args.ne00/QK4_NL;
+ const int ns01 = args.nb01/args.nb00;
+
+ const short ix = tiisg/2; // 0...15
+ const short it = tiisg%2; // 0 or 1
+
+ shmem_f32[tiisg] = kvalues_iq4nl_f[tiisg%16];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ float4 yl[4];
+ float sumf[NR0]={0.f};
+
+ device const float * yb = y + ix*QK4_NL + it*8;
+
+ uint32_t aux32[2];
+ thread const uint8_t * q8 = (thread const uint8_t *)aux32;
+
+ float4 qf1, qf2;
+
+ // [TAG_MUL_MV_WEIRD]
+ for (int ib = ix; ib < nb && ib < ns01; ib += 16) {
+ device const float4 * y4 = (device const float4 *)yb;
+ yl[0] = y4[0];
+ yl[1] = y4[4];
+ yl[2] = y4[1];
+ yl[3] = y4[5];
+
+ for (short row = 0; row < NR0; row++) {
+ device const block_iq4_nl & xb = x[row*ns01 + ib];
+ device const uint16_t * q4 = (device const uint16_t *)(xb.qs + 8*it);
+
+ float4 acc1 = {0.f}, acc2 = {0.f};
+
+ aux32[0] = q4[0] | (q4[1] << 16);
+ aux32[1] = (aux32[0] >> 4) & 0x0f0f0f0f;
+ aux32[0] &= 0x0f0f0f0f;
+ qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]};
+ qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]};
+ acc1 += yl[0] * qf1;
+ acc2 += yl[1] * qf2;
+
+ aux32[0] = q4[2] | (q4[3] << 16);
+ aux32[1] = (aux32[0] >> 4) & 0x0f0f0f0f;
+ aux32[0] &= 0x0f0f0f0f;
+ qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]};
+ qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]};
+ acc1 += yl[2] * qf1;
+ acc2 += yl[3] * qf2;
+
+ acc1 += acc2;
+
+ sumf[row] += (float)xb.d * (acc1[0] + acc1[1] + acc1[2] + acc1[3]);
+ }
+
+ yb += 16 * QK4_NL;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < NR0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq4_nl_f32")]]
+kernel void kernel_mul_mv_iq4_nl_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq4_nl_f32_impl<N_R0_IQ4_NL, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<int NR0, typename args_t>
+void kernel_mul_mv_iq4_xs_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ threadgroup float * shmem_f32 = (threadgroup float *) shmem;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+ const int first_row = (r0 * NSG + sgitg) * NR0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_iq4_xs * x = (device const block_iq4_xs *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ const int nb = args.ne00/QK_K;
+ const int ns01 = args.nb01/args.nb00;
+
+ const short ix = tiisg/16; // 0 or 1
+ const short it = tiisg%16; // 0...15
+ const short ib = it/2;
+ const short il = it%2;
+
+ shmem_f32[tiisg] = kvalues_iq4nl_f[tiisg%16];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ float4 yl[4];
+ float sumf[NR0]={0.f};
+
+ device const float * yb = y + ix * QK_K + ib * 32 + il * 8;
+
+ uint32_t aux32[2];
+ thread const uint8_t * q8 = (thread const uint8_t *)aux32;
+
+ float4 qf1, qf2;
+
+ // [TAG_MUL_MV_WEIRD]
+ for (int ibl = ix; ibl < nb && ibl < ns01; ibl += 2) {
+ device const float4 * y4 = (device const float4 *)yb;
+ yl[0] = y4[0];
+ yl[1] = y4[4];
+ yl[2] = y4[1];
+ yl[3] = y4[5];
+
+ for (short row = 0; row < NR0; ++row) {
+ device const block_iq4_xs & xb = x[row*ns01 + ibl];
+ device const uint32_t * q4 = (device const uint32_t *)(xb.qs + 16*ib + 8*il);
+
+ float4 acc1 = {0.f}, acc2 = {0.f};
+
+ aux32[0] = (q4[0] ) & 0x0f0f0f0f;
+ aux32[1] = (q4[0] >> 4) & 0x0f0f0f0f;
+ qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]};
+ qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]};
+ acc1 += yl[0] * qf1;
+ acc2 += yl[1] * qf2;
+
+ aux32[0] = (q4[1] ) & 0x0f0f0f0f;
+ aux32[1] = (q4[1] >> 4) & 0x0f0f0f0f;
+ qf1 = {shmem_f32[q8[0]], shmem_f32[q8[1]], shmem_f32[q8[2]], shmem_f32[q8[3]]};
+ qf2 = {shmem_f32[q8[4]], shmem_f32[q8[5]], shmem_f32[q8[6]], shmem_f32[q8[7]]};
+ acc1 += yl[2] * qf1;
+ acc2 += yl[3] * qf2;
+
+ acc1 += acc2;
+
+ const int ls = (((xb.scales_l[ib/2] >> 4*(ib%2)) & 0xf) | (((xb.scales_h >> 2*ib) & 3) << 4)) - 32;
+ sumf[row] += (float)xb.d * ls * (acc1[0] + acc1[1] + acc1[2] + acc1[3]);
+ }
+
+ yb += 2 * QK_K;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < NR0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_iq4_xs_f32")]]
+kernel void kernel_mul_mv_iq4_xs_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_iq4_xs_f32_impl<N_R0_IQ4_XS, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<int NR0, typename args_t>
+void kernel_mul_mv_mxfp4_f32_impl(
+ args_t args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg) {
+ const short NSG = FC_mul_mv_nsg;
+
+ threadgroup float * shmem_f32 = (threadgroup float *) shmem;
+
+ const int r0 = tgpig.x;
+ const int r1 = tgpig.y;
+ const int im = tgpig.z;
+
+ const int first_row = (r0 * NSG + sgitg) * NR0;
+
+ const uint i12 = im%args.ne12;
+ const uint i13 = im/args.ne12;
+
+ const uint64_t offset0 = first_row*args.nb01 + (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const uint64_t offset1 = r1*args.nb11 + (i12 )*args.nb12 + (i13 )*args.nb13;
+
+ device const block_mxfp4 * x = (device const block_mxfp4 *) (src0 + offset0);
+ device const float * y = (device const float *) (src1 + offset1);
+
+ const int nb = args.ne00/QK_MXFP4;
+ const int ns01 = args.nb01/args.nb00; // this can be larger than nb for permuted src0 tensors
+
+ const short ix = tiisg/2; // 0...15
+ const short it = tiisg%2; // 0 or 1
+
+ shmem_f32[tiisg] = kvalues_mxfp4_f[tiisg%16];
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ float4 yl[4];
+ float sumf[NR0]={0.f};
+
+ device const float * yb = y + ix*QK_MXFP4 + it*8;
+
+ // note: just the check `ib < nb` is enough, but adding the redundant `&& ib < ns01` check makes the kernel a bit faster
+ // no idea why that is - needs some deeper investigation [TAG_MUL_MV_WEIRD]
+ for (int ib = ix; ib < nb && ib < ns01; ib += 16) {
+ device const float4 * y4 = (device const float4 *) yb;
+
+ yl[0] = y4[0];
+ yl[1] = y4[4];
+ yl[2] = y4[1];
+ yl[3] = y4[5];
+
+ FOR_UNROLL (short row = 0; row < NR0; row++) {
+ device const block_mxfp4 & xb = x[row*ns01 + ib];
+ device const uint8_t * q2 = (device const uint8_t *)(xb.qs + 8*it);
+
+ float4 acc1 = yl[0]*float4(shmem_f32[q2[0] & 0x0F], shmem_f32[q2[1] & 0x0F], shmem_f32[q2[2] & 0x0F], shmem_f32[q2[3] & 0x0F]);
+ float4 acc2 = yl[1]*float4(shmem_f32[q2[0] >> 4 ], shmem_f32[q2[1] >> 4 ], shmem_f32[q2[2] >> 4 ], shmem_f32[q2[3] >> 4 ]);
+ float4 acc3 = yl[2]*float4(shmem_f32[q2[4] & 0x0F], shmem_f32[q2[5] & 0x0F], shmem_f32[q2[6] & 0x0F], shmem_f32[q2[7] & 0x0F]);
+ float4 acc4 = yl[3]*float4(shmem_f32[q2[4] >> 4 ], shmem_f32[q2[5] >> 4 ], shmem_f32[q2[6] >> 4 ], shmem_f32[q2[7] >> 4 ]);
+
+ acc1 = (acc1 + acc3) + (acc2 + acc4);
+
+ sumf[row] += e8m0_to_fp32(xb.e) * ((acc1[0] + acc1[1]) + (acc1[2] + acc1[3]));
+ }
+
+ yb += 16 * QK_MXFP4;
+ }
+
+ device float * dst_f32 = (device float *) dst + (uint64_t)im*args.ne0*args.ne1 + (uint64_t)r1*args.ne0;
+
+ for (int row = 0; row < NR0 && first_row + row < args.ne0; ++row) {
+ float sum_all = simd_sum(sumf[row]);
+ if (tiisg == 0) {
+ dst_f32[first_row + row] = sum_all;
+ }
+ }
+}
+
+[[host_name("kernel_mul_mv_mxfp4_f32")]]
+kernel void kernel_mul_mv_mxfp4_f32(
+ constant ggml_metal_kargs_mul_mv & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ kernel_mul_mv_mxfp4_f32_impl<N_R0_MXFP4, constant ggml_metal_kargs_mul_mv &>(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+template<typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread float4x4 &)>
+kernel void kernel_get_rows_q(
+ constant ggml_metal_kargs_get_rows & args,
+ device const void * src0,
+ device const void * src1,
+ device void * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 ntg [[threads_per_threadgroup]]) {
+ const int32_t iw0 = tgpig.x/args.ne10;
+ const int32_t i10 = tgpig.x%args.ne10;
+ const int32_t i11 = tgpig.y;
+ const int32_t i12 = tgpig.z;
+
+ const int32_t r = ((const device int32_t *) ((const device char *) src1 + i12*args.nb12 + i11*args.nb11 + i10*args.nb10))[0];
+
+ const int32_t i02 = i11;
+ const int32_t i03 = i12;
+
+ auto psrc = (device const block_q *) ((const device char *) src0 + i03*args.nb03 + i02*args.nb02 + r*args.nb01);
+ auto pdst = (device float4x4 *) (( device char *) dst + i12*args.nb3 + i11*args.nb2 + i10*args.nb1);
+
+ for (int ind = iw0*ntg.x + tiitg; ind < args.ne00t;) {
+ float4x4 temp;
+ dequantize_func(psrc + ind/nl, ind%nl, temp);
+ pdst[ind] = temp;
+
+ break;
+ }
+}
+
+template<typename T0, typename T>
+kernel void kernel_get_rows_f(
+ constant ggml_metal_kargs_get_rows & args,
+ device const void * src0,
+ device const void * src1,
+ device void * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort3 ntg [[threads_per_threadgroup]]) {
+ const int32_t iw0 = tgpig.x/args.ne10;
+ const int32_t i10 = tgpig.x%args.ne10;
+ const int32_t i11 = tgpig.y;
+ const int32_t i12 = tgpig.z;
+
+ const int32_t r = ((const device int32_t *) ((const device char *) src1 + i12*args.nb12 + i11*args.nb11 + i10*args.nb10))[0];
+
+ const int32_t i02 = i11;
+ const int32_t i03 = i12;
+
+ auto psrc = (const device T0 *) ((const device char *) src0 + i03*args.nb03 + i02*args.nb02 + r*args.nb01);
+ auto pdst = ( device T *) (( device char *) dst + i12*args.nb3 + i11*args.nb2 + i10*args.nb1);
+
+ for (int ind = iw0*ntg.x + tiitg; ind < args.ne00t;) {
+ pdst[ind] = psrc[ind];
+
+ break;
+ }
+}
+
+template<typename TI, typename block_q, void (*quantize_func)(device const float *, device block_q &)>
+kernel void kernel_set_rows_q32(
+ constant ggml_metal_kargs_set_rows & args,
+ device const void * src0,
+ device const void * src1,
+ device float * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiitg[[thread_index_in_threadgroup]],
+ uint3 tptg [[threads_per_threadgroup]]) {
+ const int32_t i03 = tgpig.z;
+ const int32_t i02 = tgpig.y;
+
+ const int32_t i12 = i03%args.ne12;
+ const int32_t i11 = i02%args.ne11;
+
+ const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x;
+ if (i01 >= args.ne01) {
+ return;
+ }
+
+ const int32_t i10 = i01;
+ const TI i1 = ((const device TI *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0];
+
+ device block_q * dst_row = ( device block_q *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3);
+ const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03);
+
+ for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) {
+ quantize_func(src_row + 32*ind, dst_row[ind]);
+ }
+}
+
+template<typename T, typename TI>
+kernel void kernel_set_rows_f(
+ constant ggml_metal_kargs_set_rows & args,
+ device const void * src0,
+ device const void * src1,
+ device float * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ uint tiitg[[thread_index_in_threadgroup]],
+ uint3 tptg [[threads_per_threadgroup]]) {
+ const int32_t i03 = tgpig.z;
+ const int32_t i02 = tgpig.y;
+
+ const int32_t i12 = i03%args.ne12;
+ const int32_t i11 = i02%args.ne11;
+
+ const int32_t i01 = tgpig.x*tptg.y + tiitg/tptg.x;
+ if (i01 >= args.ne01) {
+ return;
+ }
+
+ const int32_t i10 = i01;
+ const TI i1 = ((const device TI *) ((const device char *) src1 + i10*args.nb10 + i11*args.nb11 + i12*args.nb12))[0];
+
+ device T * dst_row = ( device T *) (( device char *) dst + i1*args.nb1 + i02*args.nb2 + i03*args.nb3);
+ const device float * src_row = (const device float *) ((const device char *) src0 + i01*args.nb01 + i02*args.nb02 + i03*args.nb03);
+
+ for (int ind = tiitg%tptg.x; ind < args.nk0; ind += tptg.x) {
+ dst_row[ind] = (T) src_row[ind];
+ }
+}
+
+kernel void kernel_diag_f32(
+ constant ggml_metal_kargs_diag & args,
+ device const char * src0,
+ device char * dst,
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]]) {
+ constexpr short NW = N_SIMDWIDTH;
+
+ const int32_t i3 = tgpig.z;
+ const int32_t i2 = tgpig.y;
+ const int32_t i1 = tgpig.x;
+
+ device const float * src0_ptr = (device const float *)(src0 + i2*args.nb02 + i3*args.nb03);
+ device float * dst_ptr = (device float *)(dst + i1*args.nb01 + i2*args.nb2 + i3*args.nb3);
+
+ for (int i0 = tiitg; i0 < args.ne0; i0 += NW) {
+ dst_ptr[i0] = i0 == i1 ? src0_ptr[i0] : 0.0f;
+ }
+}
+
+constant bool FC_mul_mm_bc_inp [[function_constant(FC_MUL_MM + 0)]];
+constant bool FC_mul_mm_bc_out [[function_constant(FC_MUL_MM + 1)]];
+
+// each block_q contains 16*nl weights
+template<typename S0, typename S0_4x4, typename S0_8x8, typename S1, typename S1_2x4, typename S1_8x8, typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread S0_4x4 &), typename T0, typename T0_4x4, typename T1, typename T1_2x4>
+kernel void kernel_mul_mm(
+ constant ggml_metal_kargs_mul_mm & args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+
+ threadgroup S0 * sa = (threadgroup S0 *)(shmem);
+ threadgroup S1 * sb = (threadgroup S1 *)(shmem + 4096);
+
+ threadgroup float * sc = (threadgroup float *)(shmem);
+
+ constexpr int NR0 = 64;
+ constexpr int NR1 = 32;
+
+ constexpr int NK = 32;
+ constexpr int NL0 = NK/16;
+ constexpr int NL1 = NK/8;
+
+ const int im = tgpig.z;
+ const int r0 = tgpig.y*NR0;
+ const int r1 = tgpig.x*NR1;
+
+ // if this block is of 64x32 shape or smaller
+ const short nr0 = (args.ne0 - r0 < NR0) ? (args.ne0 - r0) : NR0;
+ const short nr1 = (args.ne1 - r1 < NR1) ? (args.ne1 - r1) : NR1;
+
+ // a thread shouldn't load data outside of the matrix
+ const short lr0 = ((short)tiitg/NL0) < nr0 ? ((short)tiitg/NL0) : nr0 - 1; // 0 .. 63
+ const short lr1 = ((short)tiitg/NL1) < nr1 ? ((short)tiitg/NL1) : nr1 - 1; // 0 .. 31
+
+ const short il0 = (tiitg % NL0);
+
+ short il = il0;
+
+ const int i12 = im%args.ne12;
+ const int i13 = im/args.ne12;
+
+ const uint64_t offset0 = (i12/args.r2)*args.nb02 + (i13/args.r3)*args.nb03;
+ const short offset1 = il0/nl;
+
+ device const block_q * x = (device const block_q *)(src0 + args.nb01*(r0 + lr0) + offset0) + offset1;
+
+ const short iy = 8*(tiitg % NL1);
+
+ device const T1 * y = (device const T1 *)(src1
+ + args.nb13*i13
+ + args.nb12*i12
+ + args.nb11*(r1 + lr1)
+ + args.nb10*iy);
+
+#ifndef GGML_METAL_HAS_TENSOR
+ S0_8x8 ma[4];
+ S1_8x8 mb[2];
+
+ simdgroup_float8x8 mc[8];
+
+ for (short i = 0; i < 8; i++){
+ mc[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
+ }
+#else
+ auto tA = tensor<threadgroup S0, dextents<int32_t, 2>, tensor_inline>(sa, dextents<int32_t, 2>(NK, NR0));
+ auto tB = tensor<threadgroup S1, dextents<int32_t, 2>, tensor_inline>(sb, dextents<int32_t, 2>(NR1, NK ));
+
+ mpp::tensor_ops::matmul2d<
+ mpp::tensor_ops::matmul2d_descriptor(NR1, NR0, NK, false, true, false, mpp::tensor_ops::matmul2d_descriptor::mode::multiply_accumulate),
+ execution_simdgroups<4>> mm;
+
+ auto cT = mm.get_destination_cooperative_tensor<decltype(tA), decltype(tB), float>();
+#endif
+
+ for (int loop_k = 0; loop_k < args.ne00; loop_k += NK) {
+#ifndef GGML_METAL_HAS_TENSOR
+ // load data and store to threadgroup memory
+ if (is_same<T0_4x4, block_q>::value && FC_mul_mm_bc_inp) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // no need for dequantization
+ for (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ //const short lx = i%8;
+ //const short ly = (tiitg/NL0)%8;
+ const short lx = (tiitg/NL0)%8;
+ const short ly = i%8;
+
+ const short ib = 8*sx + sy;
+
+ *(sa + 64*ib + 8*ly + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0;
+ }
+ } else {
+ S0_4x4 temp_a;
+ dequantize_func(x, il, temp_a);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ FOR_UNROLL (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ //const short lx = i%8;
+ //const short ly = (tiitg/NL0)%8;
+ const short lx = (tiitg/NL0)%8;
+ const short ly = i%8;
+
+ const short ib = 8*sx + sy;
+
+            // NOTE: the indexed store below is massively slower than the pointer form used instead - reason unknown
+ //sa[64*ib + 8*ly + lx] = temp_a[i/4][i%4];
+
+ *(sa + 64*ib + 8*ly + lx) = temp_a[i/4][i%4];
+ }
+ }
+
+ if (FC_mul_mm_bc_inp) {
+ for (short i = 0; i < 8; ++i) {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ const short lx = i;
+ const short ly = (tiitg/NL1)%8;
+ //const short lx = (tiitg/NL1)%8;
+ //const short ly = i;
+
+ const short ib = 4*sx + sy;
+
+ *(sb + 64*ib + 8*ly + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0;
+ }
+ } else {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ const short dx = sx;
+ const short dy = sy;
+
+ const short ly = (tiitg/NL1)%8;
+
+ const short ib = 4*sx + sy;
+
+ *(threadgroup S1_2x4 *)(sb + 64*ib + 8*ly) = (S1_2x4)(*((device T1_2x4 *) y));
+ }
+#else
+ // load data and store to threadgroup memory
+ if (is_same<T0_4x4, block_q>::value && FC_mul_mm_bc_inp) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // no need for dequantization
+ for (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ const short lx = i%8;
+ const short ly = (tiitg/NL0)%8;
+ //const short lx = (tiitg/NL0)%8;
+ //const short ly = i%8;
+
+ *(sa + NK*(8*sy + ly) + 8*sx + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0;
+ }
+ } else {
+ S0_4x4 temp_a;
+ dequantize_func(x, il, temp_a);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ FOR_UNROLL (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ const short lx = i%8;
+ const short ly = (tiitg/NL0)%8;
+ //const short lx = (tiitg/NL0)%8;
+ //const short ly = i%8;
+
+ *(sa + NK*(8*sy + ly) + 8*sx + lx) = temp_a[i/4][i%4];
+ }
+ }
+
+ if (FC_mul_mm_bc_inp) {
+ for (short i = 0; i < 8; ++i) {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ const short lx = i;
+ const short ly = (tiitg/NL1)%8;
+ //const short lx = (tiitg/NL1)%8;
+ //const short ly = i;
+
+ *(sb + NK*(8*sy + ly) + 8*sx + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0;
+ }
+ } else {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ //const short lx = i;
+ const short ly = (tiitg/NL1)%8;
+ //const short lx = (tiitg/NL1)%8;
+ //const short ly = i;
+
+ *(threadgroup S1_2x4 *)(sb + NK*(8*sy + ly) + 8*sx) = (S1_2x4)(*((device T1_2x4 *) y));
+ }
+#endif
+
+ il = (il + 2 < nl) ? il + 2 : il % 2;
+ x = (il < 2) ? x + (2 + nl - 1)/nl : x;
+
+ y += NK;
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+#ifndef GGML_METAL_HAS_TENSOR
+ // load matrices from threadgroup memory and conduct outer products
+ threadgroup const S0 * lsma = (sa + 4*64*(sgitg%2));
+ threadgroup const S1 * lsmb = (sb + 2*64*(sgitg/2));
+
+ FOR_UNROLL (short ik = 0; ik < NK/8; ik++) {
+ simdgroup_barrier(mem_flags::mem_none);
+
+ FOR_UNROLL (short i = 0; i < 4; i++) {
+ simdgroup_load(ma[i], lsma + 64*i, 8, 0, false);
+ }
+
+ simdgroup_barrier(mem_flags::mem_none);
+
+ FOR_UNROLL (short i = 0; i < 2; i++) {
+ simdgroup_load(mb[i], lsmb + 64*i, 8, 0, false);
+ }
+
+ simdgroup_barrier(mem_flags::mem_none);
+
+ FOR_UNROLL (short i = 0; i < 8; i++){
+ simdgroup_multiply_accumulate(mc[i], mb[i/4], ma[i%4], mc[i]);
+ }
+
+ lsma += 8*64;
+ lsmb += 4*64;
+ }
+#else
+ auto sA = tA.slice(0, 0);
+ auto sB = tB.slice(0, 0);
+
+ mm.run(sB, sA, cT);
+#endif
+ }
+
+ if (!FC_mul_mm_bc_out || (r0 + NR0 <= args.ne0 && r1 + NR1 <= args.ne1)) {
+ // if no bounds checks on the output are needed, we can directly write to device memory
+#ifdef GGML_METAL_HAS_TENSOR
+ device float * C = (device float *) dst +
+ r0 + \
+ r1 * args.ne0 + im*args.ne1*args.ne0;
+
+ auto tC = tensor<device float, dextents<int32_t, 2>, tensor_inline>(C, dextents<int32_t, 2>(args.ne0, NR1));
+ cT.store(tC);
+#else
+ device float * C = (device float *) dst +
+ (r0 + 32*(sgitg & 1)) + \
+ (r1 + 16*(sgitg >> 1)) * args.ne0 + im*args.ne1*args.ne0;
+
+ for (short i = 0; i < 8; i++) {
+ simdgroup_store(mc[i], C + 8*(i%4) + 8*args.ne0*(i/4), args.ne0, 0, false);
+ }
+#endif
+ } else {
+ // block is smaller than 64x32, we should avoid writing data outside of the matrix
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ threadgroup float * temp_str = ((threadgroup float *) shmem) + 32*(sgitg&1) + (16*(sgitg >> 1))*NR0;
+
+#ifdef GGML_METAL_HAS_TENSOR
+ auto tC = tensor<threadgroup float, dextents<int32_t, 2>, tensor_inline>(sc, dextents<int32_t, 2>(NR0, NR1));
+ cT.store(tC);
+#else
+ for (short i = 0; i < 8; i++) {
+ simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*NR0*(i/4), NR0, 0, false);
+ }
+#endif
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (sgitg == 0) {
+ for (int j = tiitg; j < nr1; j += NR1) {
+ device float * D = (device float *) dst + r0 + (r1 + j)*args.ne0 + im*args.ne1*args.ne0;
+ device float4 * D4 = (device float4 *) D;
+
+ threadgroup float * C = temp_str + (j*NR0);
+ threadgroup float4 * C4 = (threadgroup float4 *) C;
+
+ int i = 0;
+ for (; i < nr0/4; i++) {
+ *(D4 + i) = *(C4 + i);
+ }
+
+ i *= 4;
+ for (; i < nr0; i++) {
+ *(D + i) = *(C + i);
+ }
+ }
+ }
+ }
+}
+
+template<short ne20> // n_expert_used
+kernel void kernel_mul_mm_id_map0(
+ constant ggml_metal_kargs_mul_mm_id_map0 & args,
+ device const char * src2,
+ device char * htpe,
+ device char * hids,
+ threadgroup char * shmem [[threadgroup(0)]],
+ ushort tpitg[[thread_position_in_threadgroup]],
+ ushort ntg[[threads_per_threadgroup]]) {
+ const short ide = tpitg; // expert id
+
+ uint32_t n_all = 0;
+
+ device int32_t * ids_i32 = (device int32_t *) hids + ide*args.ne21;
+
+ for (int i21 = 0; i21 < args.ne21; i21 += ntg) { // n_tokens
+ if (i21 + tpitg < args.ne21) {
+ device const int32_t * src2_i32 = (device const int32_t *) (src2 + (i21 + tpitg)*args.nb21);
+
+ threadgroup uint16_t * sids = (threadgroup uint16_t *) shmem + tpitg*ne20;
+
+ #pragma unroll(ne20)
+ for (short i20 = 0; i20 < ne20; i20++) {
+ sids[i20] = src2_i32[i20];
+ }
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ for (short t = 0; t < ntg; t++) {
+ if (i21 + t >= args.ne21) {
+ break;
+ }
+
+ threadgroup const uint16_t * sids = (threadgroup const uint16_t *) shmem + t*ne20;
+
+ short sel = 0;
+ #pragma unroll(ne20)
+ for (short i20 = 0; i20 < ne20; i20++) {
+ sel += (sids[i20] == ide)*(i20 + 1);
+ }
+
+ ids_i32[n_all] = (i21 + t)*ne20 + sel - 1;
+
+ n_all += sel > 0;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+ }
+
+ device uint32_t * tpe_u32 = (device uint32_t *) (htpe);
+ tpe_u32[ide] = n_all;
+}
+
+typedef decltype(kernel_mul_mm_id_map0<1>) kernel_mul_mm_id_map0_t;
+
+template [[host_name("kernel_mul_mm_id_map0_ne20_1" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<1>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_2" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<2>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_4" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<4>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_5" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<5>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_6" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<6>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_8" )]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<8>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_10")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<10>;
+template [[host_name("kernel_mul_mm_id_map0_ne20_16")]] kernel kernel_mul_mm_id_map0_t kernel_mul_mm_id_map0<16>;
+
+template<typename S0, typename S0_4x4, typename S0_8x8, typename S1, typename S1_2x4, typename S1_8x8, typename block_q, short nl, void (*dequantize_func)(device const block_q *, short, thread S0_4x4 &), typename T0, typename T0_4x4, typename T1, typename T1_2x4>
+kernel void kernel_mul_mm_id(
+ constant ggml_metal_kargs_mul_mm_id & args,
+ device const char * src0,
+ device const char * src1,
+ device const char * htpe,
+ device const char * hids,
+ device char * dst,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ threadgroup S0 * sa = (threadgroup S0 *)(shmem);
+ threadgroup S1 * sb = (threadgroup S1 *)(shmem + 4096);
+
+ threadgroup float * sc = (threadgroup float *)(shmem);
+
+ constexpr int NR0 = 64;
+ constexpr int NR1 = 32;
+
+ constexpr int NK = 32;
+ constexpr int NL0 = NK/16;
+ constexpr int NL1 = NK/8;
+
+ const int im = tgpig.z; // expert
+ const int r0 = tgpig.y*NR0;
+ const int r1 = tgpig.x*NR1;
+
+ device const uint32_t * tpe_u32 = (device const uint32_t *) (htpe);
+ device const int32_t * ids_i32 = (device const int32_t *) (hids);
+
+ const int32_t neh1 = tpe_u32[im];
+
+ if (r1 >= neh1) {
+ return;
+ }
+
+ // if this block is of 64x32 shape or smaller
+ const short nr0 = (args.ne0 - r0 < NR0) ? (args.ne0 - r0) : NR0;
+ const short nr1 = ( neh1 - r1 < NR1) ? ( neh1 - r1) : NR1;
+
+ // a thread shouldn't load data outside of the matrix
+ const short lr0 = ((short)tiitg/NL0) < nr0 ? ((short)tiitg/NL0) : nr0 - 1; // 0 .. 63
+ const short lr1 = ((short)tiitg/NL1) < nr1 ? ((short)tiitg/NL1) : nr1 - 1; // 0 .. 31
+
+ const short il0 = (tiitg % NL0);
+
+ short il = il0;
+
+ const int id = ids_i32[im*args.ne21 + r1 + lr1];
+
+ const short i11 = (id % args.ne20) % args.ne11;
+ const short i12 = (id / args.ne20);
+ const short i13 = 0;
+
+ const uint64_t offset0 = im*args.nb02 + i13*args.nb03;
+ const short offset1 = il0/nl;
+
+ device const block_q * x = (device const block_q *)(src0 + args.nb01*(r0 + lr0) + offset0) + offset1;
+
+ const short iy = 8*(tiitg % NL1);
+
+ device const T1 * y = (device const T1 *)(src1
+ + args.nb13*i13
+ + args.nb12*i12
+ + args.nb11*i11
+ + args.nb10*iy);
+
+#ifndef GGML_METAL_HAS_TENSOR
+ S0_8x8 ma[4];
+ S1_8x8 mb[2];
+
+ simdgroup_float8x8 mc[8];
+
+ for (short i = 0; i < 8; i++){
+ mc[i] = make_filled_simdgroup_matrix<float, 8>(0.f);
+ }
+#else
+ auto tA = tensor<threadgroup S0, dextents<int32_t, 2>, tensor_inline>(sa, dextents<int32_t, 2>(NK, NR0));
+ auto tB = tensor<threadgroup S1, dextents<int32_t, 2>, tensor_inline>(sb, dextents<int32_t, 2>(NR1, NK ));
+
+ mpp::tensor_ops::matmul2d<
+ mpp::tensor_ops::matmul2d_descriptor(NR1, NR0, NK, false, true, false, mpp::tensor_ops::matmul2d_descriptor::mode::multiply_accumulate),
+ execution_simdgroups<4>> mm;
+
+ auto cT = mm.get_destination_cooperative_tensor<decltype(tA), decltype(tB), float>();
+#endif
+
+ for (int loop_k = 0; loop_k < args.ne00; loop_k += NK) {
+#ifndef GGML_METAL_HAS_TENSOR
+ // load data and store to threadgroup memory
+ if (is_same<T0_4x4, block_q>::value && FC_mul_mm_bc_inp) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // no need for dequantization
+ for (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ //const short lx = i%8;
+ //const short ly = (tiitg/NL0)%8;
+ const short lx = (tiitg/NL0)%8;
+ const short ly = i%8;
+
+ const short ib = 8*sx + sy;
+
+ *(sa + 64*ib + 8*ly + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0;
+ }
+ } else {
+ S0_4x4 temp_a;
+ dequantize_func(x, il, temp_a);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ FOR_UNROLL (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ //const short lx = i%8;
+ //const short ly = (tiitg/NL0)%8;
+ const short lx = (tiitg/NL0)%8;
+ const short ly = i%8;
+
+ const short ib = 8*sx + sy;
+
+            // NOTE: the indexed store below is massively slower than the pointer form used instead - reason unknown
+ //sa[64*ib + 8*ly + lx] = temp_a[i/4][i%4];
+
+ *(sa + 64*ib + 8*ly + lx) = temp_a[i/4][i%4];
+ }
+ }
+
+ if (FC_mul_mm_bc_inp) {
+ for (short i = 0; i < 8; ++i) {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ const short lx = i;
+ const short ly = (tiitg/NL1)%8;
+ //const short lx = (tiitg/NL1)%8;
+ //const short ly = i;
+
+ const short ib = 4*sx + sy;
+
+ *(sb + 64*ib + 8*ly + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0;
+ }
+ } else {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ const short dx = sx;
+ const short dy = sy;
+
+ const short ly = (tiitg/NL1)%8;
+
+ const short ib = 4*sx + sy;
+
+ *(threadgroup S1_2x4 *)(sb + 64*ib + 8*ly) = (S1_2x4)(*((device T1_2x4 *) y));
+ }
+#else
+ // load data and store to threadgroup memory
+ if (is_same<T0_4x4, block_q>::value && FC_mul_mm_bc_inp) {
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ // no need for dequantization
+ for (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ const short lx = i%8;
+ const short ly = (tiitg/NL0)%8;
+ //const short lx = (tiitg/NL0)%8;
+ //const short ly = i%8;
+
+ *(sa + NK*(8*sy + ly) + 8*sx + lx) = loop_k + 16*il + i < args.ne00 ? *((device T0 *) x + i) : 0;
+ }
+ } else {
+ S0_4x4 temp_a;
+ dequantize_func(x, il, temp_a);
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ FOR_UNROLL (short i = 0; i < 16; i++) {
+ const short sx = 2*il0 + i/8;
+ const short sy = (tiitg/NL0)/8;
+
+ const short lx = i%8;
+ const short ly = (tiitg/NL0)%8;
+ //const short lx = (tiitg/NL0)%8;
+ //const short ly = i%8;
+
+ *(sa + NK*(8*sy + ly) + 8*sx + lx) = temp_a[i/4][i%4];
+ }
+ }
+
+ if (FC_mul_mm_bc_inp) {
+ for (short i = 0; i < 8; ++i) {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ const short lx = i;
+ const short ly = (tiitg/NL1)%8;
+ //const short lx = (tiitg/NL1)%8;
+ //const short ly = i;
+
+ *(sb + NK*(8*sy + ly) + 8*sx + lx) = loop_k + iy + i < args.ne00 ? (S1) *((device T1 *) y + i) : 0;
+ }
+ } else {
+ const short sx = (tiitg%NL1);
+ const short sy = (tiitg/NL1)/8;
+
+ //const short lx = i;
+ const short ly = (tiitg/NL1)%8;
+ //const short lx = (tiitg/NL1)%8;
+ //const short ly = i;
+
+ *(threadgroup S1_2x4 *)(sb + NK*(8*sy + ly) + 8*sx) = (S1_2x4)(*((device T1_2x4 *) y));
+ }
+#endif
+
+ il = (il + 2 < nl) ? il + 2 : il % 2;
+ x = (il < 2) ? x + (2 + nl - 1)/nl : x;
+
+ y += NK;
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+#ifndef GGML_METAL_HAS_TENSOR
+ // load matrices from threadgroup memory and conduct outer products
+ threadgroup const S0 * lsma = (sa + 4*64*(sgitg%2));
+ threadgroup const S1 * lsmb = (sb + 2*64*(sgitg/2));
+
+ FOR_UNROLL (short ik = 0; ik < NK/8; ik++) {
+ simdgroup_barrier(mem_flags::mem_none);
+
+ FOR_UNROLL (short i = 0; i < 4; i++) {
+ simdgroup_load(ma[i], lsma + 64*i, 8, 0, false);
+ }
+
+ simdgroup_barrier(mem_flags::mem_none);
+
+ FOR_UNROLL (short i = 0; i < 2; i++) {
+ simdgroup_load(mb[i], lsmb + 64*i, 8, 0, false);
+ }
+
+ simdgroup_barrier(mem_flags::mem_none);
+
+ FOR_UNROLL (short i = 0; i < 8; i++){
+ simdgroup_multiply_accumulate(mc[i], mb[i/4], ma[i%4], mc[i]);
+ }
+
+ lsma += 8*64;
+ lsmb += 4*64;
+ }
+#else
+ auto sA = tA.slice(0, 0);
+ auto sB = tB.slice(0, 0);
+
+ mm.run(sB, sA, cT);
+#endif
+ }
+
+ // block is smaller than 64x32, we should avoid writing data outside of the matrix
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+#ifdef GGML_METAL_HAS_TENSOR
+ auto tC = tensor<threadgroup float, dextents<int32_t, 2>, tensor_inline>(sc, dextents<int32_t, 2>(NR0, NR1));
+ cT.store(tC);
+#else
+ threadgroup float * temp_str = ((threadgroup float *) shmem) + 32*(sgitg&1) + (16*(sgitg >> 1))*NR0;
+
+ for (short i = 0; i < 8; i++) {
+ simdgroup_store(mc[i], temp_str + 8*(i%4) + 8*NR0*(i/4), NR0, 0, false);
+ }
+#endif
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ for (short j = sgitg; j < nr1; j += 4) {
+ const int id = ids_i32[im*args.ne21 + r1 + j];
+
+ const short ide = id % args.ne20;
+ const short idt = id / args.ne20;
+
+ device float * D = (device float *) dst + r0 + ide*args.ne0 + idt*args.ne1*args.ne0;
+ device float4 * D4 = (device float4 *) D;
+
+ threadgroup float * C = (threadgroup float *) shmem + j*NR0;
+ threadgroup float4 * C4 = (threadgroup float4 *) C;
+
+ int i = tiisg;
+ for (; i < nr0/4; i += 32) {
+ *(D4 + i) = *(C4 + i);
+ }
+
+ i = (4*(nr0/4)) + tiisg;
+ for (; i < nr0; i += 32) {
+ *(D + i) = *(C + i);
+ }
+ }
+}
+
+#define QK_NL 16
+
+//
+// get rows
+//
+
+typedef decltype(kernel_get_rows_f<float, float>) get_rows_f_t;
+
+template [[host_name("kernel_get_rows_f32")]] kernel get_rows_f_t kernel_get_rows_f<float, float>;
+template [[host_name("kernel_get_rows_f16")]] kernel get_rows_f_t kernel_get_rows_f<half, float>;
+template [[host_name("kernel_get_rows_i32")]] kernel get_rows_f_t kernel_get_rows_f<int32_t, int32_t>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_get_rows_bf16")]] kernel get_rows_f_t kernel_get_rows_f<bfloat, float>;
+#endif
+
+typedef decltype(kernel_get_rows_q<block_q4_0, 2, dequantize_q4_0>) get_rows_q_t;
+
+template [[host_name("kernel_get_rows_q4_0")]] kernel get_rows_q_t kernel_get_rows_q<block_q4_0, 2, dequantize_q4_0>;
+template [[host_name("kernel_get_rows_q4_1")]] kernel get_rows_q_t kernel_get_rows_q<block_q4_1, 2, dequantize_q4_1>;
+template [[host_name("kernel_get_rows_q5_0")]] kernel get_rows_q_t kernel_get_rows_q<block_q5_0, 2, dequantize_q5_0>;
+template [[host_name("kernel_get_rows_q5_1")]] kernel get_rows_q_t kernel_get_rows_q<block_q5_1, 2, dequantize_q5_1>;
+template [[host_name("kernel_get_rows_q8_0")]] kernel get_rows_q_t kernel_get_rows_q<block_q8_0, 2, dequantize_q8_0>;
+template [[host_name("kernel_get_rows_mxfp4")]] kernel get_rows_q_t kernel_get_rows_q<block_mxfp4, 2, dequantize_mxfp4>;
+template [[host_name("kernel_get_rows_q2_K")]] kernel get_rows_q_t kernel_get_rows_q<block_q2_K, QK_NL, dequantize_q2_K>;
+template [[host_name("kernel_get_rows_q3_K")]] kernel get_rows_q_t kernel_get_rows_q<block_q3_K, QK_NL, dequantize_q3_K>;
+template [[host_name("kernel_get_rows_q4_K")]] kernel get_rows_q_t kernel_get_rows_q<block_q4_K, QK_NL, dequantize_q4_K>;
+template [[host_name("kernel_get_rows_q5_K")]] kernel get_rows_q_t kernel_get_rows_q<block_q5_K, QK_NL, dequantize_q5_K>;
+template [[host_name("kernel_get_rows_q6_K")]] kernel get_rows_q_t kernel_get_rows_q<block_q6_K, QK_NL, dequantize_q6_K>;
+template [[host_name("kernel_get_rows_iq2_xxs")]] kernel get_rows_q_t kernel_get_rows_q<block_iq2_xxs, QK_NL, dequantize_iq2_xxs>;
+template [[host_name("kernel_get_rows_iq2_xs")]] kernel get_rows_q_t kernel_get_rows_q<block_iq2_xs, QK_NL, dequantize_iq2_xs>;
+template [[host_name("kernel_get_rows_iq3_xxs")]] kernel get_rows_q_t kernel_get_rows_q<block_iq3_xxs, QK_NL, dequantize_iq3_xxs>;
+template [[host_name("kernel_get_rows_iq3_s")]] kernel get_rows_q_t kernel_get_rows_q<block_iq3_s, QK_NL, dequantize_iq3_s>;
+template [[host_name("kernel_get_rows_iq2_s")]] kernel get_rows_q_t kernel_get_rows_q<block_iq2_s, QK_NL, dequantize_iq2_s>;
+template [[host_name("kernel_get_rows_iq1_s")]] kernel get_rows_q_t kernel_get_rows_q<block_iq1_s, QK_NL, dequantize_iq1_s>;
+template [[host_name("kernel_get_rows_iq1_m")]] kernel get_rows_q_t kernel_get_rows_q<block_iq1_m, QK_NL, dequantize_iq1_m>;
+template [[host_name("kernel_get_rows_iq4_nl")]] kernel get_rows_q_t kernel_get_rows_q<block_iq4_nl, 2, dequantize_iq4_nl>;
+template [[host_name("kernel_get_rows_iq4_xs")]] kernel get_rows_q_t kernel_get_rows_q<block_iq4_xs, QK_NL, dequantize_iq4_xs>;
+
+//
+// set rows
+//
+
+typedef decltype(kernel_set_rows_f<float, int64_t>) set_rows_f_t;
+
+template [[host_name("kernel_set_rows_f32_i64")]] kernel set_rows_f_t kernel_set_rows_f<float, int64_t>;
+template [[host_name("kernel_set_rows_f32_i32")]] kernel set_rows_f_t kernel_set_rows_f<float, int32_t>;
+template [[host_name("kernel_set_rows_f16_i64")]] kernel set_rows_f_t kernel_set_rows_f<half, int64_t>;
+template [[host_name("kernel_set_rows_f16_i32")]] kernel set_rows_f_t kernel_set_rows_f<half, int32_t>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_set_rows_bf16_i64")]] kernel set_rows_f_t kernel_set_rows_f<bfloat, int64_t>;
+template [[host_name("kernel_set_rows_bf16_i32")]] kernel set_rows_f_t kernel_set_rows_f<bfloat, int32_t>;
+#endif
+
+typedef decltype(kernel_set_rows_q32<int64_t, block_q8_0, quantize_q8_0>) set_rows_q32_t;
+
+template [[host_name("kernel_set_rows_q8_0_i64")]] kernel set_rows_q32_t kernel_set_rows_q32<int64_t, block_q8_0, quantize_q8_0>;
+template [[host_name("kernel_set_rows_q8_0_i32")]] kernel set_rows_q32_t kernel_set_rows_q32<int32_t, block_q8_0, quantize_q8_0>;
+template [[host_name("kernel_set_rows_q4_0_i64")]] kernel set_rows_q32_t kernel_set_rows_q32<int64_t, block_q4_0, quantize_q4_0>;
+template [[host_name("kernel_set_rows_q4_0_i32")]] kernel set_rows_q32_t kernel_set_rows_q32<int32_t, block_q4_0, quantize_q4_0>;
+template [[host_name("kernel_set_rows_q4_1_i64")]] kernel set_rows_q32_t kernel_set_rows_q32<int64_t, block_q4_1, quantize_q4_1>;
+template [[host_name("kernel_set_rows_q4_1_i32")]] kernel set_rows_q32_t kernel_set_rows_q32<int32_t, block_q4_1, quantize_q4_1>;
+template [[host_name("kernel_set_rows_q5_0_i64")]] kernel set_rows_q32_t kernel_set_rows_q32<int64_t, block_q5_0, quantize_q5_0>;
+template [[host_name("kernel_set_rows_q5_0_i32")]] kernel set_rows_q32_t kernel_set_rows_q32<int32_t, block_q5_0, quantize_q5_0>;
+template [[host_name("kernel_set_rows_q5_1_i64")]] kernel set_rows_q32_t kernel_set_rows_q32<int64_t, block_q5_1, quantize_q5_1>;
+template [[host_name("kernel_set_rows_q5_1_i32")]] kernel set_rows_q32_t kernel_set_rows_q32<int32_t, block_q5_1, quantize_q5_1>;
+template [[host_name("kernel_set_rows_iq4_nl_i64")]] kernel set_rows_q32_t kernel_set_rows_q32<int64_t, block_iq4_nl, quantize_iq4_nl>;
+template [[host_name("kernel_set_rows_iq4_nl_i32")]] kernel set_rows_q32_t kernel_set_rows_q32<int32_t, block_iq4_nl, quantize_iq4_nl>;
+
+//
+// matrix-matrix multiplication
+//
+
+typedef decltype(kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, float4x4, 1, dequantize_f32, float, float4x4, float, float2x4>) mul_mm_t;
+
+template [[host_name("kernel_mul_mm_f32_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, float4x4, 1, dequantize_f32, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_f16_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, half4x4, 1, dequantize_f16, half, half4x4, float, float2x4>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_mul_mm_bf16_f32")]] kernel mul_mm_t kernel_mul_mm<bfloat, bfloat4x4, simdgroup_bfloat8x8, bfloat, bfloat2x4, simdgroup_bfloat8x8, bfloat4x4, 1, dequantize_bf16, bfloat, bfloat4x4, float, float2x4>;
+#endif
+template [[host_name("kernel_mul_mm_q4_0_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_0, 2, dequantize_q4_0, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q4_1_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_1, 2, dequantize_q4_1, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q5_0_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_0, 2, dequantize_q5_0, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q5_1_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_1, 2, dequantize_q5_1, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q8_0_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q8_0, 2, dequantize_q8_0, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_mxfp4_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_mxfp4, 2, dequantize_mxfp4, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q2_K_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q2_K, QK_NL, dequantize_q2_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q3_K_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q3_K, QK_NL, dequantize_q3_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q4_K_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_K, QK_NL, dequantize_q4_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q5_K_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_K, QK_NL, dequantize_q5_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_q6_K_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q6_K, QK_NL, dequantize_q6_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq2_xxs_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xxs, QK_NL, dequantize_iq2_xxs, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq2_xs_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xs, QK_NL, dequantize_iq2_xs, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq3_xxs_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_xxs, QK_NL, dequantize_iq3_xxs, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq3_s_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_s, QK_NL, dequantize_iq3_s, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq2_s_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_s, QK_NL, dequantize_iq2_s, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq1_s_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_s, QK_NL, dequantize_iq1_s, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq1_m_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_m, QK_NL, dequantize_iq1_m, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq4_nl_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_nl, 2, dequantize_iq4_nl, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_iq4_xs_f32")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_xs, QK_NL, dequantize_iq4_xs, float, float4x4, float, float2x4>;
+
+template [[host_name("kernel_mul_mm_f32_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, float4x4, 1, dequantize_f32, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_f16_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, half4x4, 1, dequantize_f16, half, half4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q4_0_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_0, 2, dequantize_q4_0, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q4_1_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_1, 2, dequantize_q4_1, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q5_0_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_0, 2, dequantize_q5_0, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q5_1_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_1, 2, dequantize_q5_1, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q8_0_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q8_0, 2, dequantize_q8_0, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_mxfp4_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_mxfp4, 2, dequantize_mxfp4, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q2_K_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q2_K, QK_NL, dequantize_q2_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q3_K_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q3_K, QK_NL, dequantize_q3_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q4_K_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_K, QK_NL, dequantize_q4_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q5_K_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_K, QK_NL, dequantize_q5_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_q6_K_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q6_K, QK_NL, dequantize_q6_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq2_xxs_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xxs, QK_NL, dequantize_iq2_xxs, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq2_xs_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xs, QK_NL, dequantize_iq2_xs, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq3_xxs_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_xxs, QK_NL, dequantize_iq3_xxs, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq3_s_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_s, QK_NL, dequantize_iq3_s, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq2_s_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_s, QK_NL, dequantize_iq2_s, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq1_s_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_s, QK_NL, dequantize_iq1_s, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq1_m_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_m, QK_NL, dequantize_iq1_m, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq4_nl_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_nl, 2, dequantize_iq4_nl, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_iq4_xs_f16")]] kernel mul_mm_t kernel_mul_mm<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_xs, QK_NL, dequantize_iq4_xs, float, float4x4, half, half2x4>;
+
+//
+// indirect matrix-matrix multiplication
+//
+
+typedef decltype(kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, float4x4, 1, dequantize_f32, float, float4x4, float, float2x4>) mul_mm_id;
+
+template [[host_name("kernel_mul_mm_id_f32_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, float4x4, 1, dequantize_f32, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_f16_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, half4x4, 1, dequantize_f16, half, half4x4, float, float2x4>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_mul_mm_id_bf16_f32")]] kernel mul_mm_id kernel_mul_mm_id<bfloat, bfloat4x4, simdgroup_bfloat8x8, bfloat, bfloat2x4, simdgroup_bfloat8x8, bfloat4x4, 1, dequantize_bf16, bfloat, bfloat4x4, float, float2x4>;
+#endif
+template [[host_name("kernel_mul_mm_id_q4_0_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_0, 2, dequantize_q4_0, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q4_1_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_1, 2, dequantize_q4_1, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q5_0_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_0, 2, dequantize_q5_0, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q5_1_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_1, 2, dequantize_q5_1, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q8_0_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q8_0, 2, dequantize_q8_0, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_mxfp4_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_mxfp4, 2, dequantize_mxfp4, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q2_K_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q2_K, QK_NL, dequantize_q2_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q3_K_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q3_K, QK_NL, dequantize_q3_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q4_K_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_K, QK_NL, dequantize_q4_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q5_K_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_K, QK_NL, dequantize_q5_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_q6_K_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q6_K, QK_NL, dequantize_q6_K, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq2_xxs_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xxs, QK_NL, dequantize_iq2_xxs, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq2_xs_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xs, QK_NL, dequantize_iq2_xs, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq3_xxs_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_xxs, QK_NL, dequantize_iq3_xxs, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq3_s_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_s, QK_NL, dequantize_iq3_s, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq2_s_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_s, QK_NL, dequantize_iq2_s, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq1_s_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_s, QK_NL, dequantize_iq1_s, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq1_m_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_m, QK_NL, dequantize_iq1_m, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq4_nl_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_nl, 2, dequantize_iq4_nl, float, float4x4, float, float2x4>;
+template [[host_name("kernel_mul_mm_id_iq4_xs_f32")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_xs, QK_NL, dequantize_iq4_xs, float, float4x4, float, float2x4>;
+
+template [[host_name("kernel_mul_mm_id_f32_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, float4x4, 1, dequantize_f32, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_f16_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, half4x4, 1, dequantize_f16, half, half4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q4_0_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_0, 2, dequantize_q4_0, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q4_1_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_1, 2, dequantize_q4_1, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q5_0_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_0, 2, dequantize_q5_0, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q5_1_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_1, 2, dequantize_q5_1, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q8_0_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q8_0, 2, dequantize_q8_0, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_mxfp4_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_mxfp4, 2, dequantize_mxfp4, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q2_K_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q2_K, QK_NL, dequantize_q2_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q3_K_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q3_K, QK_NL, dequantize_q3_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q4_K_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q4_K, QK_NL, dequantize_q4_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q5_K_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q5_K, QK_NL, dequantize_q5_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_q6_K_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_q6_K, QK_NL, dequantize_q6_K, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq2_xxs_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xxs, QK_NL, dequantize_iq2_xxs, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq2_xs_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_xs, QK_NL, dequantize_iq2_xs, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq3_xxs_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_xxs, QK_NL, dequantize_iq3_xxs, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq3_s_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq3_s, QK_NL, dequantize_iq3_s, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq2_s_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq2_s, QK_NL, dequantize_iq2_s, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq1_s_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_s, QK_NL, dequantize_iq1_s, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq1_m_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq1_m, QK_NL, dequantize_iq1_m, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq4_nl_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_nl, 2, dequantize_iq4_nl, float, float4x4, half, half2x4>;
+template [[host_name("kernel_mul_mm_id_iq4_xs_f16")]] kernel mul_mm_id kernel_mul_mm_id<half, half4x4, simdgroup_half8x8, half, half2x4, simdgroup_half8x8, block_iq4_xs, QK_NL, dequantize_iq4_xs, float, float4x4, half, half2x4>;
+
+//
+// matrix-vector multiplication
+//
+
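+// dispatch signatures for the matrix-vector kernel implementations: some take only
+// (args, src0, src1, dst) plus thread indices, others additionally need threadgroup
+// memory and the simdgroup index; the mmv_fn overloads below adapt both shapes to a
+// single calling convention used by kernel_mul_mv_id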
+typedef void (kernel_mul_mv_disp_t)(
+ ggml_metal_kargs_mul_mv args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ uint3 tgpig,
+ ushort tiisg);
+
+typedef void (kernel_mul_mv2_disp_t)(
+ ggml_metal_kargs_mul_mv args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiisg,
+ ushort sgitg);
+
+template<kernel_mul_mv_disp_t disp_fn>
+void mmv_fn(
+ ggml_metal_kargs_mul_mv args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiitg,
+ ushort tiisg,
+ ushort sgitg) {
+ disp_fn(args, src0, src1, dst, tgpig, tiisg);
+}
+
+template<kernel_mul_mv2_disp_t disp_fn>
+void mmv_fn(
+ ggml_metal_kargs_mul_mv args,
+ device const char * src0,
+ device const char * src1,
+ device char * dst,
+ threadgroup char * shmem,
+ uint3 tgpig,
+ ushort tiitg,
+ ushort tiisg,
+ ushort sgitg) {
+ disp_fn(args, src0, src1, dst, shmem, tgpig, tiisg, sgitg);
+}
+
+typedef decltype(mmv_fn<kernel_mul_mv_t_t_disp<half, half, ggml_metal_kargs_mul_mv>>) mul_mv_disp_fn_t;
+
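+// indirect matrix-vector multiplication: tgpig.z enumerates (token, expert-slot) pairs;
+// the expert index i02 is read from the ids tensor and selects which src0 matrix to use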
+template<mul_mv_disp_fn_t disp_fn>
+kernel void kernel_mul_mv_id(
+ constant ggml_metal_kargs_mul_mv_id & args,
+ device const char * src0s,
+ device const char * src1,
+ device char * dst,
+ device const char * ids,
+ threadgroup char * shmem [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort tiitg[[thread_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]]) {
+ const int iid1 = tgpig.z/args.nei0;
+ const int idx = tgpig.z%args.nei0;
+
+ tgpig.z = 0;
+
+ const int32_t i02 = ((device const int32_t *) (ids + iid1*args.nbi1))[idx];
+
+ const int64_t i11 = idx % args.ne11;
+ const int64_t i12 = iid1;
+
+ const int64_t i1 = idx;
+ const int64_t i2 = i12;
+
+ device const char * src0_cur = src0s + i02*args.nb02;
+ device const char * src1_cur = src1 + i11*args.nb11 + i12*args.nb12;
+
+ device char * dst_cur = dst + (i1*args.ne0 + i2*args.ne1*args.ne0)*sizeof(float);
+
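+    // build a per-dispatch mul_mv argument block: the batch/broadcast dims are forced to 1
+    // because each dispatch here handles a single src1 row against the selected expert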
+ ggml_metal_kargs_mul_mv args0 = {
+ /*.ne00 =*/ args.ne00,
+ /*.ne01 =*/ args.ne01,
+ /*.ne02 =*/ 1, // args.ne02,
+ /*.nb00 =*/ args.nb00,
+ /*.nb01 =*/ args.nb01,
+ /*.nb02 =*/ args.nb02,
+ /*.nb03 =*/ args.nb02, // args.ne02 == 1
+ /*.ne10 =*/ args.ne10,
+ /*.ne11 =*/ 1, // args.ne11,
+ /*.ne12 =*/ 1, // args.ne12,
+ /*.nb10 =*/ args.nb10,
+ /*.nb11 =*/ args.nb11,
+ /*.nb12 =*/ args.nb12,
+ /*.nb13 =*/ args.nb12, // ne12 == 1
+ /*.ne0 =*/ args.ne0,
+ /*.ne1 =*/ 1, // args.ne1,
+ /*.nr0 =*/ args.nr0,
+ /*.r2 =*/ 1,
+ /*.r3 =*/ 1,
+ };
+
+ disp_fn(
+ args0,
+ /* src0 */ src0_cur,
+ /* src1 */ src1_cur,
+ /* dst */ dst_cur,
+ shmem,
+ tgpig,
+ tiitg,
+ tiisg,
+ sgitg);
+}
+
+typedef decltype(kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_disp<float, float>>>) kernel_mul_mv_id_t;
+
+typedef decltype(kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_4_disp<float, float4, float, float4>>>) kernel_mul_mv_id_4_t;
+
+template [[host_name("kernel_mul_mv_id_f32_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_disp<float, float>>>;
+template [[host_name("kernel_mul_mv_id_f16_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_disp<half, float>>>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_mul_mv_id_bf16_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_disp<bfloat, float>>>;
+#endif
+template [[host_name("kernel_mul_mv_id_f32_f32_4")]] kernel kernel_mul_mv_id_4_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_4_disp<float, float4, float, float4>>>;
+template [[host_name("kernel_mul_mv_id_f16_f32_4")]] kernel kernel_mul_mv_id_4_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_4_disp<half, half4, float, float4>>>;
+#if defined(GGML_METAL_HAS_BF16)
+template [[host_name("kernel_mul_mv_id_bf16_f32_4")]] kernel kernel_mul_mv_id_4_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_t_t_4_disp<bfloat, bfloat4, float, float4>>>;
+#endif
+
+template [[host_name("kernel_mul_mv_id_q8_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_q8_0_f32_impl<N_R0_Q8_0>>>;
+
+template [[host_name("kernel_mul_mv_id_q4_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<mul_vec_q_n_f32_impl<block_q4_0, N_R0_Q4_0>>>;
+template [[host_name("kernel_mul_mv_id_q4_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<mul_vec_q_n_f32_impl<block_q4_1, N_R0_Q4_1>>>;
+template [[host_name("kernel_mul_mv_id_q5_0_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<mul_vec_q_n_f32_impl<block_q5_0, N_R0_Q5_0>>>;
+template [[host_name("kernel_mul_mv_id_q5_1_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<mul_vec_q_n_f32_impl<block_q5_1, N_R0_Q5_1>>>;
+
+template [[host_name("kernel_mul_mv_id_mxfp4_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_mxfp4_f32_impl<N_R0_MXFP4>>>;
+
+template [[host_name("kernel_mul_mv_id_q2_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_q2_K_f32_impl <N_R0_Q2_K>>>;
+template [[host_name("kernel_mul_mv_id_q3_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_q3_K_f32_impl <N_R0_Q3_K>>>;
+template [[host_name("kernel_mul_mv_id_q4_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_q4_K_f32_impl <N_R0_Q4_K>>>;
+template [[host_name("kernel_mul_mv_id_q5_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_q5_K_f32_impl <N_R0_Q5_K>>>;
+template [[host_name("kernel_mul_mv_id_q6_K_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_q6_K_f32_impl <N_R0_Q6_K>>>;
+template [[host_name("kernel_mul_mv_id_iq1_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq1_s_f32_impl <N_R0_IQ1_S>>>;
+template [[host_name("kernel_mul_mv_id_iq1_m_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq1_m_f32_impl <N_R0_IQ1_M>>>;
+template [[host_name("kernel_mul_mv_id_iq2_xxs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_xxs_f32_impl<N_R0_IQ2_XXS>>>;
+template [[host_name("kernel_mul_mv_id_iq2_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_xs_f32_impl <N_R0_IQ2_XS>>>;
+template [[host_name("kernel_mul_mv_id_iq3_xxs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq3_xxs_f32_impl<N_R0_IQ3_XXS>>>;
+template [[host_name("kernel_mul_mv_id_iq3_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq3_s_f32_impl <N_R0_IQ3_S>>>;
+template [[host_name("kernel_mul_mv_id_iq2_s_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq2_s_f32_impl <N_R0_IQ2_S>>>;
+template [[host_name("kernel_mul_mv_id_iq4_nl_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_nl_f32_impl <N_R0_IQ4_NL>>>;
+template [[host_name("kernel_mul_mv_id_iq4_xs_f32")]] kernel kernel_mul_mv_id_t kernel_mul_mv_id<mmv_fn<kernel_mul_mv_iq4_xs_f32_impl <N_R0_IQ4_XS>>>;
+
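+// 2D max pooling: each thread computes one output element as the maximum of its
+// (k0 x k1) input window, clamped to the input bounds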
+kernel void kernel_pool_2d_max_f32(
+ constant ggml_metal_kargs_pool_2d & args,
+ device const float * src0,
+ device float * dst,
+ uint gid[[thread_position_in_grid]]) {
+
+ if (gid >= args.np) {
+ return;
+ }
+
+ const int idx = gid;
+ const int I_HW = args.IH * args.IW;
+ const int O_HW = args.OH * args.OW;
+ const int nc = idx / O_HW;
+ const int cur_oh = idx % O_HW / args.OW;
+ const int cur_ow = idx % O_HW % args.OW;
+
+ device const float * i_ptr = src0 + nc * I_HW;
+ device float * o_ptr = dst + nc * O_HW;
+
+ const int start_h = cur_oh * args.s1 - args.p1;
+ const int bh = MAX(0, start_h);
+ const int eh = MIN(args.IH, start_h + args.k1);
+ const int start_w = cur_ow * args.s0 - args.p0;
+ const int bw = MAX(0, start_w);
+ const int ew = MIN(args.IW, start_w + args.k0);
+
+ float res = -INFINITY;
+
+ for (int i = bh; i < eh; i += 1) {
+ for (int j = bw; j < ew; j += 1) {
+ res = MAX(res, i_ptr[i * args.IW + j]);
+ }
+ }
+
+ o_ptr[cur_oh * args.OW + cur_ow] = res;
+}
+
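+// 2D average pooling: each thread averages its input window; note the divisor is the
+// full kernel size k0*k1 (count_include_pad behavior), not the clamped window size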
+kernel void kernel_pool_2d_avg_f32(
+ constant ggml_metal_kargs_pool_2d & args,
+ device const float * src0,
+ device float * dst,
+ uint gid[[thread_position_in_grid]]) {
+
+ if (gid >= args.np) {
+ return;
+ }
+
+ const int idx = gid;
+ const int I_HW = args.IH * args.IW;
+ const int O_HW = args.OH * args.OW;
+ const int nc = idx / O_HW;
+ const int cur_oh = idx % O_HW / args.OW;
+ const int cur_ow = idx % O_HW % args.OW;
+
+ device const float * i_ptr = src0 + nc * I_HW;
+ device float * o_ptr = dst + nc * O_HW;
+
+ const int start_h = cur_oh * args.s1 - args.p1;
+ const int bh = MAX(0, start_h);
+ const int eh = MIN(args.IH, start_h + args.k1);
+ const int start_w = cur_ow * args.s0 - args.p0;
+ const int bw = MAX(0, start_w);
+ const int ew = MIN(args.IW, start_w + args.k0);
+ // const float scale = 1. / ((eh - bh) * (ew - bw));
+ const float scale = 1. / (args.k0 * args.k1);
+
+ float res = 0;
+
+ for (int i = bh; i < eh; i += 1) {
+ for (int j = bw; j < ew; j += 1) {
+ float cur = i_ptr[i * args.IW + j];
+ res += cur * scale;
+ }
+ }
+
+ o_ptr[cur_oh * args.OW + cur_ow] = res;
+}
+
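+// 1D max pooling over the innermost dimension, one output element per thread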
+kernel void kernel_pool_1d_max_f32(
+ constant ggml_metal_kargs_pool_1d & args,
+ device const float * src,
+ device float * dst,
+ uint gid [[thread_position_in_grid]]
+) {
+
+ if (gid >= args.np) {
+ return;
+ }
+
+ const int ow = (int)gid % args.OW;
+ const int row = (int)gid / args.OW;
+
+ const int base = ow * args.s0 - args.p0;
+
+ float acc = -INFINITY;
+
+ const int src_off = row * args.IW;
+ const int dst_off = row * args.OW;
+
+ for (int ki = 0; ki < args.k0; ++ki) {
+ int j = base + ki;
+ if (j < 0 || j >= args.IW){
+ continue;
+ }
+ float v = src[src_off + j];
+ acc = max(acc, v);
+ }
+
+ dst[dst_off + ow] = acc;
+}
+
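+// 1D average pooling; unlike the 2D variant above, the divisor is the number of
+// in-bounds elements actually visited, so padded positions do not dilute the average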
+kernel void kernel_pool_1d_avg_f32(
+ constant ggml_metal_kargs_pool_1d & args,
+ device const float * src,
+ device float * dst,
+ uint gid [[thread_position_in_grid]]
+) {
+
+ if (gid >= args.np) {
+ return;
+ }
+
+ const int ow = (int)gid % args.OW;
+ const int row = (int)gid / args.OW;
+
+ const int base = ow * args.s0 - args.p0;
+
+ float acc = 0.0f;
+ int cnt = 0;
+
+ const int src_off = row * args.IW;
+ const int dst_off = row * args.OW;
+
+ for (int ki = 0; ki < args.k0; ++ki) {
+ const int j = base + ki;
+ if (j < 0 || j >= args.IW) {
+ continue;
+ }
+ acc += src[src_off + j];
+ cnt += 1;
+ }
+
+ dst[dst_off + ow] = (cnt > 0) ? (acc / (float)cnt) : 0.0f;
+}
+
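+// fused AdamW optimizer step; pars holds the hyperparameters in the order read below:
+// alpha (learning rate), beta1, beta2, eps, wd (weight decay), and beta1h/beta2h,
+// presumably the per-step bias-correction factors precomputed by the host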
+kernel void kernel_opt_step_adamw_f32(
+ constant ggml_metal_kargs_opt_step_adamw & args,
+ device float * x,
+ device const float * g,
+ device float * g_m,
+ device float * g_v,
+ device const float * pars,
+ uint gid[[thread_position_in_grid]]) {
+
+ if (gid >= args.np) {
+ return;
+ }
+
+ const float alpha = pars[0];
+ const float beta1 = pars[1];
+ const float beta2 = pars[2];
+ const float eps = pars[3];
+ const float wd = pars[4];
+ const float beta1h = pars[5];
+ const float beta2h = pars[6];
+
+ const float gi = g[gid];
+ const float gmi = g_m[gid] * beta1 + gi * (1.0f - beta1);
+ const float gvi = g_v[gid] * beta2 + gi * gi * (1.0f - beta2);
+
+ g_m[gid] = gmi;
+ g_v[gid] = gvi;
+
+ const float mh = gmi * beta1h;
+ const float vh = sqrt(gvi * beta2h) + eps;
+
+ x[gid] = x[gid] * (1.0f - alpha * wd) - alpha * mh / vh;
+}
+
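+// SGD step with decoupled weight decay: x <- x*(1 - lr*wd) - lr*g, taking
+// pars[0] as the learning rate and pars[1] as the weight decay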
+kernel void kernel_opt_step_sgd_f32(
+ constant ggml_metal_kargs_opt_step_sgd & args,
+ device float * x,
+ device const float * g,
+ device const float * pars,
+ uint gid[[thread_position_in_grid]]) {
+
+ if (gid >= args.np) {
+ return;
+ }
+
+ x[gid] = x[gid] * (1.0f - pars[0] * pars[1]) - pars[0] * g[gid];
+}
+
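+// fills dst with the constant args.val; currently instantiated only for int64_t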
+template<typename T>
+kernel void kernel_memset(
+ constant ggml_metal_kargs_memset & args,
+ device T * dst,
+ uint tpig[[thread_position_in_grid]]) {
+ dst[tpig] = args.val;
+}
+
+typedef decltype(kernel_memset<int64_t>) kernel_memset_t;
+
+template [[host_name("kernel_memset_i64")]] kernel kernel_memset_t kernel_memset<int64_t>;
+
+constant short FC_count_equal_nsg [[function_constant(FC_COUNT_EQUAL + 0)]];
+
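+// counts elements where src0 == src1: each thread accumulates a partial count, counts
+// are reduced within each simdgroup, then across simdgroups via threadgroup memory, and
+// simdgroup 0 atomically adds the threadgroup total to dst; FC_count_equal_nsg is the
+// number of simdgroups per threadgroup, provided by the host as a function constant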
+template<typename T>
+kernel void kernel_count_equal(
+ constant ggml_metal_kargs_count_equal & args,
+ device const char * src0,
+ device const char * src1,
+ device atomic_int * dst,
+ threadgroup int32_t * shmem_i32 [[threadgroup(0)]],
+ uint3 tgpig[[threadgroup_position_in_grid]],
+ ushort3 tpitg[[thread_position_in_threadgroup]],
+ ushort sgitg[[simdgroup_index_in_threadgroup]],
+ ushort tiisg[[thread_index_in_simdgroup]],
+ ushort3 ntg[[threads_per_threadgroup]]) {
+ const short NSG = FC_count_equal_nsg;
+
+ const int i3 = tgpig.z;
+ const int i2 = tgpig.y;
+ const int i1 = tgpig.x;
+
+ if (i3 >= args.ne03 || i2 >= args.ne02 || i1 >= args.ne01) {
+ return;
+ }
+
+ int sum = 0;
+
+ device const char * base0 = src0 + i1*args.nb01 + i2*args.nb02 + i3*args.nb03;
+ device const char * base1 = src1 + i1*args.nb11 + i2*args.nb12 + i3*args.nb13;
+
+ for (int64_t i0 = tpitg.x; i0 < args.ne00; i0 += ntg.x) {
+ const T v0 = *(device const T *)(base0 + i0*args.nb00);
+ const T v1 = *(device const T *)(base1 + i0*args.nb10);
+ sum += (v0 == v1);
+ }
+
+ sum = simd_sum(sum);
+
+ if (tiisg == 0) {
+ shmem_i32[sgitg] = sum;
+ }
+
+ threadgroup_barrier(mem_flags::mem_threadgroup);
+
+ if (sgitg == 0) {
+ float v = 0.0f;
+ if (tpitg.x < NSG) {
+ v = shmem_i32[tpitg.x];
+ }
+
+ float total = simd_sum(v);
+ if (tpitg.x == 0) {
+ atomic_fetch_add_explicit(dst, (int32_t) total, memory_order_relaxed);
+ }
+ }
+}
+
+typedef decltype(kernel_count_equal<int32_t>) kernel_count_equal_t;
+
+template [[host_name("kernel_count_equal_i32")]] kernel kernel_count_equal_t kernel_count_equal<int32_t>;