From b333b06772c89d96aacb5490d6a219fba7c09cc6 Mon Sep 17 00:00:00 2001 From: Mitja Felicijan Date: Thu, 12 Feb 2026 20:57:17 +0100 Subject: Engage! --- llama.cpp/ggml/src/ggml-sycl/softmax.hpp | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 llama.cpp/ggml/src/ggml-sycl/softmax.hpp (limited to 'llama.cpp/ggml/src/ggml-sycl/softmax.hpp') diff --git a/llama.cpp/ggml/src/ggml-sycl/softmax.hpp b/llama.cpp/ggml/src/ggml-sycl/softmax.hpp new file mode 100644 index 0000000..23f1e5a --- /dev/null +++ b/llama.cpp/ggml/src/ggml-sycl/softmax.hpp @@ -0,0 +1,24 @@ +// +// MIT license +// Copyright (C) 2024 Intel Corporation +// SPDX-License-Identifier: MIT +// + +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// + +#ifndef GGML_SYCL_SOFTMAX_HPP +#define GGML_SYCL_SOFTMAX_HPP + +#include "common.hpp" + +#define SYCL_SOFT_MAX_BLOCK_SIZE 1024 + +void ggml_sycl_op_soft_max(ggml_backend_sycl_context &ctx, ggml_tensor *dst); + +void ggml_sycl_op_soft_max_back(ggml_backend_sycl_context &ctx, ggml_tensor *dst); + +#endif // GGML_SYCL_SOFTMAX_HPP -- cgit v1.2.3