summaryrefslogtreecommitdiff
path: root/llama.cpp/ggml/src/ggml-cuda/mmvq.cuh
diff options
context:
space:
mode:
authorMitja Felicijan <mitja.felicijan@gmail.com>2026-02-12 20:57:17 +0100
committerMitja Felicijan <mitja.felicijan@gmail.com>2026-02-12 20:57:17 +0100
commitb333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/ggml/src/ggml-cuda/mmvq.cuh
downloadllmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz
Engage!
Diffstat (limited to 'llama.cpp/ggml/src/ggml-cuda/mmvq.cuh')
-rw-r--r--llama.cpp/ggml/src/ggml-cuda/mmvq.cuh12
1 file changed, 12 insertions, 0 deletions
diff --git a/llama.cpp/ggml/src/ggml-cuda/mmvq.cuh b/llama.cpp/ggml/src/ggml-cuda/mmvq.cuh
new file mode 100644
index 0000000..4bb10cf
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-cuda/mmvq.cuh
@@ -0,0 +1,12 @@
+#include "common.cuh"
+
+#define MMVQ_MAX_BATCH_SIZE 8 // Max. batch size for which to use MMVQ kernels.
+
+void ggml_cuda_mul_mat_vec_q(ggml_backend_cuda_context & ctx,
+ const ggml_tensor * src0, const ggml_tensor * src1, const ggml_tensor * ids, ggml_tensor * dst, const ggml_cuda_mm_fusion_args_host * fusion = nullptr);
+
+void ggml_cuda_op_mul_mat_vec_q(
+ ggml_backend_cuda_context & ctx,
+ const ggml_tensor * src0, const ggml_tensor * src1, ggml_tensor * dst, const char * src0_dd_i, const float * src1_ddf_i,
+ const char * src1_ddq_i, float * dst_dd_i, const int64_t row_low, const int64_t row_high, const int64_t src1_ncols,
+ const int64_t src1_padded_row_size, cudaStream_t stream);