path: root/llama.cpp/ggml/src/ggml-cpu/arch/arm
author    Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
committer Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
commit    b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree      211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/ggml/src/ggml-cpu/arch/arm
download  llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz
Engage!
Diffstat (limited to 'llama.cpp/ggml/src/ggml-cpu/arch/arm')
-rw-r--r--  llama.cpp/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp    98
-rw-r--r--  llama.cpp/ggml/src/ggml-cpu/arch/arm/quants.c        4052
-rw-r--r--  llama.cpp/ggml/src/ggml-cpu/arch/arm/repack.cpp      4237
3 files changed, 8387 insertions, 0 deletions
diff --git a/llama.cpp/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp b/llama.cpp/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp
new file mode 100644
index 0000000..c460c54
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-cpu/arch/arm/cpu-feats.cpp
@@ -0,0 +1,98 @@
+#include "ggml-backend-impl.h"
+
+#if defined(__aarch64__)
+
+#if defined(__linux__)
+#include <sys/auxv.h>
+#elif defined(__APPLE__)
+#include <sys/sysctl.h>
+#endif
+
+#if !defined(HWCAP2_SVE2)
+#define HWCAP2_SVE2 (1 << 1)
+#endif
+
+#if !defined(HWCAP2_I8MM)
+#define HWCAP2_I8MM (1 << 13)
+#endif
+
+#if !defined(HWCAP2_SME)
+#define HWCAP2_SME (1 << 23)
+#endif
+
+struct aarch64_features {
+    // has_neon is not needed: AArch64 is guaranteed to support NEON
+ bool has_dotprod = false;
+ bool has_fp16_va = false;
+ bool has_sve = false;
+ bool has_sve2 = false;
+ bool has_i8mm = false;
+ bool has_sme = false;
+
+ aarch64_features() {
+#if defined(__linux__)
+ uint32_t hwcap = getauxval(AT_HWCAP);
+ uint32_t hwcap2 = getauxval(AT_HWCAP2);
+
+ has_dotprod = !!(hwcap & HWCAP_ASIMDDP);
+ has_fp16_va = !!(hwcap & HWCAP_FPHP);
+ has_sve = !!(hwcap & HWCAP_SVE);
+ has_sve2 = !!(hwcap2 & HWCAP2_SVE2);
+ has_i8mm = !!(hwcap2 & HWCAP2_I8MM);
+ has_sme = !!(hwcap2 & HWCAP2_SME);
+#elif defined(__APPLE__)
+ int oldp = 0;
+ size_t size = sizeof(oldp);
+
+ if (sysctlbyname("hw.optional.arm.FEAT_DotProd", &oldp, &size, NULL, 0) == 0) {
+ has_dotprod = static_cast<bool>(oldp);
+ }
+
+ if (sysctlbyname("hw.optional.arm.FEAT_I8MM", &oldp, &size, NULL, 0) == 0) {
+ has_i8mm = static_cast<bool>(oldp);
+ }
+
+ if (sysctlbyname("hw.optional.arm.FEAT_SME", &oldp, &size, NULL, 0) == 0) {
+ has_sme = static_cast<bool>(oldp);
+ }
+
+ // Apple apparently does not implement SVE yet
+#endif
+ }
+};
+
+static int ggml_backend_cpu_aarch64_score() {
+ int score = 1;
+ aarch64_features af;
+
+#ifdef GGML_USE_DOTPROD
+ if (!af.has_dotprod) { return 0; }
+ score += 1<<1;
+#endif
+#ifdef GGML_USE_FP16_VECTOR_ARITHMETIC
+ if (!af.has_fp16_va) { return 0; }
+ score += 1<<2;
+#endif
+#ifdef GGML_USE_SVE
+ if (!af.has_sve) { return 0; }
+ score += 1<<3;
+#endif
+#ifdef GGML_USE_MATMUL_INT8
+ if (!af.has_i8mm) { return 0; }
+ score += 1<<4;
+#endif
+#ifdef GGML_USE_SVE2
+ if (!af.has_sve2) { return 0; }
+ score += 1<<5;
+#endif
+#ifdef GGML_USE_SME
+ if (!af.has_sme) { return 0; }
+ score += 1<<6;
+#endif
+
+ return score;
+}
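+
+// Illustrative scoring example: a variant built with GGML_USE_DOTPROD and
+// GGML_USE_MATMUL_INT8, running on a CPU that reports both features, scores
+// 1 + (1 << 1) + (1 << 4) = 19. If the CPU lacks any feature the variant was
+// compiled for, the score is 0 and the variant is treated as unusable.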
+
+GGML_BACKEND_DL_SCORE_IMPL(ggml_backend_cpu_aarch64_score)
+
+#endif // defined(__aarch64__)
diff --git a/llama.cpp/ggml/src/ggml-cpu/arch/arm/quants.c b/llama.cpp/ggml/src/ggml-cpu/arch/arm/quants.c
new file mode 100644
index 0000000..b390ab6
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-cpu/arch/arm/quants.c
@@ -0,0 +1,4052 @@
+#define GGML_COMMON_IMPL_C
+#include "ggml-common.h"
+#include "ggml-quants.h"
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "simd-mappings.h"
+
+#include "../../quants.h"
+#include "../../ggml-cpu-impl.h"
+
+#include <math.h>
+#include <string.h>
+#include <assert.h>
+#include <float.h>
+#include <stdlib.h> // for qsort
+#include <stdio.h> // for GGML_ASSERT
+
+#define GROUP_MAX_EPS 1e-15f
+#define GROUP_MAX_EPS_IQ3_XXS 1e-8f
+#define GROUP_MAX_EPS_IQ2_S 1e-8f
+#define GROUP_MAX_EPS_IQ1_M 1e-7f
+#define GROUP_MAX_EPS_IQ1_S 1e-12f
+
+#define UNUSED GGML_UNUSED
+
+#if defined(__ARM_NEON)
+#define B1(c,s,n) 0x ## n ## c , 0x ## n ## s
+#define B2(c,s,n) B1(c,s,n ## c), B1(c,s,n ## s)
+#define B3(c,s,n) B2(c,s,n ## c), B2(c,s,n ## s)
+#define B4(c,s,n) B3(c,s,n ## c), B3(c,s,n ## s)
+#define B5(c,s,n) B4(c,s,n ## c), B4(c,s,n ## s)
+#define B6(c,s,n) B5(c,s,n ## c), B5(c,s,n ## s)
+#define B7(c,s,n) B6(c,s,n ## c), B6(c,s,n ## s)
+#define B8(c,s ) B7(c,s, c), B7(c,s, s)
+
+// precomputed tables for expanding 8 bits to 8 bytes:
+static const uint64_t table_b2b_0[1 << 8] = { B8(00, 10) }; // ( b) << 4
+static const uint64_t table_b2b_1[1 << 8] = { B8(10, 00) }; // (!b) << 4
+#endif
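+
+// In effect, table_b2b_0[i] spreads the 8 bits of the index i across 8 bytes,
+// with byte j equal to 0x10 when bit j of i is set and 0x00 otherwise;
+// table_b2b_1 is the complement ((!b) << 4). Four lookups therefore expand a
+// 32-bit high-bit mask (qh) into 32 bytes, which is how the q5_0 and q5_1 dot
+// products below reinsert the 5th bit.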
+
+void quantize_row_q8_0(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+ assert(QK8_0 == 32);
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ block_q8_0 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_NEON)
+ for (int i = 0; i < nb; i++) {
+ float32x4_t srcv [8];
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_CPU_FP32_TO_FP16(d);
+
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
+ const int32x4_t vi = vcvtnq_s32_f32(v);
+
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
+ }
+ }
+#else
+ GGML_UNUSED(nb);
+ // scalar
+ quantize_row_q8_0_ref(x, y, k);
+#endif
+}
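+
+// Worked example of the block scale above: with amax = 6.35 over a 32-float
+// block, d = 6.35 / 127 = 0.05 and id = 20, so an input of 0.30 quantizes to
+// round(0.30 * 20) = 6 and dequantizes back to 6 * d = 0.30 (up to the FP16
+// rounding of d).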
+
+void quantize_row_q8_1(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+ assert(k % QK8_1 == 0);
+ const int nb = k / QK8_1;
+
+ block_q8_1 * GGML_RESTRICT y = vy;
+#if defined(__ARM_NEON)
+ for (int i = 0; i < nb; i++) {
+ float32x4_t srcv [8];
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int j = 0; j < 8; j++) srcv[j] = vld1q_f32(x + i*32 + 4*j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2*j] = vmaxq_f32(asrcv[2*j], asrcv[2*j+1]);
+ for (int j = 0; j < 2; j++) amaxv[4*j] = vmaxq_f32(amaxv[4*j], amaxv[4*j+2]);
+ for (int j = 0; j < 1; j++) amaxv[8*j] = vmaxq_f32(amaxv[8*j], amaxv[8*j+4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ const float id = d ? 1.0f/d : 0.0f;
+
+ y[i].d = GGML_CPU_FP32_TO_FP16(d);
+
+ int32x4_t accv = vdupq_n_s32(0);
+
+ for (int j = 0; j < 8; j++) {
+ const float32x4_t v = vmulq_n_f32(srcv[j], id);
+ const int32x4_t vi = vcvtnq_s32_f32(v);
+
+ y[i].qs[4*j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[4*j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[4*j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[4*j + 3] = vgetq_lane_s32(vi, 3);
+
+ accv = vaddq_s32(accv, vi);
+ }
+
+ y[i].s = GGML_CPU_FP32_TO_FP16(d * vaddvq_s32(accv));
+ }
+#else
+ GGML_UNUSED(nb);
+ // scalar
+ quantize_row_q8_1_ref(x, y, k);
+#endif
+}
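+
+// q8_1 additionally stores s = d * sum(qs), i.e. approximately the sum of the
+// block's original values. The q4_1 and q5_1 dot products below use it to fold
+// in the other operand's per-block minimum m through a single m*s term instead
+// of touching the individual quants.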
+
+// placeholder implementation for Apple targets
+void quantize_row_q8_K(const float * GGML_RESTRICT x, void * GGML_RESTRICT y, int64_t k) {
+ quantize_row_q8_K_ref(x, y, k);
+}
+
+//===================================== Dot products =================================
+
+void ggml_vec_dot_q4_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+ assert((nrc == 2) || (nrc == 1));
+#else
+ assert(nrc == 1);
+#endif
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q4_0 * GGML_RESTRICT x = vx;
+ const block_q8_0 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+ if (nrc == 2) {
+ const block_q4_0 * GGML_RESTRICT vx0 = vx;
+ const block_q4_0 * GGML_RESTRICT vx1 = (const block_q4_0 *) ((const uint8_t*)vx + bx);
+ const block_q8_0 * GGML_RESTRICT vy0 = vy;
+ const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by);
+
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+
+ for (int i = 0; i < nb; i++) {
+ const block_q4_0 * GGML_RESTRICT b_x0 = &vx0[i];
+ const block_q4_0 * GGML_RESTRICT b_x1 = &vx1[i];
+ const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i];
+ const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+ const int8x16_t s8b = vdupq_n_s8(0x8);
+
+ const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // sub 8
+ const int8x16_t x0_l = vsubq_s8(v0_0l, s8b);
+ const int8x16_t x0_h = vsubq_s8(v0_0h, s8b);
+ const int8x16_t x1_l = vsubq_s8(v0_1l, s8b);
+ const int8x16_t x1_h = vsubq_s8(v0_1h, s8b);
+
+ // load y
+ const int8x16_t y0_l = vld1q_s8(b_y0->qs);
+ const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
+ const int8x16_t y1_l = vld1q_s8(b_y1->qs);
+ const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
+
+ float32_t _scale[4] = {
+ GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+ GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d),
+ GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+ GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d)
+ };
+ float32x4_t scale = vld1q_f32(_scale);
+
+ int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+ int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+
+ int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+ int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+
+ int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+ int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+
+ int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+ int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+
+ sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
+ l1, r1)), l2, r2)), l3, r3))), scale);
+ }
+
+ float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2);
+ float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
+
+ vst1_f32(s, vget_low_f32 (sumv2));
+ vst1_f32(s + bs, vget_high_f32(sumv2));
+
+ return;
+ }
+#endif
+
+ int ib = 0;
+ float sumf = 0;
+
+#if defined(__ARM_FEATURE_SVE)
+ svfloat32_t sumv0 = svdup_n_f32(0.0f);
+ svfloat32_t sumv1 = svdup_n_f32(0.0f);
+
+ const int vector_length = ggml_cpu_get_sve_cnt()*8;
+
+ // VLA Implementation using switch case
+ switch (vector_length) {
+ case 128:
+ {
+ // predicate for activating higher lanes for 4 float32 elements
+ const svbool_t ph4 = svptrue_pat_b32(SV_VL4);
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ // load x
+ const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs);
+ const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs);
+
+ // 4-bit -> 8-bit
+ const svint8_t qx0l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx0r, 0x0F));
+ const svint8_t qx0h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx0r, 0x04));
+ const svint8_t qx1l = svreinterpret_s8_u8(svand_n_u8_m(svptrue_b8(), qx1r, 0x0F));
+ const svint8_t qx1h = svreinterpret_s8_u8(svlsr_n_u8_m(svptrue_b8(), qx1r, 0x04));
+
+ // sub 8
+ const svint8_t qx0ls = svsub_n_s8_x(svptrue_b8(), qx0h, 8);
+ const svint8_t qx0hs = svsub_n_s8_x(svptrue_b8(), qx0l, 8);
+ const svint8_t qx1ls = svsub_n_s8_x(svptrue_b8(), qx1h, 8);
+ const svint8_t qx1hs = svsub_n_s8_x(svptrue_b8(), qx1l, 8);
+
+ // load y
+ const svint8_t qy0h = svld1_s8(svptrue_b8(), y0->qs);
+ const svint8_t qy0l = svld1_s8(svptrue_b8(), y0->qs + 16);
+ const svint8_t qy1h = svld1_s8(svptrue_b8(), y1->qs);
+ const svint8_t qy1l = svld1_s8(svptrue_b8(), y1->qs + 16);
+
+ // dot product
+ sumv0 = svmla_n_f32_x(ph4, sumv0, svcvt_f32_s32_x(ph4, svadd_x(ph4,
+ svdot_s32(svdup_n_s32(0), qx0ls, qy0l),
+ svdot_s32(svdup_n_s32(0), qx0hs, qy0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = svmla_n_f32_x(ph4, sumv1, svcvt_f32_s32_x(ph4, svadd_x(ph4,
+ svdot_s32(svdup_n_s32(0), qx1ls, qy1l),
+ svdot_s32(svdup_n_s32(0), qx1hs, qy1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1));
+ } break;
+ case 256:
+ {
+ // predicate for activating higher lanes for 16 int8 elements
+ const svbool_t ph16 = svptrue_pat_b8(SV_VL16);
+ // predicate for activating lower lanes for 16 int8 elements
+ const svbool_t pl16 = svnot_b_z(svptrue_b8(), ph16);
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ // load x
+ const svuint8_t qx0r = svld1rq_u8(svptrue_b8(), x0->qs);
+ const svuint8_t qx1r = svld1rq_u8(svptrue_b8(), x1->qs);
+
+ // 4-bit -> 8-bit
+ const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04));
+ const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04));
+
+ // sub 8
+ const svint8_t qx0s = svsub_n_s8_x(svptrue_b8(), qx0, 8);
+ const svint8_t qx1s = svsub_n_s8_x(svptrue_b8(), qx1, 8);
+
+ // load y
+ const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs);
+ const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs);
+
+ // dot product
+ sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(),
+ svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(),
+ svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1));
+ } break;
+ case 512:
+ {
+ // predicate for activating higher lanes for 32 int8 elements
+ const svbool_t ph32 = svptrue_pat_b8(SV_VL32);
+
+ // predicate for activating higher lanes for 16 int8 elements
+ const svbool_t ph16 = svptrue_pat_b8(SV_VL16);
+ // predicate for activating lower lanes for 16 int8 elements from first 32 int8 activated lanes
+ const svbool_t pl16 = svnot_b_z(ph32, ph16);
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ // load x
+ const svuint8_t qx0r = svld1rq_u8(ph32, x0->qs);
+ const svuint8_t qx1r = svld1rq_u8(ph32, x1->qs);
+
+ // 4-bit -> 8-bit
+ const svint8_t qx0 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx0r, 0x0F), 0x04));
+ const svint8_t qx1 = svreinterpret_s8_u8(svlsr_n_u8_m(pl16, svand_n_u8_m(ph16, qx1r, 0x0F), 0x04));
+
+ // sub 8
+ const svint8_t qx0s = svsub_n_s8_x(ph32, qx0, 8);
+ const svint8_t qx1s = svsub_n_s8_x(ph32, qx1, 8);
+
+ // load y
+ const svint8_t qy0 = svld1_s8(ph32, y0->qs);
+ const svint8_t qy1 = svld1_s8(ph32, y1->qs);
+
+ // dot product
+ sumv0 = svmla_n_f32_x(ph32, sumv0, svcvt_f32_s32_x(ph32,
+ svdot_s32(svdup_n_s32(0), qx0s, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = svmla_n_f32_x(ph32, sumv1, svcvt_f32_s32_x(ph32,
+ svdot_s32(svdup_n_s32(0), qx1s, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = svaddv_f32(ph32, svadd_f32_x(ph32, sumv0, sumv1));
+ } break;
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ }
+
+#elif defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q4_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q4_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+ const int8x16_t s8b = vdupq_n_s8(0x8);
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // sub 8
+ const int8x16_t v0_0ls = vsubq_s8(v0_0l, s8b);
+ const int8x16_t v0_0hs = vsubq_s8(v0_0h, s8b);
+ const int8x16_t v0_1ls = vsubq_s8(v0_1l, s8b);
+ const int8x16_t v0_1hs = vsubq_s8(v0_1h, s8b);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+ // dot product into int32x4_t
+ const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0ls, v1_0l), v0_0hs, v1_0h);
+ const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1ls, v1_1l), v0_1hs, v1_1h);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#endif
+ for (; ib < nb; ++ib) {
+ int sumi0 = 0;
+ int sumi1 = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[ib].qs[j] & 0x0F) - 8;
+ const int v1 = (x[ib].qs[j] >> 4) - 8;
+
+ sumi0 += (v0 * y[ib].qs[j]);
+ sumi1 += (v1 * y[ib].qs[j + qk/2]);
+ }
+
+ int sumi = sumi0 + sumi1;
+ sumf += sumi*GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d);
+ }
+
+ *s = sumf;
+}
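+
+// q4_0 layout as used above: the 16 qs bytes hold 32 values, low nibbles for
+// elements 0..15 and high nibbles for elements 16..31, stored with a +8
+// offset, so a nibble n decodes to (n - 8) * d (e.g. 0x3 with d = 0.1 gives
+// -0.5). The i8mm path (nrc == 2) computes a 2x2 tile of dot products with
+// vmmlaq_s32 and writes the two result rows at s and s + bs.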
+
+void ggml_vec_dot_q4_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ const int qk = QK8_1;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+ assert((nrc == 2) || (nrc == 1));
+#else
+ assert(nrc == 1);
+#endif
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q4_1 * GGML_RESTRICT x = vx;
+ const block_q8_1 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+ if (nrc == 2) {
+ const block_q4_1 * GGML_RESTRICT vx0 = vx;
+ const block_q4_1 * GGML_RESTRICT vx1 = (const block_q4_1 *) ((const uint8_t*)vx + bx);
+ const block_q8_1 * GGML_RESTRICT vy0 = vy;
+ const block_q8_1 * GGML_RESTRICT vy1 = (const block_q8_1 *) ((const uint8_t*)vy + by);
+
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t summs0 = vdupq_n_f32(0.0f);
+
+ for (int i = 0; i < nb; i++) {
+ const block_q4_1 * GGML_RESTRICT b_x0 = &vx0[i];
+ const block_q4_1 * GGML_RESTRICT b_x1 = &vx1[i];
+ const block_q8_1 * GGML_RESTRICT b_y0 = &vy0[i];
+ const block_q8_1 * GGML_RESTRICT b_y1 = &vy1[i];
+
+ float32_t summs_t[4] = {
+ GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y0->s),
+ GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y0->s),
+ GGML_CPU_FP16_TO_FP32(b_x0->m) * GGML_CPU_FP16_TO_FP32(b_y1->s),
+ GGML_CPU_FP16_TO_FP32(b_x1->m) * GGML_CPU_FP16_TO_FP32(b_y1->s)
+ };
+ summs0 = vaddq_f32(summs0, vld1q_f32(summs_t));
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ const uint8x16_t v0_0 = vld1q_u8(b_x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(b_x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t x0_l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t x0_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t x1_l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t x1_h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // load y
+ const int8x16_t y0_l = vld1q_s8(b_y0->qs);
+ const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
+ const int8x16_t y1_l = vld1q_s8(b_y1->qs);
+ const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
+
+ // mmla into int32x4_t
+ float32_t _scale[4] = {
+ GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+ GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d),
+ GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+ GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d)
+ };
+ float32x4_t scale = vld1q_f32(_scale);
+
+ int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+ int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+
+ int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+ int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+
+ int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+ int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+
+ int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+ int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+ sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
+ l1, r1)), l2, r2)), l3, r3))), scale);
+ }
+
+ float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2);
+ float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
+
+ sumv2 = vaddq_f32(sumv2, summs0);
+
+ vst1_f32(s, vget_low_f32 (sumv2));
+ vst1_f32(s + bs, vget_high_f32(sumv2));
+
+ return;
+ }
+#endif
+
+ int ib = 0;
+ float sumf = 0;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ float summs = 0;
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q4_1 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q4_1 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_1 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ summs += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s) + GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s);
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+ // dot product into int32x4_t
+ const int32x4_t p_0 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_0l, v1_0l), v0_0h, v1_0h);
+ const int32x4_t p_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), v0_1l, v1_1l), v0_1h, v1_1h);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(p_0), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(p_1), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs;
+
+#endif
+ for (; ib < nb; ++ib) {
+ int sumi0 = 0;
+ int sumi1 = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const int v0 = (x[ib].qs[j] & 0x0F);
+ const int v1 = (x[ib].qs[j] >> 4);
+
+ sumi0 += (v0 * y[ib].qs[j]);
+ sumi1 += (v1 * y[ib].qs[j + qk/2]);
+ }
+
+ int sumi = sumi0 + sumi1;
+ sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
+ }
+
+ *s = sumf;
+}
+
+void ggml_vec_dot_mxfp4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+ assert(n % QK_MXFP4 == 0);
+ static_assert(QK_MXFP4 == QK8_0, "QK_MXFP4 and QK8_0 must be the same");
+
+ const block_mxfp4 * GGML_RESTRICT x = vx;
+ const block_q8_0 * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_MXFP4;
+
+ int ib = 0;
+ float sumf = 0;
+
+#if defined __ARM_NEON
+ const int8x16_t values = vld1q_s8(kvalues_mxfp4);
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ uint8x16x2_t q4bits;
+ int8x16x4_t q4b;
+ int8x16x4_t q8b;
+ int32x4_t prod_1;
+ int32x4_t prod_2;
+
+ for (; ib + 1 < nb; ib += 2) {
+ q4bits.val[0] = vld1q_u8(x[ib + 0].qs);
+ q4bits.val[1] = vld1q_u8(x[ib + 1].qs);
+ q8b.val[0] = vld1q_s8(y[ib + 0].qs);
+ q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16);
+ q8b.val[2] = vld1q_s8(y[ib + 1].qs);
+ q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16);
+
+ q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
+ q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
+ q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
+ q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
+
+ prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
+ prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
+
+ sumf +=
+ GGML_E8M0_TO_FP32_HALF(x[ib + 0].e) * GGML_CPU_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) +
+ GGML_E8M0_TO_FP32_HALF(x[ib + 1].e) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2);
+ }
+
+#endif
+ for (; ib < nb; ++ib) {
+ const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_E8M0_TO_FP32_HALF(x[ib].e);
+ int sumi1 = 0;
+ int sumi2 = 0;
+ for (int j = 0; j < QK_MXFP4/2; ++j) {
+ sumi1 += y[ib].qs[j + 0] * kvalues_mxfp4[x[ib].qs[j] & 0xf];
+ sumi2 += y[ib].qs[j + QK_MXFP4/2] * kvalues_mxfp4[x[ib].qs[j] >> 4];
+ }
+ sumf += d * (sumi1 + sumi2);
+ }
+ *s = sumf;
+}
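+
+// In the mxfp4 kernel above, each nibble indexes the 16-entry kvalues_mxfp4
+// table of signed FP4 values, and the per-block scale x[ib].e is an E8M0
+// exponent, i.e. a pure power-of-two scale; the _HALF decode appears to
+// compensate for the table entries being stored at twice their nominal value
+// so that they remain integral.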
+
+void ggml_vec_dot_q5_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ int ib = 0;
+ float sumf = 0;
+
+ assert(n % qk == 0);
+ assert(qk == QK5_0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q5_0 * GGML_RESTRICT x = vx;
+ const block_q8_0 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ uint32_t qh0;
+ uint32_t qh1;
+
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q5_0 * GGML_RESTRICT x0 = &x[ib];
+ const block_q5_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ // extract the 5th bit via lookup table ((!b) << 4)
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
+
+ tmp0[0] = table_b2b_1[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_1[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_1[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_1[(qh0 >> 24) ];
+
+ tmp1[0] = table_b2b_1[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_1[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_1[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_1[(qh1 >> 24) ];
+
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // add high bit and sub 16 (equivalent to sub 0x10 when bit is zero)
+ const int8x16_t v0_0lf = vsubq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vsubq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vsubq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vsubq_s8(v0_1h, qhh1);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+
+#endif
+ for (; ib < nb; ++ib) {
+ uint32_t qh;
+ memcpy(&qh, x[ib].qh, sizeof(qh));
+
+ int sumi0 = 0;
+ int sumi1 = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh & (1u << (j + 0 ))) >> (j + 0 )) << 4;
+ const uint8_t xh_1 = ((qh & (1u << (j + 16))) >> (j + 12));
+
+ const int32_t x0 = (int8_t)(((x[ib].qs[j] & 0x0F) | xh_0) - 16);
+ const int32_t x1 = (int8_t)(((x[ib].qs[j] >> 4) | xh_1) - 16);
+
+ sumi0 += (x0 * y[ib].qs[j]);
+ sumi1 += (x1 * y[ib].qs[j + qk/2]);
+ }
+
+ int sumi = sumi0 + sumi1;
+ sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d)) * sumi;
+ }
+
+ *s = sumf;
+}
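+
+// The q5_0 high-bit trick above: the scalar tail computes ((low4 | bit << 4) - 16),
+// so values span [-16, 15]. The NEON path reaches the same result by subtracting
+// table_b2b_1 bytes, which hold (!bit) << 4 = 16 exactly when the high bit is
+// clear: low4 - 16*(1 - bit) == (low4 + 16*bit) - 16.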
+
+void ggml_vec_dot_q5_1_q8_1(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ const int qk = QK8_1;
+ const int nb = n / qk;
+
+ int ib = 0;
+ float sumf = 0;
+
+ assert(n % qk == 0);
+ assert(qk == QK5_1);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q5_1 * GGML_RESTRICT x = vx;
+ const block_q8_1 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ float summs0 = 0.0f;
+ float summs1 = 0.0f;
+
+ uint32_t qh0;
+ uint32_t qh1;
+
+ uint64_t tmp0[4];
+ uint64_t tmp1[4];
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q5_1 * GGML_RESTRICT x0 = &x[ib];
+ const block_q5_1 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_1 * GGML_RESTRICT y0 = &y[ib];
+ const block_q8_1 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0F);
+
+ summs0 += GGML_CPU_FP16_TO_FP32(x0->m) * GGML_CPU_FP16_TO_FP32(y0->s);
+ summs1 += GGML_CPU_FP16_TO_FP32(x1->m) * GGML_CPU_FP16_TO_FP32(y1->s);
+
+ // extract the 5th bit via lookup table ((b) << 4)
+ memcpy(&qh0, x0->qh, sizeof(qh0));
+ memcpy(&qh1, x1->qh, sizeof(qh1));
+
+ tmp0[0] = table_b2b_0[(qh0 >> 0) & 0xFF];
+ tmp0[1] = table_b2b_0[(qh0 >> 8) & 0xFF];
+ tmp0[2] = table_b2b_0[(qh0 >> 16) & 0xFF];
+ tmp0[3] = table_b2b_0[(qh0 >> 24) ];
+
+ tmp1[0] = table_b2b_0[(qh1 >> 0) & 0xFF];
+ tmp1[1] = table_b2b_0[(qh1 >> 8) & 0xFF];
+ tmp1[2] = table_b2b_0[(qh1 >> 16) & 0xFF];
+ tmp1[3] = table_b2b_0[(qh1 >> 24) ];
+
+ const int8x16_t qhl0 = vld1q_s8((const int8_t *)(tmp0 + 0));
+ const int8x16_t qhh0 = vld1q_s8((const int8_t *)(tmp0 + 2));
+ const int8x16_t qhl1 = vld1q_s8((const int8_t *)(tmp1 + 0));
+ const int8x16_t qhh1 = vld1q_s8((const int8_t *)(tmp1 + 2));
+
+ const uint8x16_t v0_0 = vld1q_u8(x0->qs);
+ const uint8x16_t v0_1 = vld1q_u8(x1->qs);
+
+ // 4-bit -> 8-bit
+ const int8x16_t v0_0l = vreinterpretq_s8_u8(vandq_u8 (v0_0, m4b));
+ const int8x16_t v0_0h = vreinterpretq_s8_u8(vshrq_n_u8(v0_0, 4));
+ const int8x16_t v0_1l = vreinterpretq_s8_u8(vandq_u8 (v0_1, m4b));
+ const int8x16_t v0_1h = vreinterpretq_s8_u8(vshrq_n_u8(v0_1, 4));
+
+ // add high bit
+ const int8x16_t v0_0lf = vorrq_s8(v0_0l, qhl0);
+ const int8x16_t v0_0hf = vorrq_s8(v0_0h, qhh0);
+ const int8x16_t v0_1lf = vorrq_s8(v0_1l, qhl1);
+ const int8x16_t v0_1hf = vorrq_s8(v0_1h, qhh1);
+
+ // load y
+ const int8x16_t v1_0l = vld1q_s8(y0->qs);
+ const int8x16_t v1_0h = vld1q_s8(y0->qs + 16);
+ const int8x16_t v1_1l = vld1q_s8(y1->qs);
+ const int8x16_t v1_1h = vld1q_s8(y1->qs + 16);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_0lf, v1_0l),
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_0hf, v1_0h))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_1lf, v1_1l),
+ ggml_vdotq_s32(vdupq_n_s32(0), v0_1hf, v1_1h))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1) + summs0 + summs1;
+
+#endif
+ for (; ib < nb; ++ib) {
+ uint32_t qh;
+ memcpy(&qh, x[ib].qh, sizeof(qh));
+
+ int sumi0 = 0;
+ int sumi1 = 0;
+
+ for (int j = 0; j < qk/2; ++j) {
+ const uint8_t xh_0 = ((qh >> (j + 0)) << 4) & 0x10;
+ const uint8_t xh_1 = ((qh >> (j + 12)) ) & 0x10;
+
+ const int32_t x0 = (x[ib].qs[j] & 0xF) | xh_0;
+ const int32_t x1 = (x[ib].qs[j] >> 4) | xh_1;
+
+ sumi0 += (x0 * y[ib].qs[j]);
+ sumi1 += (x1 * y[ib].qs[j + qk/2]);
+ }
+
+ int sumi = sumi0 + sumi1;
+ sumf += (GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d))*sumi + GGML_CPU_FP16_TO_FP32(x[ib].m)*GGML_CPU_FP16_TO_FP32(y[ib].s);
+ }
+
+ *s = sumf;
+}
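+
+// q5_1 differs from q5_0 only in that the quants stay unsigned (0..31): the
+// high bit is OR-ed in via table_b2b_0 ((b) << 4) with no -16 offset, and the
+// per-block minimum m is folded in separately through the m*s term, as in q4_1.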
+
+void ggml_vec_dot_q8_0_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+
+ assert(n % qk == 0);
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+ assert((nrc == 2) || (nrc == 1));
+#else
+ assert(nrc == 1);
+#endif
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q8_0 * GGML_RESTRICT x = vx;
+ const block_q8_0 * GGML_RESTRICT y = vy;
+
+#if defined(__ARM_FEATURE_MATMUL_INT8)
+ if (nrc == 2) {
+ const block_q8_0 * GGML_RESTRICT vx0 = vx;
+ const block_q8_0 * GGML_RESTRICT vx1 = (const block_q8_0 *) ((const uint8_t*)vx + bx);
+ const block_q8_0 * GGML_RESTRICT vy0 = vy;
+ const block_q8_0 * GGML_RESTRICT vy1 = (const block_q8_0 *) ((const uint8_t*)vy + by);
+
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+
+ for (int i = 0; i < nb; i++) {
+ const block_q8_0 * GGML_RESTRICT b_x0 = &vx0[i];
+ const block_q8_0 * GGML_RESTRICT b_y0 = &vy0[i];
+
+ const block_q8_0 * GGML_RESTRICT b_x1 = &vx1[i];
+ const block_q8_0 * GGML_RESTRICT b_y1 = &vy1[i];
+
+ const int8x16_t x0_l = vld1q_s8(b_x0->qs);
+ const int8x16_t x0_h = vld1q_s8(b_x0->qs + 16);
+ const int8x16_t x1_l = vld1q_s8(b_x1->qs);
+ const int8x16_t x1_h = vld1q_s8(b_x1->qs + 16);
+
+ // load y
+ const int8x16_t y0_l = vld1q_s8(b_y0->qs);
+ const int8x16_t y0_h = vld1q_s8(b_y0->qs + 16);
+ const int8x16_t y1_l = vld1q_s8(b_y1->qs);
+ const int8x16_t y1_h = vld1q_s8(b_y1->qs + 16);
+
+ float32_t _scale[4] = {
+ GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+ GGML_CPU_FP16_TO_FP32(b_x0->d)*GGML_CPU_FP16_TO_FP32(b_y1->d),
+ GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y0->d),
+ GGML_CPU_FP16_TO_FP32(b_x1->d)*GGML_CPU_FP16_TO_FP32(b_y1->d)
+ };
+ float32x4_t scale = vld1q_f32(_scale);
+
+ int8x16_t l0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+ int8x16_t l1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_l), vreinterpretq_s64_s8(x1_l)));
+
+ int8x16_t l2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+ int8x16_t l3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(x0_h), vreinterpretq_s64_s8(x1_h)));
+
+ int8x16_t r0 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+ int8x16_t r1 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_l), vreinterpretq_s64_s8(y1_l)));
+
+ int8x16_t r2 = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+ int8x16_t r3 = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(y0_h), vreinterpretq_s64_s8(y1_h)));
+
+ sumv0 = vmlaq_f32(sumv0,(vcvtq_f32_s32(vmmlaq_s32((vmmlaq_s32((vmmlaq_s32((vmmlaq_s32(vdupq_n_s32(0), l0, r0)),
+ l1, r1)), l2, r2)), l3, r3))), scale);
+ }
+
+ float32x4_t sumv1 = vextq_f32 (sumv0, sumv0, 2);
+ float32x4_t sumv2 = vzip1q_f32(sumv0, sumv1);
+
+ vst1_f32(s, vget_low_f32 (sumv2));
+ vst1_f32(s + bs, vget_high_f32(sumv2));
+
+ return;
+ }
+#endif
+
+ int ib = 0;
+ float sumf = 0;
+
+#if defined(__ARM_FEATURE_SVE)
+ svfloat32_t sumv0 = svdup_n_f32(0.0f);
+ svfloat32_t sumv1 = svdup_n_f32(0.0f);
+
+ const int vector_length = ggml_cpu_get_sve_cnt()*8;
+
+    // VLA implementation for SVE
+ switch (vector_length) {
+ case 128:
+ {
+ // predicate for activating lanes for 16 Int8 elements
+ const svbool_t ph16 = svptrue_pat_b8 (SV_VL16);
+ const svbool_t pl16 = svptrue_pat_b32(SV_VL4);
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ // load x
+ const svint8_t qx0_0 = svld1_s8(ph16, x0->qs);
+ const svint8_t qx0_1 = svld1_s8(ph16, x0->qs+16);
+ const svint8_t qx1_0 = svld1_s8(ph16, x1->qs);
+ const svint8_t qx1_1 = svld1_s8(ph16, x1->qs+16);
+
+ // load y
+ const svint8_t qy0_0 = svld1_s8(ph16, y0->qs);
+ const svint8_t qy0_1 = svld1_s8(ph16, y0->qs+16);
+ const svint8_t qy1_0 = svld1_s8(ph16, y1->qs);
+ const svint8_t qy1_1 = svld1_s8(ph16, y1->qs+16);
+
+ sumv0 = svmla_n_f32_x(pl16, sumv0, svcvt_f32_s32_x(pl16, svadd_x(pl16,
+ svdot_s32(svdup_n_s32(0), qx0_0, qy0_0),
+ svdot_s32(svdup_n_s32(0), qx0_1, qy0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = svmla_n_f32_x(pl16, sumv1, svcvt_f32_s32_x(pl16, svadd_x(pl16,
+ svdot_s32(svdup_n_s32(0), qx1_0, qy1_0),
+ svdot_s32(svdup_n_s32(0), qx1_1, qy1_1))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = svaddv_f32(pl16, svadd_f32_x(pl16, sumv0, sumv1));
+ } break;
+ case 256:
+ {
+ //printf("sve256");
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ // load x
+ const svint8_t qx0 = svld1_s8(svptrue_b8(), x0->qs);
+ const svint8_t qx1 = svld1_s8(svptrue_b8(), x1->qs);
+
+ // load y
+ const svint8_t qy0 = svld1_s8(svptrue_b8(), y0->qs);
+ const svint8_t qy1 = svld1_s8(svptrue_b8(), y1->qs);
+
+ sumv0 = svmla_n_f32_x(svptrue_b32(), sumv0, svcvt_f32_s32_x(svptrue_b32(),
+ svdot_s32(svdup_n_s32(0), qx0, qy0)), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+ sumv1 = svmla_n_f32_x(svptrue_b32(), sumv1, svcvt_f32_s32_x(svptrue_b32(),
+ svdot_s32(svdup_n_s32(0), qx1, qy1)), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = svaddv_f32(svptrue_b32(), svadd_f32_x(svptrue_b32(), sumv0, sumv1));
+ } break;
+ case 512:
+ {
+ // predicate for activating high 256 bit
+ const svbool_t ph32 = svptrue_pat_b8(SV_VL32);
+ // predicate for activating low 256 bit
+ const svbool_t pl32 = svnot_b_z(svptrue_b8(), ph32);
+
+ // predicate for activating high lanes for 8 float32 elements
+ const svbool_t ph8 = svptrue_pat_b32(SV_VL8);
+ // predicate for activating low lanes for 8 float32 elements
+ const svbool_t pl8 = svnot_b_z(svptrue_b32(), ph8);
+
+ svfloat32_t sumv00 = svdup_n_f32(0.0f);
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+                    // load 32 int8_t into one half of a vector and another 32 int8_t into the
+                    // opposite half of a second vector, then add them to form one 64-element vector
+ // load x
+ const svint8_t qx_32 = svld1_s8(ph32, x0->qs);
+ svint8_t qx_64 = svld1_s8(pl32, x0->qs + 2);
+
+ qx_64 = svadd_s8_x(svptrue_b8(), qx_32, qx_64);
+
+ // load y
+ const svint8_t qy_32 = svld1_s8(ph32, y0->qs);
+ svint8_t qy_64 = svld1_s8(pl32, y0->qs + 2);
+
+ qy_64 = svadd_s8_x(svptrue_b8(), qy_32, qy_64);
+
+ // scale creation
+ const float32_t deq1 = GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d);
+ const float32_t deq2 = GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d);
+
+ // duplicate deq1 in first half of vector and deq2 in second half of vector
+ const svfloat32_t temp = svdup_f32_m(svdup_f32_z(ph8, deq1), pl8, deq2);
+
+ const svfloat32_t sumvt = svcvt_f32_s32_x(svptrue_b32(), svdot_s32(svdup_n_s32(0), qx_64, qy_64));
+
+ sumv00 = svmla_f32_m(svptrue_b32(), sumv00, sumvt, temp);
+ }
+
+ sumf = svaddv_f32(svptrue_b32(), sumv00);
+ break;
+ }
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ }
+#elif defined(__ARM_NEON)
+ float32x4_t sumv0 = vdupq_n_f32(0.0f);
+ float32x4_t sumv1 = vdupq_n_f32(0.0f);
+
+ for (; ib + 1 < nb; ib += 2) {
+ const block_q8_0 * GGML_RESTRICT x0 = &x[ib + 0];
+ const block_q8_0 * GGML_RESTRICT x1 = &x[ib + 1];
+ const block_q8_0 * GGML_RESTRICT y0 = &y[ib + 0];
+ const block_q8_0 * GGML_RESTRICT y1 = &y[ib + 1];
+
+ const int8x16_t x0_0 = vld1q_s8(x0->qs);
+ const int8x16_t x0_1 = vld1q_s8(x0->qs + 16);
+ const int8x16_t x1_0 = vld1q_s8(x1->qs);
+ const int8x16_t x1_1 = vld1q_s8(x1->qs + 16);
+
+ // load y
+ const int8x16_t y0_0 = vld1q_s8(y0->qs);
+ const int8x16_t y0_1 = vld1q_s8(y0->qs + 16);
+ const int8x16_t y1_0 = vld1q_s8(y1->qs);
+ const int8x16_t y1_1 = vld1q_s8(y1->qs + 16);
+
+ sumv0 = vmlaq_n_f32(sumv0, vcvtq_f32_s32(vaddq_s32(
+ ggml_vdotq_s32(vdupq_n_s32(0), x0_0, y0_0),
+ ggml_vdotq_s32(vdupq_n_s32(0), x0_1, y0_1))), GGML_CPU_FP16_TO_FP32(x0->d)*GGML_CPU_FP16_TO_FP32(y0->d));
+
+ sumv1 = vmlaq_n_f32(sumv1, vcvtq_f32_s32(vaddq_s32(
+ ggml_vdotq_s32(vdupq_n_s32(0), x1_0, y1_0),
+ ggml_vdotq_s32(vdupq_n_s32(0), x1_1, y1_1))), GGML_CPU_FP16_TO_FP32(x1->d)*GGML_CPU_FP16_TO_FP32(y1->d));
+ }
+
+ sumf = vaddvq_f32(sumv0) + vaddvq_f32(sumv1);
+#endif
+ for (; ib < nb; ++ib) {
+ int sumi = 0;
+
+ for (int j = 0; j < qk; j++) {
+ sumi += x[ib].qs[j]*y[ib].qs[j];
+ }
+
+ sumf += sumi*(GGML_CPU_FP16_TO_FP32(x[ib].d)*GGML_CPU_FP16_TO_FP32(y[ib].d));
+ }
+
+ *s = sumf;
+}
+
+void ggml_vec_dot_tq1_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_tq1_0 * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+ float sumf = 0.0f;
+
+ uint8_t k_shift[16] = {1, 1, 1, 1, 3, 3, 3, 3, 9, 9, 9, 9, 27, 27, 27, 27};
+
+ const uint8x16_t shift = vld1q_u8(k_shift);
+
+ for (int i = 0; i < nb; ++i) {
+#if defined(__ARM_FEATURE_DOTPROD)
+ int32x4_t sumi0 = vdupq_n_s32(0);
+ int32x4_t sumi1 = vdupq_n_s32(0);
+#else
+ int16x8_t sumi0 = vdupq_n_s16(0);
+ int16x8_t sumi1 = vdupq_n_s16(0);
+#endif
+
+        // first 32 bytes of qs, each byte packing 5 elements
+ {
+ uint8x16_t qx0 = vld1q_u8(x[i].qs + 0);
+ uint8x16_t qx1 = vld1q_u8(x[i].qs + 16);
+ uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(3));
+ uint8x16_t qx3 = vmulq_u8(qx1, vdupq_n_u8(3));
+ uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(9));
+ uint8x16_t qx5 = vmulq_u8(qx1, vdupq_n_u8(9));
+ uint8x16_t qx6 = vmulq_u8(qx0, vdupq_n_u8(27));
+ uint8x16_t qx7 = vmulq_u8(qx1, vdupq_n_u8(27));
+ uint8x16_t qx8 = vmulq_u8(qx0, vdupq_n_u8(81));
+ uint8x16_t qx9 = vmulq_u8(qx1, vdupq_n_u8(81));
+
+ // multiply by 3 and keep the 2 bits above 8 bits
+ int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6));
+ int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6));
+ int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6));
+ int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6));
+ int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6));
+ int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6));
+ int8x16_t sqx6 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx6, vshrq_n_u8(qx6, 1)), 6));
+ int8x16_t sqx7 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx7, vshrq_n_u8(qx7, 1)), 6));
+ int8x16_t sqx8 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx8, vshrq_n_u8(qx8, 1)), 6));
+ int8x16_t sqx9 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx9, vshrq_n_u8(qx9, 1)), 6));
+
+ const int8x16_t qy0 = vld1q_s8(y[i].qs + 0);
+ const int8x16_t qy1 = vld1q_s8(y[i].qs + 16);
+ const int8x16_t qy2 = vld1q_s8(y[i].qs + 32);
+ const int8x16_t qy3 = vld1q_s8(y[i].qs + 48);
+ const int8x16_t qy4 = vld1q_s8(y[i].qs + 64);
+ const int8x16_t qy5 = vld1q_s8(y[i].qs + 80);
+ const int8x16_t qy6 = vld1q_s8(y[i].qs + 96);
+ const int8x16_t qy7 = vld1q_s8(y[i].qs + 112);
+ const int8x16_t qy8 = vld1q_s8(y[i].qs + 128);
+ const int8x16_t qy9 = vld1q_s8(y[i].qs + 144);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumi0 = vdotq_s32(sumi0, sqx0, qy0);
+ sumi1 = vdotq_s32(sumi1, sqx1, qy1);
+ sumi0 = vdotq_s32(sumi0, sqx2, qy2);
+ sumi1 = vdotq_s32(sumi1, sqx3, qy3);
+ sumi0 = vdotq_s32(sumi0, sqx4, qy4);
+ sumi1 = vdotq_s32(sumi1, sqx5, qy5);
+ sumi0 = vdotq_s32(sumi0, sqx6, qy6);
+ sumi1 = vdotq_s32(sumi1, sqx7, qy7);
+ sumi0 = vdotq_s32(sumi0, sqx8, qy8);
+ sumi1 = vdotq_s32(sumi1, sqx9, qy9);
+#else
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx8), vget_low_s8(qy8));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx8), vget_high_s8(qy8));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx9), vget_low_s8(qy9));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx9), vget_high_s8(qy9));
+#endif
+ }
+
+        // last 16 bytes of qs (5 elements per byte), along with the 4 qh bytes (4 elements per byte)
+ {
+ uint8x16_t qx0 = vld1q_u8(x[i].qs + 32);
+ uint8x16_t qx1 = vmulq_u8(qx0, vdupq_n_u8(3));
+ uint8x16_t qx2 = vmulq_u8(qx0, vdupq_n_u8(9));
+ uint8x16_t qx3 = vmulq_u8(qx0, vdupq_n_u8(27));
+ uint8x16_t qx4 = vmulq_u8(qx0, vdupq_n_u8(81));
+ uint32_t qh;
+ memcpy(&qh, x[i].qh, sizeof(qh)); // potentially unaligned
+ uint8x16_t qx5 = vreinterpretq_u8_u32(vdupq_n_u32(qh));
+ qx5 = vmulq_u8(qx5, shift);
+
+ // multiply by 3 and keep the 2 bits above 8 bits
+ int8x16_t sqx0 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx0, vshrq_n_u8(qx0, 1)), 6));
+ int8x16_t sqx1 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx1, vshrq_n_u8(qx1, 1)), 6));
+ int8x16_t sqx2 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx2, vshrq_n_u8(qx2, 1)), 6));
+ int8x16_t sqx3 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx3, vshrq_n_u8(qx3, 1)), 6));
+ int8x16_t sqx4 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx4, vshrq_n_u8(qx4, 1)), 6));
+ int8x16_t sqx5 = vreinterpretq_s8_u8(vshrq_n_u8(vhaddq_u8(qx5, vshrq_n_u8(qx5, 1)), 6));
+
+ const int8x16_t qy0 = vld1q_s8(y[i].qs + 160);
+ const int8x16_t qy1 = vld1q_s8(y[i].qs + 176);
+ const int8x16_t qy2 = vld1q_s8(y[i].qs + 192);
+ const int8x16_t qy3 = vld1q_s8(y[i].qs + 208);
+ const int8x16_t qy4 = vld1q_s8(y[i].qs + 224);
+ const int8x16_t qy5 = vld1q_s8(y[i].qs + 240);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumi0 = vdotq_s32(sumi0, sqx0, qy0);
+ sumi1 = vdotq_s32(sumi1, sqx1, qy1);
+ sumi0 = vdotq_s32(sumi0, sqx2, qy2);
+ sumi1 = vdotq_s32(sumi1, sqx3, qy3);
+ sumi0 = vdotq_s32(sumi0, sqx4, qy4);
+ sumi1 = vdotq_s32(sumi1, sqx5, qy5);
+#else
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
+#endif
+ }
+
+ const int16x8_t ysum0 = vld1q_s16(y[i].bsums);
+ const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8);
+
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumi0 = vaddq_s32(sumi0, sumi1);
+ sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1)));
+
+ sumf += d * (float) vaddvq_s32(sumi0);
+#else
+ sumi0 = vaddq_s16(sumi0, sumi1);
+ sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1));
+
+ sumf += d * (float) vaddlvq_s16(sumi0);
+#endif
+ }
+
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_tq1_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
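+
+// Sketch of the tq1_0 decode above: each qs byte packs five ternary digits as
+// a base-3 fraction scaled to 256, and each qh byte packs four (hence the
+// {1, 3, 9, 27} shift multipliers). Multiplying by successive powers of 3
+// rotates the next digit into the top bits, where (q + q/2)/2 >> 6, roughly
+// (3*q) >> 8, extracts it as 0, 1 or 2; subtracting the bsums of y then
+// re-centres the digits to -1, 0, +1 before scaling by d.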
+
+void ggml_vec_dot_tq2_0_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_tq2_0 * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+ float sumf = 0.0f;
+
+ const uint8x16_t m3 = vdupq_n_u8(3);
+
+ for (int i = 0; i < nb; ++i) {
+#if defined(__ARM_FEATURE_DOTPROD)
+ int32x4_t sumi0 = vdupq_n_s32(0);
+ int32x4_t sumi1 = vdupq_n_s32(0);
+#else
+ int16x8_t sumi0 = vdupq_n_s16(0);
+ int16x8_t sumi1 = vdupq_n_s16(0);
+#endif
+
+ for (size_t j = 0; j < sizeof(x->qs); j += 32) {
+ uint8x16_t qx0 = vld1q_u8(x[i].qs + j);
+ uint8x16_t qx1 = vld1q_u8(x[i].qs + j + 16);
+ uint8x16_t qx2 = vshrq_n_u8(qx0, 2);
+ uint8x16_t qx3 = vshrq_n_u8(qx1, 2);
+ uint8x16_t qx4 = vshrq_n_u8(qx0, 4);
+ uint8x16_t qx5 = vshrq_n_u8(qx1, 4);
+ uint8x16_t qx6 = vshrq_n_u8(qx0, 6);
+ uint8x16_t qx7 = vshrq_n_u8(qx1, 6);
+
+ int8x16_t sqx0 = vreinterpretq_s8_u8(vandq_u8(qx0, m3));
+ int8x16_t sqx1 = vreinterpretq_s8_u8(vandq_u8(qx1, m3));
+ int8x16_t sqx2 = vreinterpretq_s8_u8(vandq_u8(qx2, m3));
+ int8x16_t sqx3 = vreinterpretq_s8_u8(vandq_u8(qx3, m3));
+ int8x16_t sqx4 = vreinterpretq_s8_u8(vandq_u8(qx4, m3));
+ int8x16_t sqx5 = vreinterpretq_s8_u8(vandq_u8(qx5, m3));
+ int8x16_t sqx6 = vreinterpretq_s8_u8(vandq_u8(qx6, m3));
+ int8x16_t sqx7 = vreinterpretq_s8_u8(vandq_u8(qx7, m3));
+
+ const int8x16_t qy0 = vld1q_s8(y[i].qs + j*4 + 0);
+ const int8x16_t qy1 = vld1q_s8(y[i].qs + j*4 + 16);
+ const int8x16_t qy2 = vld1q_s8(y[i].qs + j*4 + 32);
+ const int8x16_t qy3 = vld1q_s8(y[i].qs + j*4 + 48);
+ const int8x16_t qy4 = vld1q_s8(y[i].qs + j*4 + 64);
+ const int8x16_t qy5 = vld1q_s8(y[i].qs + j*4 + 80);
+ const int8x16_t qy6 = vld1q_s8(y[i].qs + j*4 + 96);
+ const int8x16_t qy7 = vld1q_s8(y[i].qs + j*4 + 112);
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumi0 = vdotq_s32(sumi0, sqx0, qy0);
+ sumi1 = vdotq_s32(sumi1, sqx1, qy1);
+ sumi0 = vdotq_s32(sumi0, sqx2, qy2);
+ sumi1 = vdotq_s32(sumi1, sqx3, qy3);
+ sumi0 = vdotq_s32(sumi0, sqx4, qy4);
+ sumi1 = vdotq_s32(sumi1, sqx5, qy5);
+ sumi0 = vdotq_s32(sumi0, sqx6, qy6);
+ sumi1 = vdotq_s32(sumi1, sqx7, qy7);
+#else
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx0), vget_low_s8(qy0));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx0), vget_high_s8(qy0));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx1), vget_low_s8(qy1));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx1), vget_high_s8(qy1));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx2), vget_low_s8(qy2));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx2), vget_high_s8(qy2));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx3), vget_low_s8(qy3));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx3), vget_high_s8(qy3));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx4), vget_low_s8(qy4));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx4), vget_high_s8(qy4));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx5), vget_low_s8(qy5));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx5), vget_high_s8(qy5));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx6), vget_low_s8(qy6));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx6), vget_high_s8(qy6));
+ sumi0 = vmlal_s8(sumi0, vget_low_s8(sqx7), vget_low_s8(qy7));
+ sumi1 = vmlal_s8(sumi1, vget_high_s8(sqx7), vget_high_s8(qy7));
+#endif
+ }
+
+ const int16x8_t ysum0 = vld1q_s16(y[i].bsums);
+ const int16x8_t ysum1 = vld1q_s16(y[i].bsums + 8);
+
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+
+#if defined(__ARM_FEATURE_DOTPROD)
+ sumi0 = vaddq_s32(sumi0, sumi1);
+ sumi0 = vsubq_s32(sumi0, vpaddlq_s16(vaddq_s16(ysum0, ysum1)));
+
+ sumf += d * (float) vaddvq_s32(sumi0);
+#else
+ sumi0 = vaddq_s16(sumi0, sumi1);
+ sumi0 = vsubq_s16(sumi0, vaddq_s16(ysum0, ysum1));
+
+ sumf += d * (float) vaddlvq_s16(sumi0);
+#endif
+ }
+
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_tq2_0_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_q2_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q2_K * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_FEATURE_SVE
+ const int vector_length = svcntb()*8;
+ const svuint8_t m3s = svdup_n_u8(0x3);
+ const svuint32_t m4s = svdup_n_u32(0xF);
+ const svint32_t vzero_sv = svdup_n_s32(0);
+ svfloat32_t acc_sum = svdup_n_f32(0);
+ svbool_t pred_s32 = svptrue_pat_b32(SV_VL4);
+
+ switch (vector_length) {
+ case 128:
+ for (int i = 0; i < nb; ++i) {
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+ svfloat32_t d_broad = svdup_n_f32((float32_t)d);
+ const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+ svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin);
+
+ const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
+ const uint8_t * GGML_RESTRICT sc = x[i].scales;
+
+ svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc);
+ const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+ mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+4);
+ const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+ svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums);
+ svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+4);
+
+ const svint32_t s0 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_2, q8sums_sv_2));
+
+ mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+8);
+ const svint32_t mins_sv_3 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+ mins_and_scales_sve = svld1ub_u32(svptrue_b32(), sc+12);
+ const svint32_t mins_sv_4 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_b32(), mins_and_scales_sve, 4));
+
+ q8sums_sv_1 = svld1sh_s32(svptrue_b32(), y[i].bsums+8);
+ q8sums_sv_2 = svld1sh_s32(svptrue_b32(), y[i].bsums+12);
+
+ svint32_t s1 = svadd_s32_x(svptrue_b32(), svmul_s32_x(svptrue_b32(), mins_sv_3, q8sums_sv_1), svmul_s32_x(svptrue_b32(), mins_sv_4, q8sums_sv_2));
+
+ svfloat32_t temp = svcvt_f32_s32_x(svptrue_b32(), svadd_s32_x(svptrue_b32(), s0, s1));
+
+ acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, temp, dmin_broad);
+
+ svint32_t sumi1 = svdup_n_s32(0);
+
+ {
+ const svuint8_t q2bits_1 = svld1_u8(svptrue_b8(), q2);
+ svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_1, m3s));
+ svint8_t q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+ const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc), m4s));
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 0));
+
+ const svuint8_t q2bits_3 = svld1_u8(svptrue_b8(), q2+16);
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_3, m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 1));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 2), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 2));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 2), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv, 3));
+
+
+ const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+4), m4s));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 4), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 0));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 4), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 1));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_1, 6), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 2));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_3, 6), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_1, 3));
+
+ //-------------------------------
+
+ q2 += 32;
+ const svint32_t scales_sv_2 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+8), m4s));
+ const svuint8_t q2bits_2 = svld1_u8(svptrue_b8(), q2);
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_2, m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 0));
+
+ const svuint8_t q2bits_4 = svld1_u8(svptrue_b8(), q2+16);
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), q2bits_4, m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 1));
+
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 2), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 2));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 2), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_2, 3));
+
+
+ const svint32_t scales_sv_3 = svreinterpret_s32_u32(svand_u32_m(svptrue_b32(), svld1ub_u32(svptrue_b32(), sc+12), m4s));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 4), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 0));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 4), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 1));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_2, 6), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 2));
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q2bits_4, 6), m3s));
+ q8bytes_sv = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ sumi1 = svmla_s32_m(svptrue_b32(), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), svdup_lane_s32(scales_sv_3, 3));
+ }
+ acc_sum = svmla_f32_m(svptrue_b32(), acc_sum, svcvt_f32_s32_x(svptrue_b32(), sumi1), d_broad);
+ }
+ *s = svaddv_f32(svptrue_b32(), acc_sum);
+ break;
+
+ case 256:
+ case 512:
+ for (int i = 0; i < nb; ++i) {
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+ svfloat32_t d_broad = svdup_n_f32((float32_t)d);
+ const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+ svfloat32_t dmin_broad = svdup_n_f32((float32_t)dmin);
+
+ const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
+ const uint8_t * GGML_RESTRICT sc = x[i].scales;
+
+ const svuint32_t mins_and_scales_sve = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc); sc += 8;
+ const svint32_t scales_sv = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, m4s));
+ const svint32_t mins_sv_1 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve, 4));
+ svint32_t q8sums_sv_1 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums);
+
+ const svuint32_t mins_and_scales_sve_1 = svld1ub_u32(svptrue_pat_b32(SV_VL8), sc);
+ const svint32_t scales_sv_1 = svreinterpret_s32_u32(svand_u32_m(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, m4s));
+ const svint32_t mins_sv_2 = svreinterpret_s32_u32(svlsr_n_u32_x(svptrue_pat_b32(SV_VL8), mins_and_scales_sve_1, 4));
+
+ svint32_t q8sums_sv_2 = svld1sh_s32(svptrue_pat_b32(SV_VL8), y[i].bsums+8);
+
+ svfloat32_t temp = svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_1, q8sums_sv_1), svmul_s32_x(svptrue_pat_b32(SV_VL8), mins_sv_2, q8sums_sv_2)));
+
+ acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, temp, dmin_broad);
+
+ svint32_t sumi1 = svdup_n_s32(0);
+
+ {
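+                // With >= 256-bit vectors each svdot over 32 quants yields 8 int32 lanes: lanes
+                // 0-3 come from the first 16 quants (one sub-block) and lanes 4-7 from the next
+                // 16, so svsel with pred_s32 (true on the low 4 lanes) builds a scale vector that
+                // holds the two sub-block scales side by side.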
+ const svuint8_t q2bits_1 = svld1_u8(svptrue_pat_b8(SV_VL32), q2);
+ svint8_t q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_1, m3s));
+ svint8_t q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ svint32_t scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 0), svdup_lane_s32(scales_sv, 1));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 2), m3s));
+ q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ svint32_t scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 2), svdup_lane_s32(scales_sv, 3));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(svdup_n_s32(0), q2bytes_sv, q8bytes_sv), scale_2);
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 4), m3s));
+ q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv, 4), svdup_lane_s32(scales_sv, 5));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_1, 6), m3s));
+ q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv, 6), svdup_lane_s32(scales_sv, 7));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
+
+ q2 += 32;
+
+ const svuint8_t q2bits_2 = svld1_u8(svptrue_pat_b8(SV_VL32), q2);
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q2bits_2, m3s));
+ q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 0), svdup_lane_s32(scales_sv_1, 1));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 2), m3s));
+ q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 2), svdup_lane_s32(scales_sv_1, 3));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 4), m3s));
+ q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ scale_1 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 4), svdup_lane_s32(scales_sv_1, 5));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_1);
+
+ q2bytes_sv = svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q2bits_2, 6), m3s));
+ q8bytes_sv = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ scale_2 = svsel(pred_s32, svdup_lane_s32(scales_sv_1, 6), svdup_lane_s32(scales_sv_1, 7));
+ sumi1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(vzero_sv, q2bytes_sv, q8bytes_sv), scale_2);
+ }
+ acc_sum = svmla_f32_m(svptrue_pat_b32(SV_VL8), acc_sum, svcvt_f32_s32_x(svptrue_pat_b32(SV_VL8), sumi1), d_broad);
+ }
+ *s = svaddv_f32(svptrue_pat_b32(SV_VL8), acc_sum);
+ break;
+
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ }
+
+#elif __ARM_NEON
+ const uint8x16_t m3 = vdupq_n_u8(0x3);
+ const uint8x16_t m4 = vdupq_n_u8(0xF);
+
+ const int32x4_t vzero = vdupq_n_s32(0);
+
+ ggml_int8x16x2_t q2bytes;
+ uint8_t aux[16];
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+ const float dmin = -y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+
+ const uint8_t * GGML_RESTRICT q2 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ const uint8_t * GGML_RESTRICT sc = x[i].scales;
+
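+        // Each scales[] byte carries the 4-bit sub-block scale in its low nibble and the 4-bit
+        // min in its high nibble; the mins are widened and dotted with the bsums so the min
+        // correction is applied per sub-block (dmin above already carries the negative sign).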
+ const uint8x16_t mins_and_scales = vld1q_u8(sc);
+ const uint8x16_t scales = vandq_u8(mins_and_scales, m4);
+ vst1q_u8(aux, scales);
+
+ const uint8x16_t mins = vshrq_n_u8(mins_and_scales, 4);
+ const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
+ const ggml_int16x8x2_t mins16 = {{vreinterpretq_s16_u16(vmovl_u8(vget_low_u8(mins))), vreinterpretq_s16_u16(vmovl_u8(vget_high_u8(mins)))}};
+ const int32x4_t s0 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[0]), vget_low_s16 (q8sums.val[0])),
+ vmull_s16(vget_high_s16(mins16.val[0]), vget_high_s16(q8sums.val[0])));
+ const int32x4_t s1 = vaddq_s32(vmull_s16(vget_low_s16 (mins16.val[1]), vget_low_s16 (q8sums.val[1])),
+ vmull_s16(vget_high_s16(mins16.val[1]), vget_high_s16(q8sums.val[1])));
+ sum += dmin * vaddvq_s32(vaddq_s32(s0, s1));
+
+ int isum = 0;
+ int is = 0;
+
+// We use this macro instead of a function call because, for some reason, the code runs
+// 2-3% slower with a function call, even when the function is declared inline
+#define MULTIPLY_ACCUM_WITH_SCALE(index)\
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[0], q8bytes.val[0])) * aux[is+(index)];\
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q2bytes.val[1], q8bytes.val[1])) * aux[is+1+(index)];
+
+#define SHIFT_MULTIPLY_ACCUM_WITH_SCALE(shift, index)\
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;\
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[0], (shift)), m3));\
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q2bits.val[1], (shift)), m3));\
+ MULTIPLY_ACCUM_WITH_SCALE((index));
+
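+        // Each q2 byte packs four 2-bit quants at bit offsets 0/2/4/6, so every shift-and-mask
+        // below extracts one plane of 32 quants (two sub-blocks of 16); hence the scale index
+        // advances by 2 per plane and by 8 per 128-quant chunk.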
+ for (int j = 0; j < QK_K/128; ++j) {
+ const ggml_uint8x16x2_t q2bits = ggml_vld1q_u8_x2(q2); q2 += 32;
+
+ ggml_int8x16x2_t q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q2bytes.val[0] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[0], m3));
+ q2bytes.val[1] = vreinterpretq_s8_u8(vandq_u8(q2bits.val[1], m3));
+
+ MULTIPLY_ACCUM_WITH_SCALE(0);
+
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(2, 2);
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(4, 4);
+ SHIFT_MULTIPLY_ACCUM_WITH_SCALE(6, 6);
+
+ is += 8;
+ }
+
+ sum += d * isum;
+ }
+
+ *s = sum;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q2_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
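+// q3_K note (an informal sketch, not a format spec): each 3-bit quant is split into 2 low bits in
+// qs and 1 high bit in hmask, and the kernels reconstruct the signed value as
+//
+//   q = low2 + 4*high_bit - 4   // range [-4, 3]
+//
+// The "- 4" is applied with the BIC trick below: subtract 4 exactly when the high bit is clear.
+// The 6-bit sub-block scales are unpacked from the 12-byte scales field via kmask1/kmask2 and
+// their +32 storage bias is removed (scale[j] -= 32).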
+void ggml_vec_dot_q3_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const uint32_t kmask1 = 0x03030303;
+ const uint32_t kmask2 = 0x0f0f0f0f;
+
+ const block_q3_K * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_FEATURE_SVE)
+
+ uint32_t aux[3];
+ uint32_t utmp[4];
+
+ const int8_t m32 = 32;
+ const int vector_length = svcntb()*8;
+ const svuint8_t m3b_sv = svdup_n_u8(0x3);
+ const svint32_t vzero_sv = svdup_n_s32(0);
+
+ const svuint8_t m0_sv = svdup_n_u8(1);
+ const svuint8_t m1_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 1);
+ const svuint8_t m2_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 2);
+ const svuint8_t m3_sv = svlsl_n_u8_x(svptrue_b8(), m0_sv, 3);
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * GGML_RESTRICT q3_sv = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh_sv = x[i].hmask;
+ const int8_t * GGML_RESTRICT q8_sv = y[i].qs;
+
+ // Set up scales
+ memcpy(aux, x[i].scales, 12);
+ utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+ utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+ utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+ utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+ int8_t * scale = (int8_t *)utmp;
+
+ for (int j = 0; j < 16; ++j) scale[j] -= m32;
+
+ switch (vector_length) {
+ case 128:
+ {
+ svuint8_t qhbits_sv_1 = svld1_u8(svptrue_b8(), qh_sv);
+ svuint8_t qhbits_sv_2 = svld1_u8(svptrue_b8(), qh_sv+16);
+ svuint8_t q3h_sv;
+
+ svint32_t sumi1_1 = svdup_n_s32(0);
+ svint8_t q3bytes_sv;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const svuint8_t q3bits_sv = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16;
+ const svuint8_t q3bits_sv_1 = svld1_u8(svptrue_b8(), q3_sv); q3_sv += 16;
+ svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+ svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_1), 2);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0]));
+
+ q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m0_sv, qhbits_sv_2), 2);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), q3bits_sv_1, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1]));
+
+ q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+ q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_1), 1);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2]));
+
+ q3h_sv = svlsl_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m1_sv, qhbits_sv_2), 1);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3]));
+
+
+ scale += 4;
+ q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+ q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_1);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[0]));
+
+ q3h_sv = svbic_u8_x(svptrue_b8(), m2_sv, qhbits_sv_2);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[1]));
+
+
+ q8bytes_1_sv_1 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+ q8bytes_1_sv_2 = svld1_s8(svptrue_b8(), q8_sv); q8_sv += 16;
+
+ q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_1), 1);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), svdup_n_s32((int32_t)scale[2]));
+
+ q3h_sv = svlsr_n_u8_x(svptrue_b8(), svbic_u8_x(svptrue_b8(), m3_sv, qhbits_sv_2), 1);
+ q3bytes_sv = svsub_s8_x(svptrue_b8(), svreinterpret_s8_u8(svand_u8_m(svptrue_b8(), svlsr_n_u8_x(svptrue_b8(), q3bits_sv_1, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ sumi1_1 = svmla_s32_m(svptrue_b32(), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), svdup_n_s32((int32_t)scale[3]));
+
+ if (j == 0) {
+ qhbits_sv_1 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_1, 4);
+ qhbits_sv_2 = svlsr_n_u8_x(svptrue_b8(), qhbits_sv_2, 4);
+ }
+
+ scale += 4;
+ }
+
+ sum += d * (svaddv_s32(svptrue_b32(), sumi1_1));
+ } break;
+ case 256:
+ case 512:
+ {
+ svuint8_t qhbits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), qh_sv);
+ svuint8_t q3h_sv;
+
+ svint32_t sumi1_1 = svdup_n_s32(0);
+ svint8_t q3bytes_sv;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const svuint8_t q3bits_sv = svld1_u8(svptrue_pat_b8(SV_VL32), q3_sv); q3_sv += 32;
+ svint8_t q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+ svint8_t q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m0_sv, qhbits_sv), 2);
+ q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), q3bits_sv, m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+
+ svint32_t scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1]));
+ sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1);
+
+ q3h_sv = svlsl_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m1_sv, qhbits_sv), 1);
+ q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 2), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3]));
+ sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1);
+
+ scale += 4;
+ q8bytes_1_sv_1 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+ q8bytes_1_sv_2 = svld1_s8(svptrue_pat_b8(SV_VL32), q8_sv); q8_sv += 32;
+
+ q3h_sv = svbic_u8_x(svptrue_pat_b8(SV_VL32), m2_sv, qhbits_sv);
+ q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 4), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[0]), svdup_n_s32((int32_t)scale[1]));
+ sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_1), scale_1);
+
+ q3h_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), svbic_u8_x(svptrue_pat_b8(SV_VL32), m3_sv, qhbits_sv), 1);
+ q3bytes_sv = svsub_s8_x(svptrue_pat_b8(SV_VL32), svreinterpret_s8_u8(svand_u8_m(svptrue_pat_b8(SV_VL32), svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q3bits_sv, 6), m3b_sv)), svreinterpret_s8_u8(q3h_sv));
+
+ scale_1 = svsel_s32(svptrue_pat_b32(SV_VL4), svdup_n_s32((int32_t)scale[2]), svdup_n_s32((int32_t)scale[3]));
+ sumi1_1 = svmla_s32_m(svptrue_pat_b32(SV_VL8), sumi1_1, svdot_s32(vzero_sv, q3bytes_sv, q8bytes_1_sv_2), scale_1);
+
+ if (j == 0) {
+ qhbits_sv = svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), qhbits_sv, 4);
+ }
+
+ scale += 4;
+ }
+
+ sum += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), sumi1_1));
+ } break;
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ }
+ }
+ *s = sum;
+
+#elif __ARM_NEON
+
+ uint32_t aux[3];
+ uint32_t utmp[4];
+
+ const uint8x16_t m3b = vdupq_n_u8(0x3);
+ const int32x4_t vzero = vdupq_n_s32(0);
+
+ const uint8x16_t m0 = vdupq_n_u8(1);
+ const uint8x16_t m1 = vshlq_n_u8(m0, 1);
+ const uint8x16_t m2 = vshlq_n_u8(m0, 2);
+ const uint8x16_t m3 = vshlq_n_u8(m0, 3);
+ const int8_t m32 = 32;
+
+ ggml_int8x16x4_t q3bytes;
+
+ float sum = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh = x[i].hmask;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
+
+ ggml_uint8x16x4_t q3h;
+
+ int32_t isum = 0;
+
+ // Set up scales
+ memcpy(aux, x[i].scales, 12);
+ utmp[3] = ((aux[1] >> 4) & kmask2) | (((aux[2] >> 6) & kmask1) << 4);
+ utmp[2] = ((aux[0] >> 4) & kmask2) | (((aux[2] >> 4) & kmask1) << 4);
+ utmp[1] = (aux[1] & kmask2) | (((aux[2] >> 2) & kmask1) << 4);
+ utmp[0] = (aux[0] & kmask2) | (((aux[2] >> 0) & kmask1) << 4);
+
+ int8_t * scale = (int8_t *)utmp;
+ for (int j = 0; j < 16; ++j) scale[j] -= m32;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ const ggml_uint8x16x2_t q3bits = ggml_vld1q_u8_x2(q3); q3 += 32;
+ const ggml_int8x16x4_t q8bytes_1 = ggml_vld1q_s8_x4(q8); q8 += 64;
+ const ggml_int8x16x4_t q8bytes_2 = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ q3h.val[0] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[0]), 2);
+ q3h.val[1] = vshlq_n_u8(vbicq_u8(m0, qhbits.val[1]), 2);
+ q3h.val[2] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[0]), 1);
+ q3h.val[3] = vshlq_n_u8(vbicq_u8(m1, qhbits.val[1]), 1);
+
+ q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[0], m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+ q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(q3bits.val[1], m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+ q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 2), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+ q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 2), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_1.val[0])) * scale[0];
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_1.val[1])) * scale[1];
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_1.val[2])) * scale[2];
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_1.val[3])) * scale[3];
+
+ scale += 4;
+
+ q3h.val[0] = vbicq_u8(m2, qhbits.val[0]);
+ q3h.val[1] = vbicq_u8(m2, qhbits.val[1]);
+ q3h.val[2] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[0]), 1);
+ q3h.val[3] = vshrq_n_u8(vbicq_u8(m3, qhbits.val[1]), 1);
+
+ q3bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 4), m3b)), vreinterpretq_s8_u8(q3h.val[0]));
+ q3bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 4), m3b)), vreinterpretq_s8_u8(q3h.val[1]));
+ q3bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[0], 6), m3b)), vreinterpretq_s8_u8(q3h.val[2]));
+ q3bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vandq_u8(vshrq_n_u8(q3bits.val[1], 6), m3b)), vreinterpretq_s8_u8(q3h.val[3]));
+
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[0], q8bytes_2.val[0])) * scale[0];
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[1], q8bytes_2.val[1])) * scale[1];
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[2], q8bytes_2.val[2])) * scale[2];
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q3bytes.val[3], q8bytes_2.val[3])) * scale[3];
+
+ scale += 4;
+
+ if (j == 0) {
+ qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 4);
+ qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 4);
+ }
+
+ }
+ sum += d * isum;
+
+ }
+
+ *s = sum;
+
+#else
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q3_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+
+}
+
+#ifdef __ARM_FEATURE_SVE
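+// Helper for the q4_K MMLA path: this appears to reproduce, entirely in vector registers, the
+// scalar kmask1/kmask2/kmask3 unpacking of the 12-byte q4_K scale field into eight 6-bit
+// sub-block scales followed by eight 6-bit mins (the same layout the scalar code builds in utmp).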
+static inline svuint32_t ggml_decode_q4scales_and_mins_for_mmla(const uint32_t * vx_scales) {
+ const svbool_t pg_all = svptrue_pat_b32(SV_VL4);
+ const svbool_t pg_false = svpfalse_b(); // 0x0000
+ const svbool_t pg_lo_8 = svwhilelt_b8_s32(0, 8); // 0x00ff
+ const svbool_t pg_odd = svzip1_b32(pg_false, pg_lo_8);
+
+ svuint32_t vutmp_hi, vutmp_lo;
+ svuint32_t vx01 = svld1_u32(pg_lo_8, vx_scales);
+ vutmp_hi = svzip1_u32(vx01, vx01);
+ vutmp_hi = svlsr_n_u32_m(pg_odd, vutmp_hi, 2);
+ vutmp_hi = svreinterpret_u32_u64(svand_n_u64_x(pg_all, svreinterpret_u64_u32(vutmp_hi), UINT64_C(0x303030303f3f3f3f)));
+ const svuint32_t vx2 = svdup_u32(vx_scales[2]);
+ vutmp_lo = svlsr_u32_x(pg_all, vx2, svreinterpret_u32_s32(svindex_s32(-2, 2)));
+ vutmp_lo = svand_n_u32_z(pg_odd, vutmp_lo, UINT32_C(0x0f0f0f0f));
+ svuint32_t vutmp = svorr_u32_z(pg_all, vutmp_hi, vutmp_lo);
+ return vutmp;
+}
+#endif
+
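+// Reference for the q4_K kernels below (a sketch, assuming the usual q4_K layout: 8 sub-blocks of
+// 32 weights, two 4-bit quants per byte, 6-bit scales and mins packed into the 12-byte scales
+// field):
+//
+//   result = sum_j ( d * sc[j] * dot(q4[j], q8[j]) - dmin * min[j] * (bsums[2j] + bsums[2j+1]) )
+//
+// The q8_K bsums are stored per 16 values, so consecutive pairs are added (vpaddq / uzp+add
+// below) to match the 32-weight sub-block granularity of q4_K.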
+void ggml_vec_dot_q4_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+#ifdef __ARM_FEATURE_MATMUL_INT8
+ assert((nrc == 2) || (nrc == 1));
+#else
+ assert(nrc == 1);
+#endif
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q4_K * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ static const uint32_t kmask1 = 0x3f3f3f3f;
+ static const uint32_t kmask2 = 0x0f0f0f0f;
+ static const uint32_t kmask3 = 0x03030303;
+
+ uint32_t utmp[4];
+#ifdef __ARM_FEATURE_SVE
+ const int vector_length = ggml_cpu_get_sve_cnt()*8;
+#endif
+
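+// When nrc == 2 the MMLA paths below compute a 2x2 tile in one pass: the two quantized rows at
+// vx and vx + bx against the two q8 rows at vy and vy + by, with the four resulting dot products
+// written to s[0..1] and s[bs..bs+1], so every loaded operand is reused for both products that
+// need it.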
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
+ if (nrc == 2) {
+ svbool_t pg32_2 = svptrue_pat_b32(SV_VL2);
+
+ const block_q4_K * GGML_RESTRICT vx0 = vx;
+ const block_q8_K * GGML_RESTRICT vy0 = vy;
+ const block_q4_K * GGML_RESTRICT vx1 = (const block_q4_K *) ((const uint8_t*)vx + bx);
+ const block_q8_K * GGML_RESTRICT vy1 = (const block_q8_K *) ((const uint8_t*)vy + by);
+
+ union {
+ uint32_t u32[8];
+ uint64_t u64[4];
+ } new_utmp;
+
+ svfloat32_t sumf1 = svdup_n_f32(0);
+
+ switch (vector_length) {
+ case 128:
+ {
+ svbool_t pg_false = svpfalse_b();
+ svbool_t pg_lo_8 = svwhilelt_b8_s32(0, 8);
+            svbool_t vmins_mask1 = svzip1_b32(pg_lo_8, pg_false);
+ svbool_t vmins_mask2 = svzip1_b32(pg_false, pg_lo_8);
+ svbool_t pg128_all = svptrue_pat_b8(SV_VL16);
+ for (int i = 0; i < nb; ++i) {
+ svfloat32_t vy_d = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d));
+ svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d)));
+ svfloat32_t svsuper_block_scales = svmul_f32_x(pg128_all, vy_d, vx_d);
+ svfloat32_t vx_dmins = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].dmin)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].dmin)));
+ svfloat32_t vy_dmins = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d));
+ svfloat32_t svdmins = svmul_n_f32_x(pg128_all, svmul_f32_x(pg128_all, vy_dmins, vx_dmins), -1);
+ const uint8_t * GGML_RESTRICT q4_0 = vx0[i].qs;
+ const int8_t * GGML_RESTRICT q8_0 = vy0[i].qs;
+ const uint8_t * GGML_RESTRICT q4_1 = vx1[i].qs;
+ const int8_t * GGML_RESTRICT q8_1 = vy1[i].qs;
+ svint16_t lo = svld1_s16(pg128_all, vy0[i].bsums + 0);
+ svint16_t hi = svld1_s16(pg128_all, vy0[i].bsums + 8);
+ svint16_t sum_tmp1 = svuzp1_s16(lo, hi);
+ svint16_t sum_tmp2 = svuzp2_s16(lo, hi);
+ svint16_t svq8sums_0 = svadd_s16_x(pg128_all, sum_tmp1, sum_tmp2);
+ lo = svld1_s16(pg128_all, vy1[i].bsums + 0);
+ hi = svld1_s16(pg128_all, vy1[i].bsums + 8);
+ sum_tmp1 = svuzp1(lo, hi);
+ sum_tmp2 = svuzp2(lo, hi);
+ svint16_t svq8sums_1 = svadd_s16_x(pg128_all, sum_tmp1, sum_tmp2);
+ svuint32_t decoded_scales0 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx0[i].scales);
+ svuint32_t decoded_scales1 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx1[i].scales);
+ svuint32x2_t decoded_scales = svcreate2_u32(decoded_scales0, decoded_scales1);
+ svst2_u32(pg128_all, new_utmp.u32, decoded_scales);
+ svint16_t svmins8_0 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u32(svuzp1_u32(svld1_u32(vmins_mask1, new_utmp.u32+4), svdup_n_u32(0)))));
+ svint16_t svmins8_1 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u32(svuzp2_u32(svld1_u32(vmins_mask2, new_utmp.u32+4), svdup_n_u32(0)))));
+ svint32_t svsumfs_tmp1 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_0, svmins8_0));
+ svint32_t svsumfs_tmp2 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_0, svmins8_1));
+ svint32_t svsumfs_tmp3 = svtrn1_s32(svsumfs_tmp1, svsumfs_tmp2);
+ svint32_t svsumfs_tmp4 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_1, svmins8_0));
+ svint32_t svsumfs_tmp5 = svreinterpret_s32_s64(svdot_s64(svdup_n_s64(0), svq8sums_1, svmins8_1));
+ svint32_t svsumfs_tmp6 = svtrn1_s32(svsumfs_tmp4, svsumfs_tmp5);
+ svint32_t svsumfs_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(svsumfs_tmp3), svreinterpret_s64_s32(svsumfs_tmp6)));
+ svint32_t svsumfs_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(svsumfs_tmp3), svreinterpret_s64_s32(svsumfs_tmp6)));
+ svint32_t svsumfs_tmp = svadd_s32_x(pg128_all, svsumfs_tmp7, svsumfs_tmp8);
+ svint32_t svscales, sumi1, sumi2;
+ svint32_t acc_sumif1 = svdup_n_s32(0);
+ svint32_t acc_sumif2 = svdup_n_s32(0);
+ svint8_t q4bytes_0_l, q4bytes_0_h, q4bytes_1_l, q4bytes_1_h, l0, l1, l2, l3,
+ q8bytes_0_h, q8bytes_0_l, q8bytes_1_h, q8bytes_1_l, r0, r1, r2, r3;
+#pragma GCC unroll 1
+ for (int j = 0; j < QK_K/64; ++j) {
+ q4bytes_0_l = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0), 0xf));
+ q4bytes_1_l = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1), 0xf));
+ q4bytes_0_h = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0+16), 0xf));
+ q4bytes_1_h = svreinterpret_s8_u8(svand_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1+16), 0xf));
+ l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l)));
+ l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l)));
+ l2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h)));
+ l3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h)));
+ q8bytes_0_h = svld1_s8(pg128_all, q8_0);
+ q8bytes_1_h = svld1_s8(pg128_all, q8_1);
+ q8bytes_0_l = svld1_s8(pg128_all, q8_0+16);
+ q8bytes_1_l = svld1_s8(pg128_all, q8_1+16);
+ r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h)));
+ r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h)));
+ r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l)));
+ r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l)));
+ sumi1 = svmmla_s32(svmmla_s32(svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), r2, l2), r3, l3);
+ svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg128_all, svlsl_n_u32_x(pg128_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-1)), 24));
+ acc_sumif1 = svmla_s32_x(pg128_all, acc_sumif1, svscales, sumi1);
+
+ q4bytes_0_l = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0), 4));
+ q4bytes_1_l = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1), 4));
+ q4bytes_0_h = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_0+16), 4));
+ q4bytes_1_h = svreinterpret_s8_u8(svlsr_n_u8_x(pg128_all, svld1_u8(pg128_all, q4_1+16), 4));
+ l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l)));
+ l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_l), svreinterpret_s64_s8(q4bytes_1_l)));
+ l2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h)));
+ l3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q4bytes_0_h), svreinterpret_s64_s8(q4bytes_1_h)));
+ q8bytes_0_h = svld1_s8(pg128_all, q8_0+32);
+ q8bytes_1_h = svld1_s8(pg128_all, q8_1+32);
+ q8bytes_0_l = svld1_s8(pg128_all, q8_0+48);
+ q8bytes_1_l = svld1_s8(pg128_all, q8_1+48);
+ r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h)));
+ r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_h), svreinterpret_s64_s8(q8bytes_1_h)));
+ r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l)));
+ r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0_l), svreinterpret_s64_s8(q8bytes_1_l)));
+ sumi2 = svmmla_s32(svmmla_s32(svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), r2, l2), r3, l3);
+ svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg128_all, svlsl_n_u32_x(pg128_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-2)), 24));
+ acc_sumif2 = svmla_s32_x(pg128_all, acc_sumif2, svscales, sumi2);
+ q4_0 += 32; q4_1 += 32; q8_0 += 64; q8_1 += 64;
+ }
+ sumf1 = svmla_f32_x(pg128_all,
+ svmla_f32_x(pg128_all,
+ sumf1,
+ svcvt_f32_x(pg128_all,
+ svadd_s32_x(pg128_all, acc_sumif1, acc_sumif2)),
+ svsuper_block_scales),
+ svdmins,
+ svcvt_f32_s32_x(pg128_all, svsumfs_tmp));
+ } //end of for nb
+ } // end of case 128
+ break;
+ case 256:
+ case 512:
+ {
+ const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4);
+ const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16);
+ const svbool_t pg256_all = svptrue_pat_b8(SV_ALL);
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * GGML_RESTRICT q4_0 = vx0[i].qs;
+ const int8_t * GGML_RESTRICT q8_0 = vy0[i].qs;
+ const uint8_t * GGML_RESTRICT q4_1 = vx1[i].qs;
+ const int8_t * GGML_RESTRICT q8_1 = vy1[i].qs;
+ svint32_t svscales, sumi1, sumi2;
+ svint32_t acc_sumif1 = svdup_n_s32(0);
+ svint32_t acc_sumif2 = svdup_n_s32(0);
+ svint8_t l0, l1, l2, l3, r0, r1, r2, r3;
+ svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d)));
+ svfloat64_t vy_d_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)));
+ svfloat32_t vy_d = svreinterpret_f32_f64(svuzp1_f64(vy_d_tmp, vy_d_tmp));
+ svfloat32_t svsuper_block_scales = svmul_f32_z(pg32_4, vy_d, vx_d);
+ svfloat32_t vx_dmins = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].dmin)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].dmin)));
+ svfloat64_t vy_dmins_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)));
+ svfloat32_t vy_dmins = svreinterpret_f32_f64(svuzp1_f64(vy_dmins_tmp, vy_dmins_tmp));
+ svfloat32_t svdmins = svmul_n_f32_x(pg32_4, svmul_f32_x(pg32_4, vx_dmins, vy_dmins), -1);
+ svint16_t rc1 = svuzp1_s16(svld1_s16(pg256_all, vy0[i].bsums), svld1_s16(pg256_all, vy1[i].bsums));
+ svint16_t rc2 = svuzp2_s16(svld1_s16(pg256_all, vy0[i].bsums), svld1_s16(pg256_all, vy1[i].bsums));
+ svint16_t svq8sums = svadd_s16_x(pg256_all, rc1, rc2);
+ svuint32_t decoded_scales0 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx0[i].scales);
+ svuint32_t decoded_scales1 = ggml_decode_q4scales_and_mins_for_mmla((const uint32_t *)vx1[i].scales);
+ svuint32x2_t decoded_scales = svcreate2_u32(decoded_scales0, decoded_scales1);
+ svst2_u32(pg8_16, new_utmp.u32, decoded_scales);
+ svint16_t new_svq8sums_0 = svreinterpret_s16_u64(svtrn1_u64(svreinterpret_u64_s16(svq8sums), svreinterpret_u64_s16(svq8sums)));
+ svint16_t new_svq8sums_1 = svreinterpret_s16_u64(svtrn2_u64(svreinterpret_u64_s16(svq8sums), svreinterpret_u64_s16(svq8sums)));
+ svuint64_t new_mins_0 = svdup_u64(new_utmp.u64[2]);
+ svuint64_t new_mins_1 = svdup_u64(new_utmp.u64[3]);
+ svint16_t new_svmins8_0 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u64(new_mins_0)));
+ svint16_t new_svmins8_1 = svreinterpret_s16_u16(svunpklo_u16(svreinterpret_u8_u64(new_mins_1)));
+ svint64_t dot_prod_0 = svdot_s64(svdup_s64(0), new_svmins8_0, new_svq8sums_0);
+ svint64_t dot_prod_1 = svdot_s64(dot_prod_0, new_svmins8_1, new_svq8sums_1);
+ svfloat32_t converted_dot_prod_1 = svcvt_f32_s64_x(pg256_all, dot_prod_1);
+ svfloat32_t svsumfs_tmp = svuzp1_f32(converted_dot_prod_1, converted_dot_prod_1);
+
+#pragma GCC unroll 1
+ for (int j = 0; j < QK_K/64; ++j) {
+ svuint8_t q4bytes_0 = svand_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_0), 0xf);
+ svuint8_t q4bytes_1 = svand_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_1), 0xf);
+ svuint8_t q4bytes_2 = svlsr_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_0), 4);
+ svuint8_t q4bytes_3 = svlsr_n_u8_x(pg256_all, svld1_u8(pg256_all, q4_1), 4);
+ l0 = svreinterpret_s8_u64(svzip1_u64(svreinterpret_u64_u8(q4bytes_0), svreinterpret_u64_u8(q4bytes_1)));
+ l1 = svreinterpret_s8_u64(svzip2_u64(svreinterpret_u64_u8(q4bytes_0), svreinterpret_u64_u8(q4bytes_1)));
+ l2 = svreinterpret_s8_u64(svzip1_u64(svreinterpret_u64_u8(q4bytes_2), svreinterpret_u64_u8(q4bytes_3)));
+ l3 = svreinterpret_s8_u64(svzip2_u64(svreinterpret_u64_u8(q4bytes_2), svreinterpret_u64_u8(q4bytes_3)));
+ svint8_t q8bytes_0 = svld1_s8(pg256_all, q8_0);
+ svint8_t q8bytes_1 = svld1_s8(pg256_all, q8_1);
+ svint8_t q8bytes_2 = svld1_s8(pg256_all, q8_0+32);
+ svint8_t q8bytes_3 = svld1_s8(pg256_all, q8_1+32);
+ r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1)));
+ r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1)));
+ r2 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_2), svreinterpret_s64_s8(q8bytes_3)));
+ r3 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_2), svreinterpret_s64_s8(q8bytes_3)));
+ sumi1 = svmmla(svmmla(svdup_n_s32(0), r0, l0), r1, l1);
+ svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg256_all, svlsl_n_u32_x(pg256_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-1)), 24));
+ acc_sumif1 = svmla_s32_x(pg256_all, acc_sumif1, svscales, sumi1);
+ sumi2 = svmmla(svmmla(svdup_n_s32(0), r2, l2), r3, l3);
+ svscales = svreinterpret_s32_u32(svlsr_n_u32_x(pg256_all, svlsl_n_u32_x(pg256_all, svreinterpret_u32_u64(svdup_n_u64(new_utmp.u64[j/2])), 8*(4-2*(j%2)-2)), 24));
+ acc_sumif2 = svmla_s32_x(pg256_all, acc_sumif2, svscales, sumi2);
+ q4_0 += 32; q4_1 += 32; q8_0 += 64; q8_1 += 64;
+ }
+ svint32_t acc_sumif = svadd_s32_x(pg256_all, acc_sumif1, acc_sumif2);
+ svint32_t swap_acc_sumif = svext_s32(acc_sumif, acc_sumif, 4);
+ acc_sumif = svadd_s32_x(pg32_4, acc_sumif, swap_acc_sumif);
+ sumf1 = svmla_f32_x(pg32_4,
+ svmla_f32_x(pg32_4,
+ sumf1,
+ svcvt_f32_x(pg32_4, acc_sumif),
+ svsuper_block_scales),
+ svdmins,
+ svsumfs_tmp);
+ } // end of for nb
+ } // end of case 256-512
+ break;
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ }
+
+ svst1_f32(pg32_2, s, sumf1);
+ svst1_f32(pg32_2, s + bs, svreinterpret_f32_u8(svext_u8(svreinterpret_u8_f32(sumf1), svdup_n_u8(0), 8)));
+
+ return;
+ }
+#elif defined(__ARM_FEATURE_MATMUL_INT8)
+ if (nrc == 2) {
+ const block_q4_K * GGML_RESTRICT x0 = x;
+ const block_q4_K * GGML_RESTRICT x1 = (const block_q4_K *) ((const uint8_t *)vx + bx);
+ const block_q8_K * GGML_RESTRICT y0 = y;
+ const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by);
+
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+
+ float32x4_t vfsum = vdupq_n_f32(0.0f);
+
+ for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) {
+ const uint8_t * GGML_RESTRICT qx0 = x0->qs;
+ const uint8_t * GGML_RESTRICT qx1 = x1->qs;
+ const int8_t * GGML_RESTRICT qy0 = y0->qs;
+ const int8_t * GGML_RESTRICT qy1 = y1->qs;
+
+ // decode scales and mins
+ int8_t x0_scales[8], x1_scales[8];
+ int16x8_t x0_mins, x1_mins;
+ {
+ uint32_t scales_mins[3];
+ memcpy(scales_mins, x0->scales, 12);
+ const uint32_t mins_0_3 = scales_mins[1] & kmask1;
+ const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4);
+ const uint32x2_t mins = {mins_0_3, mins_4_7};
+ x0_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins)));
+ uint32_t scales[2];
+ scales[0] = scales_mins[0] & kmask1; // scales 0~3
+ scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7
+ memcpy(x0_scales, scales, 8);
+ }
+ {
+ uint32_t scales_mins[3];
+ memcpy(scales_mins, x1->scales, 12);
+ const uint32_t mins_0_3 = scales_mins[1] & kmask1;
+ const uint32_t mins_4_7 = ((scales_mins[2] >> 4) & kmask2) | (((scales_mins[1] >> 6) & kmask3) << 4);
+ const uint32x2_t mins = {mins_0_3, mins_4_7};
+ x1_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins)));
+ uint32_t scales[2];
+ scales[0] = scales_mins[0] & kmask1; // scales 0~3
+ scales[1] = (scales_mins[2] & kmask2) | (((scales_mins[0] >> 6) & kmask3) << 4); // scales 4~7
+ memcpy(x1_scales, scales, 8);
+ }
+
+ int32x4_t visum = {0};
+
+            // process 64 data points per iteration, 256 data points in total
+ for (int j = 0; j < QK_K / 64; ++j, qx0 += 32, qx1 += 32, qy0 += 64, qy1 += 64) {
+ const int8x16x4_t vy0 = vld1q_s8_x4(qy0);
+ const int8x16x4_t vy1 = vld1q_s8_x4(qy1);
+
+ int8x16_t vx0[4], vx1[4];
+ {
+ const uint8x16x2_t vv = vld1q_u8_x2(qx0);
+ vx0[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b));
+ vx0[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b));
+ vx0[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4));
+ vx0[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4));
+ }
+ {
+ const uint8x16x2_t vv = vld1q_u8_x2(qx1);
+ vx1[0] = vreinterpretq_s8_u8(vandq_u8(vv.val[0], m4b));
+ vx1[1] = vreinterpretq_s8_u8(vandq_u8(vv.val[1], m4b));
+ vx1[2] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[0], 4));
+ vx1[3] = vreinterpretq_s8_u8(vshrq_n_u8(vv.val[1], 4));
+ }
+
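+            // vmmlaq_s32 (SMMLA) treats each 16-byte operand as a 2x8 int8 matrix and accumulates
+            // the 2x2 int32 product a*b^T; the vzip1q/vzip2q_s64 shuffles below pack [x0 | x1] and
+            // [y0 | y1] rows so one instruction yields {x0.y0, x0.y1, x1.y0, x1.y1}, which is why
+            // block_scale is laid out as {s_x0, s_x0, s_x1, s_x1}.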
+            // process 32 data points (which share the same block scale) per iteration
+ for (int k = 0; k < 2; ++k) {
+ const int blk = j * 2 + k;
+ const int32x4_t block_scale = {
+ x0_scales[blk],
+ x0_scales[blk],
+ x1_scales[blk],
+ x1_scales[blk],
+ };
+
+ int32x4_t vr = {0};
+ for (int l = 0; l < 2; ++l) {
+ const int idx = k * 2 + l;
+ const int64x2_t vx0_s64 = vreinterpretq_s64_s8(vx0[idx]);
+ const int64x2_t vx1_s64 = vreinterpretq_s64_s8(vx1[idx]);
+ const int64x2_t vy0_s64 = vreinterpretq_s64_s8(vy0.val[idx]);
+ const int64x2_t vy1_s64 = vreinterpretq_s64_s8(vy1.val[idx]);
+ const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vx0_s64, vx1_s64));
+ const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vx0_s64, vx1_s64));
+ const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vy0_s64, vy1_s64));
+ const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vy0_s64, vy1_s64));
+ vr = vmmlaq_s32(vr, vx_l, vy_l);
+ vr = vmmlaq_s32(vr, vx_h, vy_h);
+ }
+ // apply block scale, will NOT overflow
+ // block_scale * sum_256(int4*int8) <= 2^(8+8+4+8) = 28 bits
+ visum = vmlaq_s32(visum, vr, block_scale);
+ }
+ }
+
+ // adjust bias, apply superblock scale
+ {
+ int32_t bias[4];
+ // no obvious uplift from sve sdot-16, just use neon mul add
+ const int16x8_t y0_sums = vpaddq_s16(vld1q_s16(y0->bsums), vld1q_s16(y0->bsums+8));
+ const int16x8_t y1_sums = vpaddq_s16(vld1q_s16(y1->bsums), vld1q_s16(y1->bsums+8));
+ bias[0] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x0_mins)),
+ vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x0_mins))));
+ bias[1] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x0_mins)),
+ vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x0_mins))));
+ bias[2] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y0_sums), vget_low_s16(x1_mins)),
+ vmull_s16(vget_high_s16(y0_sums), vget_high_s16(x1_mins))));
+ bias[3] = vaddvq_s32(vaddq_s32(vmull_s16(vget_low_s16(y1_sums), vget_low_s16(x1_mins)),
+ vmull_s16(vget_high_s16(y1_sums), vget_high_s16(x1_mins))));
+ const float32x4_t dmins = {
+ GGML_CPU_FP16_TO_FP32(x0->dmin) * y0->d,
+ GGML_CPU_FP16_TO_FP32(x0->dmin) * y1->d,
+ GGML_CPU_FP16_TO_FP32(x1->dmin) * y0->d,
+ GGML_CPU_FP16_TO_FP32(x1->dmin) * y1->d,
+ };
+ vfsum = vmlsq_f32(vfsum, vcvtq_f32_s32(vld1q_s32(bias)), dmins);
+
+ const float32x4_t superblock_scale = {
+ GGML_CPU_FP16_TO_FP32(x0->d) * y0->d,
+ GGML_CPU_FP16_TO_FP32(x0->d) * y1->d,
+ GGML_CPU_FP16_TO_FP32(x1->d) * y0->d,
+ GGML_CPU_FP16_TO_FP32(x1->d) * y1->d,
+ };
+ vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale);
+ }
+ }
+
+ // vfsum = ABCD -> ACBD
+ // AC -> s, BD -> (s+bs)
+ vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2));
+ vst1_f32(s, vget_low_f32 (vfsum));
+ vst1_f32(s + bs, vget_high_f32(vfsum));
+
+ return;
+ }
+#endif
+
+#ifdef __ARM_FEATURE_SVE
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+
+ const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+ memcpy(utmp, x[i].scales, K_SCALE_SIZE);
+
+ uint32x2_t mins8 = { 0 };
+ mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
+ mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
+
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[0] &= kmask1;
+
+ const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
+ const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+ vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+ sumf -= dmin * vaddvq_s32(prod);
+
+ const uint8_t * scales = (const uint8_t *)utmp;
+
+ const uint8_t * GGML_RESTRICT q4 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ const svuint8_t m4b = svdup_n_u8(0xf);
+ const svint32_t mzero = svdup_n_s32(0);
+ svint32_t sumi1 = svdup_n_s32(0);
+ svint32_t sumi1_1 = svdup_n_s32(0);
+ svint32_t sumi1_2 = svdup_n_s32(0);
+ svint32_t sumi2 = svdup_n_s32(0);
+ svint32_t sumi2_1 = svdup_n_s32(0);
+ svint32_t sumi2_2 = svdup_n_s32(0);
+ switch (vector_length) {
+ case 128:
+ {
+ for (int j = 0; j < QK_K/64; ++j) {
+ svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), m4b));
+ svint8_t q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+ sumi1_1 = svmla_n_s32_x(svptrue_b32(), sumi1_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]);
+ q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), m4b));
+ q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+ sumi1_2 = svmla_n_s32_x(svptrue_b32(), sumi1_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]);
+
+ q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4), 4));
+ q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+ sumi2_1 = svmla_n_s32_x(svptrue_b32(), sumi2_1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]);
+ q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_b8(), svld1_u8(svptrue_b8(), q4+16), 4));
+ q8bytes = svld1_s8(svptrue_b8(), q8); q8 += 16;
+ sumi2_2 = svmla_n_s32_x(svptrue_b32(), sumi2_2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]);
+ q4 += 32;
+ }
+ sumi1 = svadd_s32_x(svptrue_b32(), sumi1_1, sumi1_2);
+ sumi2 = svadd_s32_x(svptrue_b32(), sumi2_1, sumi2_2);
+ sumf += d * (svaddv_s32(svptrue_b32(), svadd_s32_x(svptrue_b32(), sumi1, sumi2)));
+ } break;
+ case 256:
+ case 512:
+ {
+ for (int j = 0; j < QK_K/64; ++j) {
+ const svuint8_t q4bits = svld1_u8(svptrue_pat_b8(SV_VL32), q4); q4 += 32;
+ svint8_t q4bytes = svreinterpret_s8_u8(svand_u8_x(svptrue_pat_b8(SV_VL32), q4bits, m4b));
+ svint8_t q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32;
+ sumi1 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi1, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+0]);
+
+ q4bytes = svreinterpret_s8_u8(svlsr_n_u8_x(svptrue_pat_b8(SV_VL32), q4bits, 4));
+ q8bytes = svld1_s8(svptrue_pat_b8(SV_VL32), q8); q8 += 32;
+ sumi2 = svmla_n_s32_x(svptrue_pat_b32(SV_VL8), sumi2, svdot_s32(mzero, q4bytes, q8bytes), scales[2*j+1]);
+ }
+ sumf += d * (svaddv_s32(svptrue_pat_b32(SV_VL8), svadd_s32_x(svptrue_pat_b32(SV_VL8), sumi1, sumi2)));
+ } break;
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ }
+ }
+ *s = sumf;
+#elif defined __ARM_NEON
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+ const int32x4_t mzero = vdupq_n_s32(0);
+
+ ggml_int8x16x2_t q4bytes;
+ ggml_int8x16x2_t q8bytes;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+
+ const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+ memcpy(utmp, x[i].scales, 12);
+
+ uint32x2_t mins8 = { 0 };
+ mins8 = vset_lane_u32(utmp[1] & kmask1, mins8, 0);
+ mins8 = vset_lane_u32(((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4), mins8, 1);
+
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[0] &= kmask1;
+
+ const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins8)));
+ const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+ vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+ sumf -= dmin * vaddvq_s32(prod);
+
+ const uint8_t * scales = (const uint8_t *)utmp;
+
+ const uint8_t * GGML_RESTRICT q4 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ int32_t sumi1 = 0;
+ int32_t sumi2 = 0;
+
+ for (int j = 0; j < QK_K/64; ++j) {
+ const ggml_uint8x16x2_t q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
+
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[0], m4b));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vandq_u8 (q4bits.val[1], m4b));
+
+ const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+ sumi1 += vaddvq_s32(p1) * scales[2*j+0];
+
+ q8bytes = ggml_vld1q_s8_x2(q8); q8 += 32;
+ q4bytes.val[0] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[0], 4));
+ q4bytes.val[1] = vreinterpretq_s8_u8(vshrq_n_u8(q4bits.val[1], 4));
+
+ const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(mzero, q4bytes.val[0], q8bytes.val[0]), q4bytes.val[1], q8bytes.val[1]);
+
+ sumi2 += vaddvq_s32(p2) * scales[2*j+1];
+ }
+
+ sumf += d * (sumi1 + sumi2);
+
+ }
+
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q4_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
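+// q5_K note (a sketch, assuming the usual layout): 8 sub-blocks of 32 weights; each quant is the
+// 4-bit nibble from qs plus a 5th bit taken from qh and shifted into bit 4, giving values 0..31.
+// Scales and mins are packed exactly as in q4_K, and the min correction again uses the
+// pair-added bsums.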
+void ggml_vec_dot_q5_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q5_K * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ static const uint32_t kmask1 = 0x3f3f3f3f;
+ static const uint32_t kmask2 = 0x0f0f0f0f;
+ static const uint32_t kmask3 = 0x03030303;
+
+ uint32_t utmp[4];
+
+
+#ifdef __ARM_NEON
+ const uint8x16_t m4b = vdupq_n_u8(0xf);
+ const uint8x16_t mone = vdupq_n_u8(1);
+ const uint8x16_t mtwo = vdupq_n_u8(2);
+ const int32x4_t mzero = vdupq_n_s32(0);
+
+ ggml_int8x16x4_t q5bytes;
+
+ float sumf = 0;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d);
+ const float dmin = y[i].d * GGML_CPU_FP16_TO_FP32(x[i].dmin);
+
+ const int16x8_t q8sums = vpaddq_s16(vld1q_s16(y[i].bsums), vld1q_s16(y[i].bsums + 8));
+
+ memcpy(utmp, x[i].scales, 12);
+ utmp[3] = ((utmp[2] >> 4) & kmask2) | (((utmp[1] >> 6) & kmask3) << 4);
+ const uint32_t uaux = utmp[1] & kmask1;
+ utmp[1] = (utmp[2] & kmask2) | (((utmp[0] >> 6) & kmask3) << 4);
+ utmp[2] = uaux;
+ utmp[0] &= kmask1;
+
+ const uint8x8_t mins8 = vld1_u8((const uint8_t*)utmp + 8);
+ const int16x8_t mins = vreinterpretq_s16_u16(vmovl_u8(mins8));
+ const int32x4_t prod = vaddq_s32(vmull_s16(vget_low_s16 (q8sums), vget_low_s16 (mins)),
+ vmull_s16(vget_high_s16(q8sums), vget_high_s16(mins)));
+ int32_t sumi_mins = vaddvq_s32(prod);
+
+ const uint8_t * scales = (const uint8_t *)utmp;
+
+ const uint8_t * GGML_RESTRICT q5 = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh);
+
+ ggml_uint8x16x4_t q5h;
+
+ int32_t sumi = 0;
+
+ for (int j = 0; j < QK_K/64; ++j) {
+
+ const ggml_uint8x16x2_t q5bits = ggml_vld1q_u8_x2(q5); q5 += 32;
+ const ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ q5h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
+ q5h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
+ q5h.val[2] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[0]), 3);
+ q5h.val[3] = vshlq_n_u8(vandq_u8(mtwo, qhbits.val[1]), 3);
+ qhbits.val[0] = vshrq_n_u8(qhbits.val[0], 2);
+ qhbits.val[1] = vshrq_n_u8(qhbits.val[1], 2);
+
+ q5bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[0], m4b), q5h.val[0]));
+ q5bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q5bits.val[1], m4b), q5h.val[1]));
+ q5bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[0], 4), q5h.val[2]));
+ q5bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q5bits.val[1], 4), q5h.val[3]));
+
+ sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[0], q8bytes.val[0]), q5bytes.val[1], q8bytes.val[1])) * *scales++;
+ sumi += vaddvq_s32(ggml_vdotq_s32(ggml_vdotq_s32(mzero, q5bytes.val[2], q8bytes.val[2]), q5bytes.val[3], q8bytes.val[3])) * *scales++;
+ }
+
+ sumf += d * sumi - dmin * sumi_mins;
+ }
+
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(kmask1);
+ UNUSED(kmask2);
+ UNUSED(kmask3);
+ UNUSED(utmp);
+ ggml_vec_dot_q5_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
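+// q6_K note (a sketch, assuming the usual layout): 16 sub-blocks of 16 weights with signed 8-bit
+// scales; each quant is (ql nibble | qh 2-bit field << 4) - 32, i.e. in [-32, 31]. Rather than
+// subtracting 32 from every byte, the kernels fold the offset in afterwards through a
+// bsums x scales correction term (e.g. svisum_mins below).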
+void ggml_vec_dot_q6_K_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+#ifdef __ARM_FEATURE_MATMUL_INT8
+ assert((nrc == 2) || (nrc == 1));
+#else
+ assert(nrc == 1);
+#endif
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_q6_K * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#ifdef __ARM_FEATURE_SVE
+ const int vector_length = ggml_cpu_get_sve_cnt()*8;
+#endif
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
+ if (nrc == 2) {
+ const svbool_t pg32_2 = svptrue_pat_b32(SV_VL2);
+
+ svfloat32_t sum = svdup_n_f32(0);
+
+ const block_q6_K * GGML_RESTRICT vx0 = vx;
+ const block_q8_K * GGML_RESTRICT vy0 = vy;
+ const block_q6_K * GGML_RESTRICT vx1 = (const block_q6_K *) ((const uint8_t*)vx + bx);
+ const block_q8_K * GGML_RESTRICT vy1 = (const block_q8_K *) ((const uint8_t*)vy + by);
+
+ switch (vector_length) {
+ case 128:
+ {
+ const svbool_t pg128_all = svptrue_pat_b8(SV_ALL);
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * GGML_RESTRICT ql0 = vx0[i].ql;
+ const uint8_t * GGML_RESTRICT qh0 = vx0[i].qh;
+ const uint8_t * GGML_RESTRICT ql1 = vx1[i].ql;
+ const uint8_t * GGML_RESTRICT qh1 = vx1[i].qh;
+ const int8_t * GGML_RESTRICT q80 = vy0[i].qs;
+ const int8_t * GGML_RESTRICT q81 = vy1[i].qs;
+
+ const int8_t * GGML_RESTRICT scale0 = vx0[i].scales;
+ const int8_t * GGML_RESTRICT scale1 = vx1[i].scales;
+
+ svfloat32_t vy_d = svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d));
+ svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d)));
+ svfloat32_t svsuper_block_scales = svmul_f32_x(pg128_all, vy_d, vx_d);
+                    // accumulate the q8 block sums (bsums) against the q6 scales, 128-bit vector path
+ const svint16_t q8sums_01 = svld1_s16(pg128_all, vy0[i].bsums);
+ const svint16_t q8sums_02 = svld1_s16(pg128_all, vy0[i].bsums + 8);
+ const svint16_t q8sums_11 = svld1_s16(pg128_all, vy1[i].bsums);
+ const svint16_t q8sums_12 = svld1_s16(pg128_all, vy1[i].bsums + 8);
+ const svint64x2_t q6scales_0_tmp = svld2_s64(pg128_all, (const int64_t *)scale0);
+ const svint16_t q6scales_01 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_0_tmp, 0)));
+ const svint16_t q6scales_02 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_0_tmp, 1)));
+ const svint64x2_t q6scales_1_tmp = svld2_s64(pg128_all, (const int64_t *)scale1);
+ const svint16_t q6scales_11 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_1_tmp, 0)));
+ const svint16_t q6scales_12 = svunpklo_s16(svreinterpret_s8_s64(svget2_s64(q6scales_1_tmp, 1)));
+ const svint64_t prod = svdup_n_s64(0);
+
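+                    // dot the q8 block sums against the q6 scales for all four (x,y) pairings and shuffle the results into the lane order of the mmla accumulator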
+ svint32_t isum_tmp1 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_01, q6scales_01), q8sums_02, q6scales_02));
+ svint32_t isum_tmp2 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_01, q6scales_11), q8sums_02, q6scales_12));
+ svint32_t isum_tmp3 = svtrn1_s32(isum_tmp1, isum_tmp2);
+ svint32_t isum_tmp4 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_11, q6scales_01), q8sums_12, q6scales_02));
+ svint32_t isum_tmp5 = svreinterpret_s32_s64(svdot_s64(svdot_s64(prod, q8sums_11, q6scales_11), q8sums_12, q6scales_12));
+ svint32_t isum_tmp6 = svtrn1_s32(isum_tmp4, isum_tmp5);
+ svint32_t isum_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(isum_tmp3), svreinterpret_s64_s32(isum_tmp6)));
+ svint32_t isum_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(isum_tmp3), svreinterpret_s64_s32(isum_tmp6)));
+ svint32_t svisum_mins = svadd_s32_x(pg128_all, isum_tmp7, isum_tmp8);
+
+ // process mmla
+ svint8_t l0, l1, r0, r1;
+ svint32_t isum_tmp = svdup_n_s32(0);
+ for (int j = 0; j < QK_K/128; ++j) {
+ for (int k = 0; k < 8; ++k) {
+ svuint8_t qhbits_0 = svld1_u8(pg128_all, qh0+16*(k%2));
+ svuint8_t qhbits_1 = svld1_u8(pg128_all, qh1+16*(k%2));
+ svuint8_t q6bits_0 = svld1_u8(pg128_all, ql0+16*(k%4));
+ svuint8_t q6bits_1 = svld1_u8(pg128_all, ql1+16*(k%4));
+ const int ql_pos = (k/4)*4;
+ svuint8_t q6bytes_0_lo = (ql_pos < 4) ? svand_n_u8_x(pg128_all, q6bits_0, 0xf) : svlsr_n_u8_x(pg128_all, q6bits_0, 4);
+ svuint8_t q6bytes_1_lo = (ql_pos < 4) ? svand_n_u8_x(pg128_all, q6bits_1, 0xf) : svlsr_n_u8_x(pg128_all, q6bits_1, 4);
+ const int qh_pos = (k/2)*2;
+ svuint8_t q6bytes_0_hi = svand_n_u8_x(pg128_all, qhbits_0, 0x3 << qh_pos);
+ svuint8_t q6bytes_1_hi = svand_n_u8_x(pg128_all, qhbits_1, 0x3 << qh_pos);
+ svint8_t q6bytes_0, q6bytes_1;
+ if (qh_pos <= 4) {
+ q6bytes_0 = svreinterpret_s8_u8(svmla_n_u8_x(pg128_all, q6bytes_0_lo, q6bytes_0_hi, 1 << (4 - qh_pos)));
+ q6bytes_1 = svreinterpret_s8_u8(svmla_n_u8_x(pg128_all, q6bytes_1_lo, q6bytes_1_hi, 1 << (4 - qh_pos)));
+ } else {
+ q6bytes_0 = svreinterpret_s8_u8(svorr_u8_x(pg128_all, q6bytes_0_lo, svlsr_n_u8_x(pg128_all, q6bytes_0_hi, (qh_pos - 4))));
+ q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg128_all, q6bytes_1_lo, svlsr_n_u8_x(pg128_all, q6bytes_1_hi, (qh_pos - 4))));
+ }
+ svint8_t q8bytes_0 = svld1_s8(pg128_all, q80+16*(k%8));
+ svint8_t q8bytes_1 = svld1_s8(pg128_all, q81+16*(k%8));
+ l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1)));
+ l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1)));
+ r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1)));
+ r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1)));
+ svint32_t svscale = svzip1_s32(svdup_n_s32(scale0[k]), svdup_n_s32(scale1[k]));
+ isum_tmp = svmla_s32_x(pg128_all, isum_tmp, svmmla_s32(svmmla_s32(svdup_n_s32(0), r0, l0), r1, l1), svscale);
+ }
+ qh0 += 32; qh1 += 32;
+ ql0 += 64; ql1 += 64;
+ q80 += 128; q81 += 128;
+ scale0 += 8; scale1 += 8;
+ }
+ sum = svmla_f32_x(pg128_all, sum,
+ svcvt_f32_x(pg128_all, svmla_s32_x(pg128_all, isum_tmp,
+ svisum_mins, svdup_n_s32(-32))),
+ svsuper_block_scales);
+ }
+ } // end of case 128
+ break;
+ case 256:
+ case 512:
+ {
+ const svbool_t pg256_all = svptrue_pat_b8(SV_ALL);
+ const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4);
+ for (int i = 0; i < nb; ++i) {
+ const uint8_t * GGML_RESTRICT ql0 = vx0[i].ql;
+ const uint8_t * GGML_RESTRICT qh0 = vx0[i].qh;
+ const uint8_t * GGML_RESTRICT ql1 = vx1[i].ql;
+ const uint8_t * GGML_RESTRICT qh1 = vx1[i].qh;
+ const int8_t * GGML_RESTRICT q80 = vy0[i].qs;
+ const int8_t * GGML_RESTRICT q81 = vy1[i].qs;
+
+ const int8_t * GGML_RESTRICT scale0 = vx0[i].scales;
+ const int8_t * GGML_RESTRICT scale1 = vx1[i].scales;
+ svfloat32_t vx_d = svzip1_f32(svdup_n_f32(GGML_FP16_TO_FP32(vx0[i].d)), svdup_n_f32(GGML_FP16_TO_FP32(vx1[i].d)));
+ svfloat64_t vy_d_tmp = svreinterpret_f64_f32(svuzp1_f32(svdup_n_f32(vy0[i].d), svdup_n_f32(vy1[i].d)));
+ svfloat32_t vy_d = svreinterpret_f32_f64(svuzp1_f64(vy_d_tmp, vy_d_tmp));
+ svfloat32_t svsuper_block_scales = svmul_f32_x(pg32_4, vy_d, vx_d);
+                    // accumulate the q8 block sums (bsums) against the q6 scales, 256-bit vector path
+ const svint16_t q8sums_0 = svld1_s16(pg256_all, vy0[i].bsums);
+ const svint16_t q8sums_1 = svld1_s16(pg256_all, vy1[i].bsums);
+ const svint16_t q6scales_0 = svunpklo_s16(svld1_s8(pg256_all, scale0));
+ const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(pg256_all, scale1));
+ const svint64_t prod = svdup_n_s64(0);
+ svint32_t isum_tmp1 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_0, q6scales_0));
+ svint32_t isum_tmp2 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_0, q6scales_1));
+ svint32_t isum_tmp3 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_1, q6scales_0));
+ svint32_t isum_tmp4 = svreinterpret_s32_s64(svdot_s64(prod, q8sums_1, q6scales_1));
+ svint32_t isum_tmp5 = svtrn1_s32(isum_tmp1, isum_tmp2);
+ svint32_t isum_tmp6 = svtrn1_s32(isum_tmp3, isum_tmp4);
+ svint32_t isum_tmp7 = svreinterpret_s32_s64(svtrn2_s64(svreinterpret_s64_s32(isum_tmp5), svreinterpret_s64_s32(isum_tmp6)));
+ svint32_t isum_tmp8 = svreinterpret_s32_s64(svtrn1_s64(svreinterpret_s64_s32(isum_tmp5), svreinterpret_s64_s32(isum_tmp6)));
+ svint32_t isum_tmp9 = svadd_s32_x(pg256_all, isum_tmp7, isum_tmp8);
+ svint32_t isum_tmp10 = svreinterpret_s32_u8(svext_u8(svreinterpret_u8_s32(isum_tmp9), svreinterpret_u8_s32(isum_tmp9), 16));
+ svint32_t svisum_mins = svadd_s32_z(pg32_4, isum_tmp9, isum_tmp10);
+
+ // process mmla
+ svint8_t l0, l1, r0, r1;
+ svint32_t isum_tmp = svdup_n_s32(0);
+ for (int j = 0; j < QK_K/128; ++j) {
+                        for (int k = 0; k < 8; k+=2) { // process 2 blocks per iteration
+ svuint8_t qhbits_0 = svld1_u8(pg256_all, qh0);
+ svuint8_t qhbits_1 = svld1_u8(pg256_all, qh1);
+ svuint8_t q6bits_0 = svld1_u8(pg256_all, ql0+32*((k%4)/2));
+ svuint8_t q6bits_1 = svld1_u8(pg256_all, ql1+32*((k%4)/2));
+ const int ql_pos = (k/4)*4;
+ svuint8_t q6bytes_0_lo = (ql_pos < 4) ? svand_n_u8_x(pg256_all, q6bits_0, 0xf) : svlsr_n_u8_x(pg256_all, q6bits_0, 4);
+ svuint8_t q6bytes_1_lo = (ql_pos < 4) ? svand_n_u8_x(pg256_all, q6bits_1, 0xf) : svlsr_n_u8_x(pg256_all, q6bits_1, 4);
+ const int qh_pos = (k/2)*2;
+ svuint8_t q6bytes_0_hi = svand_n_u8_x(pg256_all, qhbits_0, 0x3 << qh_pos);
+ svuint8_t q6bytes_1_hi = svand_n_u8_x(pg256_all, qhbits_1, 0x3 << qh_pos);
+ svint8_t q6bytes_0, q6bytes_1;
+ if (qh_pos <= 4) {
+ q6bytes_0 = svreinterpret_s8_u8(svmla_n_u8_x(pg256_all, q6bytes_0_lo, q6bytes_0_hi, 1 << (4 - qh_pos)));
+ q6bytes_1 = svreinterpret_s8_u8(svmla_n_u8_x(pg256_all, q6bytes_1_lo, q6bytes_1_hi, 1 << (4 - qh_pos)));
+ } else {
+ q6bytes_0 = svreinterpret_s8_u8(svorr_u8_x(pg256_all, q6bytes_0_lo, svlsr_n_u8_x(pg256_all, q6bytes_0_hi, (qh_pos - 4))));
+ q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg256_all, q6bytes_1_lo, svlsr_n_u8_x(pg256_all, q6bytes_1_hi, (qh_pos - 4))));
+ }
+ svint8_t q8bytes_0 = svld1_s8(pg256_all, q80+32*(k/2));
+ svint8_t q8bytes_1 = svld1_s8(pg256_all, q81+32*(k/2));
+ l0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1)));
+ l1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q6bytes_0), svreinterpret_s64_s8(q6bytes_1)));
+ r0 = svreinterpret_s8_s64(svzip1_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1)));
+ r1 = svreinterpret_s8_s64(svzip2_s64(svreinterpret_s64_s8(q8bytes_0), svreinterpret_s64_s8(q8bytes_1)));
+ svint32_t svscale0 = svzip1_s32(svdup_n_s32(scale0[k]), svdup_n_s32(scale1[k]));
+ svint32_t svscale1 = svzip1_s32(svdup_n_s32(scale0[k+1]), svdup_n_s32(scale1[k+1]));
+ isum_tmp = svmla_s32_x(pg256_all, isum_tmp, svmmla_s32(svdup_n_s32(0), r0, l0), svscale0);
+ isum_tmp = svmla_s32_x(pg256_all, isum_tmp, svmmla_s32(svdup_n_s32(0), r1, l1), svscale1);
+ }
+ qh0 += 32; qh1 += 32;
+ ql0 += 64; ql1 += 64;
+ q80 += 128; q81 += 128;
+ scale0 += 8; scale1 += 8;
+ } // end of for
+ svint32_t swap_isum_tmp = svext_s32(isum_tmp, isum_tmp, 4);
+ isum_tmp = svadd_s32_x(pg32_4, isum_tmp, swap_isum_tmp);
+ sum = svmla_f32_x(pg32_4, sum,
+ svcvt_f32_x(pg32_4, svmla_s32_x(pg32_4, isum_tmp,
+ svisum_mins, svdup_n_s32(-32))),
+ svsuper_block_scales);
+ }
+ } // end of case 256
+ break;
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ } // end of switch
+
+ svst1_f32(pg32_2, s, sum);
+ svst1_f32(pg32_2, s + bs, svreinterpret_f32_u8(svext_u8(svreinterpret_u8_f32(sum), svdup_n_u8(0), 8)));
+
+ return;
+ }
+#elif defined(__ARM_FEATURE_MATMUL_INT8)
+ if (nrc == 2) {
+ const block_q6_K * GGML_RESTRICT x0 = x;
+ const block_q6_K * GGML_RESTRICT x1 = (const block_q6_K *) ((const uint8_t *)vx + bx);
+ const block_q8_K * GGML_RESTRICT y0 = y;
+ const block_q8_K * GGML_RESTRICT y1 = (const block_q8_K *) ((const uint8_t *)vy + by);
+
+ float32x4_t vfsum = vdupq_n_f32(0.0f);
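+        // vfsum accumulates the 2x2 result tile per superblock: lanes are (x0.y0, x0.y1, x1.y0, x1.y1)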
+
+ for (int i = 0; i < nb; ++i, ++x0, ++x1, ++y0, ++y1) {
+ const uint8_t * GGML_RESTRICT ql0 = x0->ql;
+ const uint8_t * GGML_RESTRICT ql1 = x1->ql;
+ const uint8_t * GGML_RESTRICT qh0 = x0->qh;
+ const uint8_t * GGML_RESTRICT qh1 = x1->qh;
+ const int8_t * GGML_RESTRICT qy0 = y0->qs;
+ const int8_t * GGML_RESTRICT qy1 = y1->qs;
+
+ const uint8x16_t mone = vdupq_n_u8(0x30);
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+
+ int32x4_t visum = vdupq_n_s32(0);
+
+            // process 8 blocks per iteration, 16 blocks in total
+ for (int j = 0; j < 2; ++j, qh0 += 32, ql0 += 64, qh1 += 32, ql1 += 64) {
+ int8x16_t vx0[8], vx1[8];
+
+ // de-quantize vx0[8]
+ {
+ const uint8x16x2_t qh_bits = vld1q_u8_x2(qh0);
+ const uint8x16x4_t ql_bits = vld1q_u8_x4(ql0);
+
+ uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4));
+ uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4));
+ uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2));
+ uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2));
+
+ vx0[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0));
+ vx0[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1));
+ vx0[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2));
+ vx0[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3));
+
+ q6h_0 = vandq_u8(mone, qh_bits.val[0]);
+ q6h_1 = vandq_u8(mone, qh_bits.val[1]);
+ q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2));
+ q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2));
+
+ vx0[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0));
+ vx0[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1));
+ vx0[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2));
+ vx0[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3));
+ }
+
+ // de-quantize vx1[8]
+ {
+ const uint8x16x2_t qh_bits = vld1q_u8_x2(qh1);
+ const uint8x16x4_t ql_bits = vld1q_u8_x4(ql1);
+
+ uint8x16_t q6h_0 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 4));
+ uint8x16_t q6h_1 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 4));
+ uint8x16_t q6h_2 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[0], 2));
+ uint8x16_t q6h_3 = vandq_u8(mone, vshlq_n_u8(qh_bits.val[1], 2));
+
+ vx1[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[0], m4b), q6h_0));
+ vx1[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[1], m4b), q6h_1));
+ vx1[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[2], m4b), q6h_2));
+ vx1[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(ql_bits.val[3], m4b), q6h_3));
+
+ q6h_0 = vandq_u8(mone, qh_bits.val[0]);
+ q6h_1 = vandq_u8(mone, qh_bits.val[1]);
+ q6h_2 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[0], 2));
+ q6h_3 = vandq_u8(mone, vshrq_n_u8(qh_bits.val[1], 2));
+
+ vx1[4] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[0], 4), q6h_0));
+ vx1[5] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[1], 4), q6h_1));
+ vx1[6] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[2], 4), q6h_2));
+ vx1[7] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(ql_bits.val[3], 4), q6h_3));
+ }
+
+                // process 16 elements (one block sharing one scale) per iteration
+                // - vx = (ql | qh << 4); the -32 offset is folded into the bias correction below
+                // - r1,r2,r3,r4 = smmla(vx, vy)
+ for (int k = 0; k < 8; ++k) {
+ const int blk = j * 8 + k;
+
+ const int8x16_t vy0 = vld1q_s8(qy0);
+ const int8x16_t vy1 = vld1q_s8(qy1);
+ qy0 += 16;
+ qy1 += 16;
+
+ const int32x4_t block_scale = {
+ x0->scales[blk],
+ x0->scales[blk],
+ x1->scales[blk],
+ x1->scales[blk],
+ };
+
+ // calculate four results at once with outer product
+ const int8x16_t vx_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k])));
+ const int8x16_t vx_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vx0[k]), vreinterpretq_s64_s8(vx1[k])));
+ const int8x16_t vy_l = vreinterpretq_s8_s64(vzip1q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1)));
+ const int8x16_t vy_h = vreinterpretq_s8_s64(vzip2q_s64(vreinterpretq_s64_s8(vy0), vreinterpretq_s64_s8(vy1)));
+ int32x4_t vr = vdupq_n_s32(0);
+ vr = vmmlaq_s32(vr, vx_l, vy_l);
+ vr = vmmlaq_s32(vr, vx_h, vy_h);
+
+ // apply block scale, will NOT overflow
+ // block_scale * sum_256(int6*int8) <= 2^(8+8+6+8) = 30 bits
+ visum = vmlaq_s32(visum, vr, block_scale);
+ }
+ }
+
+ // adjust bias, apply superblock scale
+ {
+ int32_t bias[4];
+                // NEON has no int16 dot-product instruction, so fall back to separate widening multiplies and adds
+ const int16x8x2_t q8sums0 = vld1q_s16_x2(y0->bsums);
+ const int16x8x2_t q8sums1 = vld1q_s16_x2(y1->bsums);
+
+ int8x16_t scales_s8 = vld1q_s8(x0->scales);
+ const int16x8x2_t q6scales0 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}};
+ scales_s8 = vld1q_s8(x1->scales);
+ const int16x8x2_t q6scales1 = {{vmovl_s8(vget_low_s8(scales_s8)), vmovl_s8(vget_high_s8(scales_s8))}};
+
+ int32x4_t prod;
+ prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales0.val[0])),
+ vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales0.val[0]))),
+ vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales0.val[1])),
+ vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales0.val[1]))));
+ bias[0] = vaddvq_s32(prod);
+ prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales0.val[0])),
+ vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales0.val[0]))),
+ vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales0.val[1])),
+ vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales0.val[1]))));
+ bias[1] = vaddvq_s32(prod);
+ prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[0]), vget_low_s16 (q6scales1.val[0])),
+ vmull_s16(vget_high_s16(q8sums0.val[0]), vget_high_s16(q6scales1.val[0]))),
+ vaddq_s32(vmull_s16(vget_low_s16 (q8sums0.val[1]), vget_low_s16 (q6scales1.val[1])),
+ vmull_s16(vget_high_s16(q8sums0.val[1]), vget_high_s16(q6scales1.val[1]))));
+ bias[2] = vaddvq_s32(prod);
+ prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[0]), vget_low_s16 (q6scales1.val[0])),
+ vmull_s16(vget_high_s16(q8sums1.val[0]), vget_high_s16(q6scales1.val[0]))),
+ vaddq_s32(vmull_s16(vget_low_s16 (q8sums1.val[1]), vget_low_s16 (q6scales1.val[1])),
+ vmull_s16(vget_high_s16(q8sums1.val[1]), vget_high_s16(q6scales1.val[1]))));
+ bias[3] = vaddvq_s32(prod);
+
+ const int32x4_t vibias = vmulq_n_s32(vld1q_s32(bias), 32);
+
+ const float32x4_t superblock_scale = {
+ GGML_CPU_FP16_TO_FP32(x0->d) * y0->d,
+ GGML_CPU_FP16_TO_FP32(x0->d) * y1->d,
+ GGML_CPU_FP16_TO_FP32(x1->d) * y0->d,
+ GGML_CPU_FP16_TO_FP32(x1->d) * y1->d,
+ };
+
+ visum = vsubq_s32(visum, vibias);
+ vfsum = vmlaq_f32(vfsum, vcvtq_f32_s32(visum), superblock_scale);
+ }
+ }
+
+ // vfsum = ABCD -> ACBD
+ // AC -> s, BD -> (s+bs)
+ vfsum = vzip1q_f32(vfsum, vextq_f32(vfsum, vfsum, 2));
+ vst1_f32(s, vget_low_f32 (vfsum));
+ vst1_f32(s + bs, vget_high_f32(vfsum));
+
+ return;
+ }
+#endif
+
+#ifdef __ARM_FEATURE_SVE
+ float sum = 0;
+ svuint8_t m4b = svdup_n_u8(0xf);
+ svint32_t vzero = svdup_n_s32(0);
+ svuint8_t mone = svdup_n_u8(0x30);
+ svint8_t q6bytes_1, q6bytes_2, q6bytes_3, q6bytes_4;
+ svuint8_t q6h_1, q6h_2, q6h_3, q6h_4;
+
+ for (int i = 0; i < nb; ++i) {
+ const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * GGML_RESTRICT q6 = x[i].ql;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ const int8_t * GGML_RESTRICT scale = x[i].scales;
+
+ const svbool_t pg16_8 = svptrue_pat_b16(SV_VL8);
+ const svint16_t q8sums_1 = svld1_s16(pg16_8, y[i].bsums);
+ const svint16_t q8sums_2 = svld1_s16(pg16_8, y[i].bsums + 8);
+ const svint16_t q6scales_1 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale));
+ const svint16_t q6scales_2 = svunpklo_s16(svld1_s8(svptrue_pat_b8(SV_VL8), scale + 8));
+ const svint64_t prod = svdup_n_s64(0);
+ int32_t isum_mins = svaddv_s64(svptrue_b64(), svadd_s64_x(svptrue_b64(), svdot_s64(prod, q8sums_1, q6scales_1),
+ svdot_s64(prod, q8sums_2, q6scales_2)));
+ int32_t isum = 0;
+
+ switch (vector_length) {
+ case 128:
+ {
+ const svbool_t pg32_4 = svptrue_pat_b32(SV_VL4);
+ const svbool_t pg8_16 = svptrue_pat_b8(SV_VL16);
+ svint32_t isum_tmp = svdup_n_s32(0);
+ for (int j = 0; j < QK_K/128; ++j) {
+ svuint8_t qhbits_1 = svld1_u8(pg8_16, qh);
+ svuint8_t qhbits_2 = svld1_u8(pg8_16, qh+16);
+ qh += 32;
+ svuint8_t q6bits_1 = svld1_u8(pg8_16, q6);
+ svuint8_t q6bits_2 = svld1_u8(pg8_16, q6+16);
+ svuint8_t q6bits_3 = svld1_u8(pg8_16, q6+32);
+ svuint8_t q6bits_4 = svld1_u8(pg8_16, q6+48);
+ q6 += 64;
+ svint8_t q8bytes_1 = svld1_s8(pg8_16, q8);
+ svint8_t q8bytes_2 = svld1_s8(pg8_16, q8+16);
+ svint8_t q8bytes_3 = svld1_s8(pg8_16, q8+32);
+ svint8_t q8bytes_4 = svld1_s8(pg8_16, q8+48);
+ q8 += 64;
+
+ q6h_1 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 4));
+ q6h_2 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 4));
+ q6h_3 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_1, 2));
+ q6h_4 = svand_u8_x(pg16_8, mone, svlsl_n_u8_x(pg16_8, qhbits_2, 2));
+ q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_1, m4b), q6h_1));
+ q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_2, m4b), q6h_2));
+ q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_3, m4b), q6h_3));
+ q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svand_u8_x(pg8_16, q6bits_4, m4b), q6h_4));
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]);
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]);
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]);
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]);
+
+ scale += 4;
+ q8bytes_1 = svld1_s8(pg8_16, q8);
+ q8bytes_2 = svld1_s8(pg8_16, q8+16);
+ q8bytes_3 = svld1_s8(pg8_16, q8+32);
+ q8bytes_4 = svld1_s8(pg8_16, q8+48);
+ q8 += 64;
+
+ q6h_1 = svand_u8_x(pg16_8, mone, qhbits_1);
+ q6h_2 = svand_u8_x(pg16_8, mone, qhbits_2);
+ q6h_3 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_1, 2));
+ q6h_4 = svand_u8_x(pg16_8, mone, svlsr_n_u8_x(pg16_8, qhbits_2, 2));
+ q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_1, 4), q6h_1));
+ q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_2, 4), q6h_2));
+ q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_3, 4), q6h_3));
+ q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_16, svlsr_n_u8_x(pg8_16, q6bits_4, 4), q6h_4));
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale[0]);
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale[1]);
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale[2]);
+ isum_tmp = svmla_n_s32_x(pg32_4, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale[3]);
+ scale += 4;
+ }
+ isum += svaddv_s32(pg32_4, isum_tmp);
+ sum += d_all * y[i].d * (isum - 32 * isum_mins);
+ }
+ break;
+ case 256:
+ case 512:
+ {
+ const svbool_t pg8_2 = svptrue_pat_b8(SV_VL2);
+ const svbool_t pg32_8 = svptrue_pat_b32(SV_VL8);
+ const svbool_t pg8_32 = svptrue_pat_b8(SV_VL32);
+ svint32_t isum_tmp = svdup_n_s32(0);
+ for (int j = 0; j < QK_K/128; j++) {
+ svuint8_t qhbits_1 = svld1_u8(pg8_32, qh);
+ qh += 32;
+ svuint8_t q6bits_1 = svld1_u8(pg8_32, q6);
+ svuint8_t q6bits_2 = svld1_u8(pg8_32, q6+32);
+ q6 += 64;
+ svint8_t q8bytes_1 = svld1_s8(pg8_32, q8);
+ svint8_t q8bytes_2 = svld1_s8(pg8_32, q8+32);
+ svint8_t q8bytes_3 = svld1_s8(pg8_32, q8+64);
+ svint8_t q8bytes_4 = svld1_s8(pg8_32, q8+96);
+ q8 += 128;
+ q6h_1 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 4));
+ q6h_2 = svand_u8_x(pg8_32, mone, svlsl_n_u8_x(pg8_32, qhbits_1, 2));
+ q6h_3 = svand_u8_x(pg8_32, mone, qhbits_1);
+ q6h_4 = svand_u8_x(pg8_32, mone, svlsr_n_u8_x(pg8_32, qhbits_1, 2));
+ q6bytes_1 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_1, m4b), q6h_1));
+ q6bytes_2 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svand_u8_x(pg8_32, q6bits_2, m4b), q6h_2));
+ q6bytes_3 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_1, 4), q6h_3));
+ q6bytes_4 = svreinterpret_s8_u8(svorr_u8_x(pg8_32, svlsr_n_u8_x(pg8_32, q6bits_2, 4), q6h_4));
+
+ svint8_t scale_lane_1_tmp = svld1_s8(pg8_2, scale);
+ scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp);
+ scale_lane_1_tmp= svzip1_s8(scale_lane_1_tmp, scale_lane_1_tmp);
+ svint8_t scale_lane_2_tmp = svld1_s8(pg8_2, scale+2);
+ scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp);
+ scale_lane_2_tmp = svzip1_s8(scale_lane_2_tmp, scale_lane_2_tmp);
+ svint8_t scale_lane_3_tmp = svld1_s8(pg8_2, scale+4);
+ scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp);
+ scale_lane_3_tmp = svzip1_s8(scale_lane_3_tmp, scale_lane_3_tmp);
+ svint8_t scale_lane_4_tmp = svld1_s8(pg8_2, scale+6);
+ scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp);
+ scale_lane_4_tmp = svzip1_s8(scale_lane_4_tmp, scale_lane_4_tmp);
+ svint32_t scale_lane_1 = svunpklo_s32(svunpklo_s16(scale_lane_1_tmp));
+ svint32_t scale_lane_2 = svunpklo_s32(svunpklo_s16(scale_lane_2_tmp));
+ svint32_t scale_lane_3 = svunpklo_s32(svunpklo_s16(scale_lane_3_tmp));
+ svint32_t scale_lane_4 = svunpklo_s32(svunpklo_s16(scale_lane_4_tmp));
+
+ isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_1, q8bytes_1), scale_lane_1);
+ isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_2, q8bytes_2), scale_lane_2);
+ isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_3, q8bytes_3), scale_lane_3);
+ isum_tmp = svmla_s32_x(pg32_8, isum_tmp, svdot_s32(vzero, q6bytes_4, q8bytes_4), scale_lane_4);
+ scale += 8;
+ }
+ isum += svaddv_s32(pg32_8, isum_tmp);
+ sum += d_all * y[i].d * (isum - 32 * isum_mins);
+ }
+ break;
+ default:
+ assert(false && "Unsupported vector length");
+ break;
+ }
+ }
+
+ *s = sum;
+
+#elif __ARM_NEON
+ float sum = 0;
+
+ const uint8x16_t m4b = vdupq_n_u8(0xF);
+ const int32x4_t vzero = vdupq_n_s32(0);
+ //const int8x16_t m32s = vdupq_n_s8(32);
+
+ const uint8x16_t mone = vdupq_n_u8(3);
+
+ ggml_int8x16x4_t q6bytes;
+ ggml_uint8x16x4_t q6h;
+
+ for (int i = 0; i < nb; ++i) {
+
+ const float d_all = GGML_CPU_FP16_TO_FP32(x[i].d);
+
+ const uint8_t * GGML_RESTRICT q6 = x[i].ql;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ const int8_t * GGML_RESTRICT scale = x[i].scales;
+
+ const ggml_int16x8x2_t q8sums = ggml_vld1q_s16_x2(y[i].bsums);
+ const int8x16_t scales = vld1q_s8(scale);
+ const ggml_int16x8x2_t q6scales = {{vmovl_s8(vget_low_s8(scales)), vmovl_s8(vget_high_s8(scales))}};
+
+ const int32x4_t prod = vaddq_s32(vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[0]), vget_low_s16 (q6scales.val[0])),
+ vmull_s16(vget_high_s16(q8sums.val[0]), vget_high_s16(q6scales.val[0]))),
+ vaddq_s32(vmull_s16(vget_low_s16 (q8sums.val[1]), vget_low_s16 (q6scales.val[1])),
+ vmull_s16(vget_high_s16(q8sums.val[1]), vget_high_s16(q6scales.val[1]))));
+ int32_t isum_mins = vaddvq_s32(prod);
+
+ int32_t isum = 0;
+
+ for (int j = 0; j < QK_K/128; ++j) {
+
+ ggml_uint8x16x2_t qhbits = ggml_vld1q_u8_x2(qh); qh += 32;
+ ggml_uint8x16x4_t q6bits = ggml_vld1q_u8_x4(q6); q6 += 64;
+ ggml_int8x16x4_t q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
+
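+            // the high 2 bits of each 6-bit quant come from qhbits; shift them into bits 4..5 and OR with the low 4 bits of q6bits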
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, qhbits.val[0]), 4);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, qhbits.val[1]), 4);
+ uint8x16_t shifted = vshrq_n_u8(qhbits.val[0], 2);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 2);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0])), m32s);
+ //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1])), m32s);
+ //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2])), m32s);
+ //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3])), m32s);
+ q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[0], m4b), q6h.val[0]));
+ q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[1], m4b), q6h.val[1]));
+ q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[2], m4b), q6h.val[2]));
+ q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vandq_u8(q6bits.val[3], m4b), q6h.val[3]));
+
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+
+ scale += 4;
+
+ q8bytes = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ shifted = vshrq_n_u8(qhbits.val[0], 4);
+ q6h.val[0] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 4);
+ q6h.val[1] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[0], 6);
+ q6h.val[2] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+ shifted = vshrq_n_u8(qhbits.val[1], 6);
+ q6h.val[3] = vshlq_n_u8(vandq_u8(mone, shifted), 4);
+
+ //q6bytes.val[0] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0])), m32s);
+ //q6bytes.val[1] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1])), m32s);
+ //q6bytes.val[2] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2])), m32s);
+ //q6bytes.val[3] = vsubq_s8(vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3])), m32s);
+ q6bytes.val[0] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[0], 4), q6h.val[0]));
+ q6bytes.val[1] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[1], 4), q6h.val[1]));
+ q6bytes.val[2] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[2], 4), q6h.val[2]));
+ q6bytes.val[3] = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6bits.val[3], 4), q6h.val[3]));
+
+ isum += vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[0], q8bytes.val[0])) * scale[0] +
+ vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[1], q8bytes.val[1])) * scale[1] +
+ vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[2], q8bytes.val[2])) * scale[2] +
+ vaddvq_s32(ggml_vdotq_s32(vzero, q6bytes.val[3], q8bytes.val[3])) * scale[3];
+ scale += 4;
+ }
+ //sum += isum * d_all * y[i].d;
+ sum += d_all * y[i].d * (isum - 32 * isum_mins);
+
+ }
+ *s = sum;
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_q6_K_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+#if defined (__ARM_NEON)
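+// sign patterns (+1/-1 bytes) for the 7-bit sign codes used by iq2_xxs/iq2_xs/iq3_xxs:
+// bit i of the index flips element i, and element 7 is chosen so the number of -1s is even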
+static const int8_t keven_signs_q2xs[1024] = {
+ 1, 1, 1, 1, 1, 1, 1, 1, -1, 1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, 1,
+ 1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, 1, 1, -1, -1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, -1,
+ 1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, 1, 1, -1, 1, -1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, -1,
+ 1, 1, -1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, 1,
+ 1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, -1,
+ 1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, 1,
+ 1, 1, 1, -1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, 1,
+ 1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, 1, 1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, -1,
+ 1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, 1, -1, 1, 1, -1, -1, 1, 1, 1, -1, 1, -1,
+ 1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, 1,
+ 1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, 1,
+ 1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, -1,
+ 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, 1,
+ 1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, -1,
+ 1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, 1, 1, 1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, -1,
+ 1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, 1,
+ 1, 1, 1, 1, 1, 1, -1, -1, -1, 1, 1, 1, 1, 1, -1, 1, 1, -1, 1, 1, 1, 1, -1, 1, -1, -1, 1, 1, 1, 1, -1, -1,
+ 1, 1, -1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, -1, 1, -1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, 1,
+ 1, 1, 1, -1, 1, 1, -1, 1, -1, 1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, 1,
+ 1, 1, -1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, -1,
+ 1, 1, 1, 1, -1, 1, -1, 1, -1, 1, 1, 1, -1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, 1,
+ 1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, 1, 1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, -1,
+ 1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, 1, -1, 1, 1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, -1,
+ 1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, 1,
+ 1, 1, 1, 1, 1, -1, -1, 1, -1, 1, 1, 1, 1, -1, -1, -1, 1, -1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, 1,
+ 1, 1, -1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, 1, -1, -1, 1, -1, -1, -1, 1, 1, -1, -1, -1,
+ 1, 1, 1, -1, 1, -1, -1, -1, -1, 1, 1, -1, 1, -1, -1, 1, 1, -1, 1, -1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, -1,
+ 1, 1, -1, -1, 1, -1, -1, 1, -1, 1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, 1,
+ 1, 1, 1, 1, -1, -1, -1, -1, -1, 1, 1, 1, -1, -1, -1, 1, 1, -1, 1, 1, -1, -1, -1, 1, -1, -1, 1, 1, -1, -1, -1, -1,
+ 1, 1, -1, 1, -1, -1, -1, 1, -1, 1, -1, 1, -1, -1, -1, -1, 1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, 1,
+ 1, 1, 1, -1, -1, -1, -1, 1, -1, 1, 1, -1, -1, -1, -1, -1, 1, -1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, 1,
+ 1, 1, -1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, 1, 1, -1, -1, -1, -1, -1, -1, 1, -1, -1, -1, -1, -1, -1, -1, -1,
+};
+#endif
+
+void ggml_vec_dot_iq2_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq2_xxs * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+
+ const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+
+ uint32_t aux32[4];
+ const uint8_t * aux8 = (const uint8_t *)aux32;
+
+ ggml_int8x16x4_t q2u;
+ ggml_int8x16x4_t q2s;
+ ggml_int8x16x4_t q8b;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ float sumf1 = 0, sumf2 = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+ memcpy(aux32, q2, 4*sizeof(uint32_t)); q2 += 8;
+ q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 0])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 1])));
+ q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 2])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 3])));
+ q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[ 8])), vld1_s8((const void *)(iq2xxs_grid + aux8[ 9])));
+ q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xxs_grid + aux8[10])), vld1_s8((const void *)(iq2xxs_grid + aux8[11])));
+ q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
+ q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
+ q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 7) & 127))));
+ q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[3] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[3] >> 21) & 127))));
+ q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
+ q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
+ q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
+ q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
+ const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]), q2u.val[1], q8b.val[1]);
+ const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]), q2u.val[3], q8b.val[3]);
+ sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[1] >> 28));
+ sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[3] >> 28));
+ }
+ sumf += d*(sumf1 + sumf2);
+ }
+ *s = 0.25f * sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_iq2_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq2_xs * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+
+ const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+
+ ggml_int8x16x4_t q2u;
+ ggml_int8x16x4_t q2s;
+ ggml_int8x16x4_t q8b;
+
+ int32x4x4_t scales32;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint16_t * GGML_RESTRICT q2 = x[i].qs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
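+        // unpack the 16 4-bit block scales and map each scale s to 2*s + 1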
+ const uint8x8_t scales8 = vld1_u8(x[i].scales);
+ const uint8x8_t scales_l = vand_u8(scales8, vdup_n_u8(0xf));
+ const uint8x8_t scales_h = vshr_n_u8(scales8, 4);
+ uint8x16_t scales = vcombine_u8(vzip1_u8(scales_l, scales_h), vzip2_u8(scales_l, scales_h));
+ scales = vaddq_u8(vshlq_n_u8(scales, 1), vdupq_n_u8(1));
+ const uint16x8_t scales1 = vmovl_u8(vget_low_u8(scales));
+ const uint16x8_t scales2 = vmovl_u8(vget_high_u8(scales));
+ scales32.val[0] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales1)));
+ scales32.val[1] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales1)));
+ scales32.val[2] = vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(scales2)));
+ scales32.val[3] = vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(scales2)));
+ int32x4_t sumi = vdupq_n_s32(0);
+ for (int ib64 = 0; ib64 < QK_K/64; ++ib64) {
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+ q2u.val[0] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[0] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[1] & 511))));
+ q2u.val[1] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[2] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[3] & 511))));
+ q2u.val[2] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[4] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[5] & 511))));
+ q2u.val[3] = vcombine_s8(vld1_s8((const void *)(iq2xs_grid + (q2[6] & 511))), vld1_s8((const void *)(iq2xs_grid + (q2[7] & 511))));
+ q2s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[0] >> 9))), vld1_s8((const void *)(signs64 + (q2[1] >> 9))));
+ q2s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[2] >> 9))), vld1_s8((const void *)(signs64 + (q2[3] >> 9))));
+ q2s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[4] >> 9))), vld1_s8((const void *)(signs64 + (q2[5] >> 9))));
+ q2s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + (q2[6] >> 9))), vld1_s8((const void *)(signs64 + (q2[7] >> 9))));
+ q2u.val[0] = vmulq_s8(q2u.val[0], q2s.val[0]);
+ q2u.val[1] = vmulq_s8(q2u.val[1], q2s.val[1]);
+ q2u.val[2] = vmulq_s8(q2u.val[2], q2s.val[2]);
+ q2u.val[3] = vmulq_s8(q2u.val[3], q2s.val[3]);
+ const int32x4_t p1 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[0], q8b.val[0]);
+ const int32x4_t p2 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[1], q8b.val[1]);
+ const int32x4_t p3 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[2], q8b.val[2]);
+ const int32x4_t p4 = ggml_vdotq_s32(vdupq_n_s32(0), q2u.val[3], q8b.val[3]);
+ const int32x4_t p = vpaddq_s32(vpaddq_s32(p1, p2), vpaddq_s32(p3, p4));
+ sumi = vmlaq_s32(sumi, p, scales32.val[ib64]);
+ q2 += 8;
+ }
+ sumf += d*vaddvq_s32(sumi);
+ }
+ *s = 0.125f * sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_iq2_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq2_s * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+
+ static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
+ };
+
+ static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
+
+ const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1);
+ const uint8x16_t mask2 = vld1q_u8(k_mask2);
+ const uint8x16_t m1 = vdupq_n_u8(1);
+ const int32x4_t vzero = vdupq_n_s32(0);
+
+ uint8x16x2_t vs;
+ ggml_int8x16x4_t q2s;
+ ggml_int8x16x4_t q8b;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+
+ const uint8_t * GGML_RESTRICT qs = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const uint16_t * GGML_RESTRICT signs = (const uint16_t *)(x[i].qs + QK_K/8);
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
+ int sumi1 = 0, sumi2 = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+ q2s.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[0] | ((qh[ib32+0] << 8) & 0x300)))),
+ vld1_s8((const int8_t *)(iq2s_grid + (qs[1] | ((qh[ib32+0] << 6) & 0x300)))));
+ q2s.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[2] | ((qh[ib32+0] << 4) & 0x300)))),
+ vld1_s8((const int8_t *)(iq2s_grid + (qs[3] | ((qh[ib32+0] << 2) & 0x300)))));
+ q2s.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[4] | ((qh[ib32+1] << 8) & 0x300)))),
+ vld1_s8((const int8_t *)(iq2s_grid + (qs[5] | ((qh[ib32+1] << 6) & 0x300)))));
+ q2s.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq2s_grid + (qs[6] | ((qh[ib32+1] << 4) & 0x300)))),
+ vld1_s8((const int8_t *)(iq2s_grid + (qs[7] | ((qh[ib32+1] << 2) & 0x300)))));
+ qs += 8;
+
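+            // expand the 16 packed sign bits into per-byte masks and turn them into +1/-1 multipliers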
+ vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16)));
+ vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
+ vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
+ vs.val[0] = vceqq_u8(vs.val[0], mask2);
+ vs.val[1] = vceqq_u8(vs.val[1], mask2);
+
+ q2s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[0]);
+ q2s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[1]);
+
+ vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16)));
+ vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
+ vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
+ vs.val[0] = vceqq_u8(vs.val[0], mask2);
+ vs.val[1] = vceqq_u8(vs.val[1], mask2);
+
+ signs += 4;
+
+ q2s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[0], m1)), q2s.val[2]);
+ q2s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vorrq_u8(vs.val[1], m1)), q2s.val[3]);
+
+ const int32x4_t p1 = ggml_vdotq_s32(vzero, q2s.val[0], q8b.val[0]);
+ const int32x4_t p2 = ggml_vdotq_s32(vzero, q2s.val[1], q8b.val[1]);
+ const int32x4_t p3 = ggml_vdotq_s32(vzero, q2s.val[2], q8b.val[2]);
+ const int32x4_t p4 = ggml_vdotq_s32(vzero, q2s.val[3], q8b.val[3]);
+
+ sumi1 += vaddvq_s32(p1) * (1 + 2*(x[i].scales[ib32+0] & 0xf));
+ sumi2 += vaddvq_s32(p2) * (1 + 2*(x[i].scales[ib32+0] >> 4));
+ sumi1 += vaddvq_s32(p3) * (1 + 2*(x[i].scales[ib32+1] & 0xf));
+ sumi2 += vaddvq_s32(p4) * (1 + 2*(x[i].scales[ib32+1] >> 4));
+ }
+ sumf += d*(sumi1 + sumi2);
+ }
+
+ *s = 0.125f * sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq2_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+
+}
+
+void ggml_vec_dot_iq3_xxs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq3_xxs * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+
+ const uint64_t * signs64 = (const uint64_t *)keven_signs_q2xs;
+
+ uint32_t aux32[2];
+
+ ggml_int8x16x4_t q3s;
+ ggml_int8x16x4_t q8b;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint8_t * GGML_RESTRICT q3 = x[i].qs;
+ const uint8_t * GGML_RESTRICT gas = x[i].qs + QK_K/4;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+ float sumf1 = 0, sumf2 = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+ memcpy(aux32, gas, 2*sizeof(uint32_t)); gas += 2*sizeof(uint32_t);
+ const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3xxs_grid[q3[ 0]], iq3xxs_grid[q3[ 1]], iq3xxs_grid[q3[ 2]], iq3xxs_grid[q3[ 3]]);
+ const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3xxs_grid[q3[ 4]], iq3xxs_grid[q3[ 5]], iq3xxs_grid[q3[ 6]], iq3xxs_grid[q3[ 7]]);
+ const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3xxs_grid[q3[ 8]], iq3xxs_grid[q3[ 9]], iq3xxs_grid[q3[10]], iq3xxs_grid[q3[11]]);
+ const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3xxs_grid[q3[12]], iq3xxs_grid[q3[13]], iq3xxs_grid[q3[14]], iq3xxs_grid[q3[15]]);
+ q3 += 16;
+ q3s.val[0] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 7) & 127))));
+ q3s.val[1] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[0] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[0] >> 21) & 127))));
+ q3s.val[2] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 0) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 7) & 127))));
+ q3s.val[3] = vcombine_s8(vld1_s8((const void *)(signs64 + ((aux32[1] >> 14) & 127))), vld1_s8((const void *)(signs64 + ((aux32[1] >> 21) & 127))));
+ q3s.val[0] = vmulq_s8(q3s.val[0], vreinterpretq_s8_u32(aux32x4_0));
+ q3s.val[1] = vmulq_s8(q3s.val[1], vreinterpretq_s8_u32(aux32x4_1));
+ q3s.val[2] = vmulq_s8(q3s.val[2], vreinterpretq_s8_u32(aux32x4_2));
+ q3s.val[3] = vmulq_s8(q3s.val[3], vreinterpretq_s8_u32(aux32x4_3));
+ const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
+ const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
+ sumf1 += vaddvq_s32(p1) * (0.5f + (aux32[0] >> 28));
+ sumf2 += vaddvq_s32(p2) * (0.5f + (aux32[1] >> 28));
+ }
+ sumf += d*(sumf1 + sumf2);
+ }
+ *s = 0.5f * sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_xxs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_iq3_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq3_s * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined(__ARM_NEON)
+
+ typedef union {
+ uint16x8_t vec_index;
+ uint16_t index[8];
+ } vec_index_t;
+
+ static const uint8_t k_mask1[32] = {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+ 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03
+ };
+
+ static const uint8_t k_mask2[16] = {0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80,};
+
+ static const int16_t k_shift[8] = {8, 7, 6, 5, 4, 3, 2, 1};
+
+ const ggml_uint8x16x2_t mask1 = ggml_vld1q_u8_x2(k_mask1);
+ const uint8x16_t mask2 = vld1q_u8(k_mask2);
+
+ const int16x8_t hshift = vld1q_s16(k_shift);
+ const uint16x8_t m256 = vdupq_n_u16(256);
+ const uint8x16_t m1 = vdupq_n_u8(1);
+
+ uint8x16x2_t vs;
+ ggml_int8x16x4_t q3s;
+ ggml_int8x16x4_t q8b;
+ vec_index_t idx;
+
+ uint32_t scales32[2];
+ const uint8_t * scales8 = (const uint8_t *)scales32;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+ const float d = GGML_CPU_FP16_TO_FP32(x[i].d) * y[i].d;
+ const uint8_t * GGML_RESTRICT qs = x[i].qs;
+ const uint8_t * GGML_RESTRICT qh = x[i].qh;
+ const uint16_t * GGML_RESTRICT signs = (const uint16_t *)x[i].signs;
+ const int8_t * GGML_RESTRICT q8 = y[i].qs;
+
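+        // unpack the eight 4-bit block scales into bytes, mapping each scale s to 2*s + 1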
+ memcpy(scales32, x[i].scales, 4);
+ scales32[1] = (((scales32[0] >> 4) & 0x0f0f0f0f) << 1) | 0x01010101;
+ scales32[0] = ((scales32[0] & 0x0f0f0f0f) << 1) | 0x01010101;
+
+ int sumi1 = 0, sumi2 = 0;
+ for (int ib32 = 0; ib32 < QK_K/32; ib32 += 2) {
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ const uint8x16_t idx_l = vld1q_u8(qs); qs += 16;
+ idx.vec_index = vorrq_u16(vmovl_u8(vget_low_u8 (idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+0]), hshift), m256));
+ const uint32x4_t aux32x4_0 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]],
+ iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]);
+ const uint32x4_t aux32x4_1 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]],
+ iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]);
+ idx.vec_index = vorrq_u16(vmovl_u8(vget_high_u8(idx_l)), vandq_u16(vshlq_u16(vdupq_n_u16(qh[ib32+1]), hshift), m256));
+ const uint32x4_t aux32x4_2 = ggml_vld1q_u32(iq3s_grid[idx.index[0]], iq3s_grid[idx.index[1]],
+ iq3s_grid[idx.index[2]], iq3s_grid[idx.index[3]]);
+ const uint32x4_t aux32x4_3 = ggml_vld1q_u32(iq3s_grid[idx.index[4]], iq3s_grid[idx.index[5]],
+ iq3s_grid[idx.index[6]], iq3s_grid[idx.index[7]]);
+
+ vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[0] | ((uint32_t) signs[1] << 16)));
+ vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
+ vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
+ vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1);
+ vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1);
+
+ q3s.val[0] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_0));
+ q3s.val[1] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_1));
+
+ vs.val[0] = vreinterpretq_u8_u32(vdupq_n_u32(signs[2] | ((uint32_t) signs[3] << 16)));
+ vs.val[1] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[1]), mask2);
+ vs.val[0] = vandq_u8(ggml_vqtbl1q_u8(vs.val[0], mask1.val[0]), mask2);
+ vs.val[0] = vorrq_u8(vceqq_u8(vs.val[0], mask2), m1);
+ vs.val[1] = vorrq_u8(vceqq_u8(vs.val[1], mask2), m1);
+
+ signs += 4;
+
+ q3s.val[2] = vmulq_s8(vreinterpretq_s8_u8(vs.val[0]), vreinterpretq_s8_u32(aux32x4_2));
+ q3s.val[3] = vmulq_s8(vreinterpretq_s8_u8(vs.val[1]), vreinterpretq_s8_u32(aux32x4_3));
+
+ const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[0], q8b.val[0]), q3s.val[1], q8b.val[1]);
+ const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q3s.val[2], q8b.val[2]), q3s.val[3], q8b.val[3]);
+
+ sumi1 += vaddvq_s32(p1) * scales8[ib32/2+0];
+ sumi2 += vaddvq_s32(p2) * scales8[ib32/2+4];
+ }
+ sumf += d*(sumi1 + sumi2);
+ }
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq3_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_iq1_s_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq1_s * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined __ARM_NEON
+
+ ggml_int8x16x4_t q1b;
+ ggml_int8x16x4_t q8b;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * qs = x[i].qs;
+ const uint16_t * qh = x[i].qh;
+
+ int sumi1 = 0, sumi2 = 0, sumi3 = 0;
+
+ for (int ib = 0; ib < QK_K/32; ib += 2) {
+
+ q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[ib+0] << 8) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[ib+0] << 5) & 0x700)))));
+ q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[ib+0] << 2) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[ib+0] >> 1) & 0x700)))));
+ q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[ib+1] << 8) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[ib+1] << 5) & 0x700)))));
+ q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[ib+1] << 2) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[ib+1] >> 1) & 0x700)))));
+ qs += 8;
+
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ const int32x4_t p1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[0], q8b.val[0]), q1b.val[1], q8b.val[1]);
+ const int32x4_t p2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q1b.val[2], q8b.val[2]), q1b.val[3], q8b.val[3]);
+
+ const int ls1 = 2*((qh[ib+0] >> 12) & 7) + 1;
+ const int ls2 = 2*((qh[ib+1] >> 12) & 7) + 1;
+ sumi1 += vaddvq_s32(p1) * ls1;
+ sumi2 += vaddvq_s32(p2) * ls2;
+ sumi3 += (y[i].bsums[2*ib+0] + y[i].bsums[2*ib+1]) * ls1 * (qh[ib+0] & 0x8000 ? -1 : 1)
+ + (y[i].bsums[2*ib+2] + y[i].bsums[2*ib+3]) * ls2 * (qh[ib+1] & 0x8000 ? -1 : 1);
+
+ }
+
+ sumf += y[i].d * GGML_CPU_FP16_TO_FP32(x[i].d) * (sumi1 + sumi2 + IQ1S_DELTA * sumi3);
+ }
+
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq1_s_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_iq1_m_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(n % QK_K == 0);
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+
+ const block_iq1_m * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+ iq1m_scale_t scale;
+
+#if defined __ARM_NEON
+ const int32x4_t mask = vdupq_n_s32(0x7);
+ const int32x4_t mone = vdupq_n_s32(1);
+ const int32x4_t mzero = vdupq_n_s32(0);
+
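+    // the four (+/-1, +/-1) delta-sign vector combinations for the two halves of a 16-element group, indexed below by two bits taken from qh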
+ ggml_int8x16x4_t deltas;
+ deltas.val[0] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(+1));
+ deltas.val[1] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(+1));
+ deltas.val[2] = vcombine_s8(vdup_n_s8(+1), vdup_n_s8(-1));
+ deltas.val[3] = vcombine_s8(vdup_n_s8(-1), vdup_n_s8(-1));
+
+ ggml_int8x16x4_t q1b;
+ ggml_int8x16x4_t q8b;
+
+ uint32_t aux32;
+ const uint8_t * aux8 = (const uint8_t *)&aux32;
+
+ float sumf = 0;
+ for (int i = 0; i < nb; ++i) {
+
+ const int8_t * q8 = y[i].qs;
+ const uint8_t * qs = x[i].qs;
+ const uint8_t * qh = x[i].qh;
+ const uint16_t * sc = (const uint16_t *)x[i].scales;
+
+ scale.u16 = (sc[0] >> 12) | ((sc[1] >> 8) & 0x00f0) | ((sc[2] >> 4) & 0x0f00) | (sc[3] & 0xf000);
+
+ int32x4_t sumi1 = mzero;
+ int32x4_t sumi2 = mzero;
+
+ for (int ib = 0; ib < QK_K/32; ib += 2) {
+
+ q1b.val[0] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[0] | ((qh[0] << 8) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[1] | ((qh[0] << 4) & 0x700)))));
+ q1b.val[1] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[2] | ((qh[1] << 8) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[3] | ((qh[1] << 4) & 0x700)))));
+ q1b.val[2] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[4] | ((qh[2] << 8) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[5] | ((qh[2] << 4) & 0x700)))));
+ q1b.val[3] = vcombine_s8(vld1_s8((const int8_t *)(iq1s_grid + (qs[6] | ((qh[3] << 8) & 0x700)))),
+ vld1_s8((const int8_t *)(iq1s_grid + (qs[7] | ((qh[3] << 4) & 0x700)))));
+
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ const int32x4_t p1 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[0], q8b.val[0]), ggml_vdotq_s32(mzero, q1b.val[1], q8b.val[1]));
+ const int32x4_t p2 = vpaddq_s32(ggml_vdotq_s32(mzero, q1b.val[2], q8b.val[2]), ggml_vdotq_s32(mzero, q1b.val[3], q8b.val[3]));
+ const int32x4_t p12 = vpaddq_s32(p1, p2);
+
+            const uint32_t * qh32 = (const uint32_t *)qh; // qh is 4-byte aligned here, so this cast is safe
+ aux32 = ((qh32[0] >> 3) & 0x01010101) | ((qh32[0] >> 6) & 0x02020202);
+
+ const int32x4_t p3 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[0]], q8b.val[0]), ggml_vdotq_s32(mzero, deltas.val[aux8[1]], q8b.val[1]));
+ const int32x4_t p4 = vpaddq_s32(ggml_vdotq_s32(mzero, deltas.val[aux8[2]], q8b.val[2]), ggml_vdotq_s32(mzero, deltas.val[aux8[3]], q8b.val[3]));
+ const int32x4_t p34 = vpaddq_s32(p3, p4);
+
+ int32x4_t scales_4 = ggml_vld1q_u32(sc[ib/2] >> 0, sc[ib/2] >> 3, sc[ib/2] >> 6, sc[ib/2] >> 9);
+
+ scales_4 = vaddq_s32(vshlq_n_s32(vandq_s32(scales_4, mask), 1), mone);
+
+ sumi1 = vmlaq_s32(sumi1, scales_4, p12);
+ sumi2 = vmlaq_s32(sumi2, scales_4, p34);
+
+ qs += 8; qh += 4;
+
+ }
+
+ sumf += y[i].d * GGML_CPU_FP16_TO_FP32(scale.f16) * (vaddvq_s32(sumi1) + IQ1M_DELTA * vaddvq_s32(sumi2));
+ }
+
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ UNUSED(scale);
+ ggml_vec_dot_iq1_m_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
+void ggml_vec_dot_iq4_nl_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+ assert(n % QK4_NL == 0);
+ static_assert(QK4_NL == QK8_0, "QK4_NL and QK8_0 must be the same");
+
+ const block_iq4_nl * GGML_RESTRICT x = vx;
+ const block_q8_0 * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK4_NL;
+
+ int ib = 0;
+ float sumf = 0;
+
+#if defined __ARM_NEON
+ const int8x16_t values = vld1q_s8(kvalues_iq4nl);
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ uint8x16x2_t q4bits;
+ int8x16x4_t q4b;
+ int8x16x4_t q8b;
+ int32x4_t prod_1, prod_2;
+
+ for (; ib + 1 < nb; ib += 2) {
+
+ q4bits.val[0] = vld1q_u8(x[ib + 0].qs);
+ q4bits.val[1] = vld1q_u8(x[ib + 1].qs);
+ q8b.val[0] = vld1q_s8(y[ib + 0].qs);
+ q8b.val[1] = vld1q_s8(y[ib + 0].qs + 16);
+ q8b.val[2] = vld1q_s8(y[ib + 1].qs);
+ q8b.val[3] = vld1q_s8(y[ib + 1].qs + 16);
+
+ q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
+ q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
+ q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
+ q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
+
+ prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
+ prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
+
+ sumf +=
+ GGML_CPU_FP16_TO_FP32(x[ib+0].d) * GGML_CPU_FP16_TO_FP32(y[ib + 0].d) * vaddvq_s32(prod_1) +
+ GGML_CPU_FP16_TO_FP32(x[ib+1].d) * GGML_CPU_FP16_TO_FP32(y[ib + 1].d) * vaddvq_s32(prod_2);
+ }
+
+#endif
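+ // scalar tail: handles any remaining block (and the whole range when NEON is unavailable)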
+ for (; ib < nb; ++ib) {
+ const float d = GGML_CPU_FP16_TO_FP32(y[ib].d)*GGML_CPU_FP16_TO_FP32(x[ib].d);
+ int sumi1 = 0, sumi2 = 0;
+ for (int j = 0; j < QK4_NL/2; ++j) {
+ sumi1 += y[ib].qs[j+ 0] * kvalues_iq4nl[x[ib].qs[j] & 0xf];
+ sumi2 += y[ib].qs[j+QK4_NL/2] * kvalues_iq4nl[x[ib].qs[j] >> 4];
+ }
+ sumf += d * (sumi1 + sumi2);
+ }
+ *s = sumf;
+}
+
+void ggml_vec_dot_iq4_xs_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, size_t bx, const void * GGML_RESTRICT vy, size_t by, int nrc) {
+ assert(nrc == 1);
+ UNUSED(nrc);
+ UNUSED(bx);
+ UNUSED(by);
+ UNUSED(bs);
+ assert(n % QK_K == 0);
+
+ const block_iq4_xs * GGML_RESTRICT x = vx;
+ const block_q8_K * GGML_RESTRICT y = vy;
+
+ const int nb = n / QK_K;
+
+#if defined __ARM_NEON
+ const int8x16_t values = vld1q_s8(kvalues_iq4nl);
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ ggml_uint8x16x2_t q4bits;
+ ggml_int8x16x4_t q4b;
+ ggml_int8x16x4_t q8b;
+ int32x4_t prod_1, prod_2;
+
+ float sumf = 0;
+
+ for (int ibl = 0; ibl < nb; ++ibl) {
+
+ const int8_t * q8 = y[ibl].qs;
+ const uint8_t * q4 = x[ibl].qs;
+ uint16_t h = x[ibl].scales_h;
+
+ int sumi1 = 0, sumi2 = 0;
+ for (int ib = 0; ib < QK_K/64; ++ib) {
+
+ q4bits = ggml_vld1q_u8_x2(q4); q4 += 32;
+ q8b = ggml_vld1q_s8_x4(q8); q8 += 64;
+
+ q4b.val[0] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[0], m4b));
+ q4b.val[1] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[0], 4));
+ q4b.val[2] = ggml_vqtbl1q_s8(values, vandq_u8 (q4bits.val[1], m4b));
+ q4b.val[3] = ggml_vqtbl1q_s8(values, vshrq_n_u8(q4bits.val[1], 4));
+
+ prod_1 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[0], q8b.val[0]), q4b.val[1], q8b.val[1]);
+ prod_2 = ggml_vdotq_s32(ggml_vdotq_s32(vdupq_n_s32(0), q4b.val[2], q8b.val[2]), q4b.val[3], q8b.val[3]);
+
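+ // 6-bit subblock scales: low 4 bits from scales_l, high 2 bits from scales_h, offset by -32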
+ int ls1 = ((x[ibl].scales_l[ib] & 0xf) | ((h << 4) & 0x30)) - 32;
+ int ls2 = ((x[ibl].scales_l[ib] >> 4) | ((h << 2) & 0x30)) - 32;
+ h >>= 4;
+ sumi1 += vaddvq_s32(prod_1) * ls1;
+ sumi2 += vaddvq_s32(prod_2) * ls2;
+
+ }
+
+ sumf += GGML_CPU_FP16_TO_FP32(x[ibl].d) * y[ibl].d * (sumi1 + sumi2);
+ }
+
+ *s = sumf;
+
+#else
+ UNUSED(x);
+ UNUSED(y);
+ UNUSED(nb);
+ ggml_vec_dot_iq4_xs_q8_K_generic(n, s, bs, vx, bx, vy, by, nrc);
+#endif
+}
+
diff --git a/llama.cpp/ggml/src/ggml-cpu/arch/arm/repack.cpp b/llama.cpp/ggml/src/ggml-cpu/arch/arm/repack.cpp
new file mode 100644
index 0000000..fd05c60
--- /dev/null
+++ b/llama.cpp/ggml/src/ggml-cpu/arch/arm/repack.cpp
@@ -0,0 +1,4237 @@
+#define GGML_COMMON_IMPL_CPP
+#define GGML_COMMON_DECL_CPP
+#include "ggml-common.h"
+#include "ggml-backend-impl.h"
+
+#include "ggml-impl.h"
+#include "ggml-cpu.h"
+#include "ggml-cpu-impl.h"
+#include "simd-mappings.h"
+#include "traits.h"
+
+#include <cmath>
+#include <cstring>
+#include <cassert>
+#include <cstdlib> // for qsort
+#include <cstdio> // for GGML_ASSERT
+
+#define GGML_CPU_CLANG_WORKAROUND
+#include "../../repack.h"
+
+#if defined(__GNUC__)
+#pragma GCC diagnostic ignored "-Woverlength-strings"
+#endif
+
+#define UNUSED GGML_UNUSED
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && (defined(__ARM_FEATURE_MATMUL_INT8) || defined(__ARM_FEATURE_DOTPROD))
+// Helper for decoding scales and mins of Q4_K and Q5_K block formats
+static inline void decode_q_Kx8_6bit_scales(const uint8_t * scales_in, int16x8_t * out_mins, int8_t * out_scales) {
+ constexpr uint32_t kmask1 = 0x3f3f3f3f;
+ constexpr uint32_t kmask2 = 0x0f0f0f0f;
+ constexpr uint32_t kmask3 = 0x03030303;
+ constexpr uint8_t scales_size = 12;
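+ // 12 bytes pack 8 scales and 8 mins at 6 bits each (K-quant super-block layout)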
+
+ uint32_t sm[3];
+ memcpy(sm, scales_in, scales_size);
+
+ const uint32_t mins_0_3 = sm[1] & kmask1;
+ const uint32_t mins_4_7 = ((sm[2] >> 4) & kmask2) | (((sm[1] >> 6) & kmask3) << 4);
+ const uint32x2_t mins_u32 = { mins_0_3, mins_4_7 };
+
+ *out_mins = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(mins_u32)));
+
+ uint32_t scales_u32[2];
+ scales_u32[0] = sm[0] & kmask1;
+ scales_u32[1] = (sm[2] & kmask2) | (((sm[0] >> 6) & kmask3) << 4);
+ memcpy(out_scales, scales_u32, 8);
+}
+#endif
+
+void ggml_quantize_mat_q8_0_4x4(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+ assert(QK8_0 == 32);
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t srcv[4][8];
+ float id[4];
+
+ for (int i = 0; i < nb; i++) {
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int row_iter = 0; row_iter < 4; row_iter++) {
+ for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]);
+ for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]);
+ for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ id[row_iter] = d ? 1.0f / d : 0.0f;
+
+ y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d);
+ }
+
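+ // write the 4 rows interleaved in groups of 4 int8 values (blocklen 4)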
+ for (int j = 0; j < 8; j++) {
+ float32x4_t v = vmulq_n_f32(srcv[0][j], id[0]);
+ int32x4_t vi = vcvtnq_s32_f32(v);
+ y[i].qs[16 * j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[16 * j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[16 * j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[16 * j + 3] = vgetq_lane_s32(vi, 3);
+
+ v = vmulq_n_f32(srcv[1][j], id[1]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[16 * j + 4] = vgetq_lane_s32(vi, 0);
+ y[i].qs[16 * j + 5] = vgetq_lane_s32(vi, 1);
+ y[i].qs[16 * j + 6] = vgetq_lane_s32(vi, 2);
+ y[i].qs[16 * j + 7] = vgetq_lane_s32(vi, 3);
+
+ v = vmulq_n_f32(srcv[2][j], id[2]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[16 * j + 8] = vgetq_lane_s32(vi, 0);
+ y[i].qs[16 * j + 9] = vgetq_lane_s32(vi, 1);
+ y[i].qs[16 * j + 10] = vgetq_lane_s32(vi, 2);
+ y[i].qs[16 * j + 11] = vgetq_lane_s32(vi, 3);
+
+ v = vmulq_n_f32(srcv[3][j], id[3]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[16 * j + 12] = vgetq_lane_s32(vi, 0);
+ y[i].qs[16 * j + 13] = vgetq_lane_s32(vi, 1);
+ y[i].qs[16 * j + 14] = vgetq_lane_s32(vi, 2);
+ y[i].qs[16 * j + 15] = vgetq_lane_s32(vi, 3);
+ }
+ }
+#else
+ UNUSED(nb);
+ UNUSED(y);
+ ggml_quantize_mat_q8_0_4x4_generic(x, vy, k);
+#endif
+}
+
+void ggml_quantize_mat_q8_0_4x8(const float * GGML_RESTRICT x, void * GGML_RESTRICT vy, int64_t k) {
+ assert(QK8_0 == 32);
+ assert(k % QK8_0 == 0);
+ const int nb = k / QK8_0;
+
+ block_q8_0x4 * GGML_RESTRICT y = (block_q8_0x4 *) vy;
+
+#if defined(__ARM_NEON)
+ float32x4_t srcv[4][8];
+ float id[4];
+
+ for (int i = 0; i < nb; i++) {
+ float32x4_t asrcv[8];
+ float32x4_t amaxv[8];
+
+ for (int row_iter = 0; row_iter < 4; row_iter++) {
+ for (int j = 0; j < 8; j++) srcv[row_iter][j] = vld1q_f32(x + row_iter * k + i * 32 + 4 * j);
+ for (int j = 0; j < 8; j++) asrcv[j] = vabsq_f32(srcv[row_iter][j]);
+
+ for (int j = 0; j < 4; j++) amaxv[2 * j] = vmaxq_f32(asrcv[2 * j], asrcv[2 * j + 1]);
+ for (int j = 0; j < 2; j++) amaxv[4 * j] = vmaxq_f32(amaxv[4 * j], amaxv[4 * j + 2]);
+ for (int j = 0; j < 1; j++) amaxv[8 * j] = vmaxq_f32(amaxv[8 * j], amaxv[8 * j + 4]);
+
+ const float amax = vmaxvq_f32(amaxv[0]);
+
+ const float d = amax / ((1 << 7) - 1);
+ id[row_iter] = d ? 1.0f / d : 0.0f;
+
+ y[i].d[row_iter] = GGML_CPU_FP32_TO_FP16(d);
+ }
+
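+ // write the 4 rows interleaved in groups of 8 int8 values (blocklen 8)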
+ for (int j = 0; j < 4; j++) {
+ float32x4_t v = vmulq_n_f32(srcv[0][2 * j], id[0]);
+ int32x4_t vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 0] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 1] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 2] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 3] = vgetq_lane_s32(vi, 3);
+ v = vmulq_n_f32(srcv[0][2 * j + 1], id[0]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 4] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 5] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 6] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 7] = vgetq_lane_s32(vi, 3);
+
+ v = vmulq_n_f32(srcv[1][2 * j], id[1]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 8] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 9] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 10] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 11] = vgetq_lane_s32(vi, 3);
+ v = vmulq_n_f32(srcv[1][2 * j + 1], id[1]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 12] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 13] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 14] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 15] = vgetq_lane_s32(vi, 3);
+
+ v = vmulq_n_f32(srcv[2][2 * j], id[2]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 16] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 17] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 18] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 19] = vgetq_lane_s32(vi, 3);
+ v = vmulq_n_f32(srcv[2][2 * j + 1], id[2]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 20] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 21] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 22] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 23] = vgetq_lane_s32(vi, 3);
+
+ v = vmulq_n_f32(srcv[3][2 * j], id[3]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 24] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 25] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 26] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 27] = vgetq_lane_s32(vi, 3);
+ v = vmulq_n_f32(srcv[3][2 * j + 1], id[3]);
+ vi = vcvtnq_s32_f32(v);
+ y[i].qs[32 * j + 28] = vgetq_lane_s32(vi, 0);
+ y[i].qs[32 * j + 29] = vgetq_lane_s32(vi, 1);
+ y[i].qs[32 * j + 30] = vgetq_lane_s32(vi, 2);
+ y[i].qs[32 * j + 31] = vgetq_lane_s32(vi, 3);
+ }
+ }
+
+#else
+ UNUSED(nb);
+ UNUSED(y);
+ ggml_quantize_mat_q8_0_4x8_generic(x, vy, k);
+#endif
+}
+
+void ggml_gemv_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 4;
+
+ assert (n % qk == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx;
+
+ for (int c = 0; c < nc; c += ncols_interleaved) {
+ const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
+ float32x4_t acc = vdupq_n_f32(0);
+ for (int b = 0; b < nb; b++) {
+ int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs);
+ int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16);
+ int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32);
+ int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48);
+ float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d);
+
+ int8x16_t a0 = vld1q_s8(a_ptr->qs);
+ int8x16_t a1 = vld1q_s8(a_ptr->qs + qk/2);
+ float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d);
+
+ int32x4_t ret = vdupq_n_s32(0);
+
+ ret = vdotq_laneq_s32(ret, b0 << 4, a0, 0);
+ ret = vdotq_laneq_s32(ret, b1 << 4, a0, 1);
+ ret = vdotq_laneq_s32(ret, b2 << 4, a0, 2);
+ ret = vdotq_laneq_s32(ret, b3 << 4, a0, 3);
+
+ ret = vdotq_laneq_s32(ret, b0 & 0xf0U, a1, 0);
+ ret = vdotq_laneq_s32(ret, b1 & 0xf0U, a1, 1);
+ ret = vdotq_laneq_s32(ret, b2 & 0xf0U, a1, 2);
+ ret = vdotq_laneq_s32(ret, b3 & 0xf0U, a1, 3);
+
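+ // the shifted/masked nibbles above are scaled by 16; vcvtq_n_f32_s32(ret, 4) removes that factor during the int->float conversion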
+ acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4),
+ vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd)));
+ a_ptr++;
+ b_ptr++;
+ }
+ vst1q_f32(s, acc);
+ s += ncols_interleaved;
+ }
+ return;
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 8;
+
+ assert (n % qk == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ const block_q4_0x4 * b_ptr = (const block_q4_0x4 *) vx;
+
+ for (int c = 0; c < nc; c += ncols_interleaved) {
+ const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
+ float32x4_t acc = vdupq_n_f32(0);
+ for (int b = 0; b < nb; b++) {
+ int8x16_t b0 = vld1q_s8((const int8_t *) b_ptr->qs);
+ int8x16_t b1 = vld1q_s8((const int8_t *) b_ptr->qs + 16);
+ int8x16_t b2 = vld1q_s8((const int8_t *) b_ptr->qs + 32);
+ int8x16_t b3 = vld1q_s8((const int8_t *) b_ptr->qs + 48);
+ float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d);
+
+ int8x16_t a0 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs);
+ int8x16_t a1 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 1);
+ int8x16_t a2 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 2);
+ int8x16_t a3 = (int8x16_t) vld1q_dup_s64((const int64_t *) a_ptr->qs + 3);
+ float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d);
+
+ int32x4_t ret0 = vdupq_n_s32(0);
+ int32x4_t ret1 = vdupq_n_s32(0);
+
+ ret0 = vdotq_s32(ret0, b0 << 4, a0);
+ ret1 = vdotq_s32(ret1, b1 << 4, a0);
+ ret0 = vdotq_s32(ret0, b2 << 4, a1);
+ ret1 = vdotq_s32(ret1, b3 << 4, a1);
+
+ ret0 = vdotq_s32(ret0, b0 & 0xf0U, a2);
+ ret1 = vdotq_s32(ret1, b1 & 0xf0U, a2);
+ ret0 = vdotq_s32(ret0, b2 & 0xf0U, a3);
+ ret1 = vdotq_s32(ret1, b3 & 0xf0U, a3);
+
+ int32x4_t ret = vpaddq_s32(ret0, ret1);
+
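+ // as in the 4x4 kernel, the x16 nibble scaling is undone by the fixed-point conversion below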
+ acc = vfmaq_f32(acc, vcvtq_n_f32_s32(ret, 4),
+ vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd)));
+ a_ptr++;
+ b_ptr++;
+ }
+ vst1q_f32(s, acc);
+ s += ncols_interleaved;
+ }
+ return;
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 8;
+ const int blocklen = 8;
+
+ assert (n % qk == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE)
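+ // the hand-written SVE kernel below assumes a 256-bit vector length (32 bytes == QK8_0)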
+ if (ggml_cpu_get_sve_cnt() == QK8_0) {
+ const void * b_ptr = vx;
+ const void * a_ptr = vy;
+ float * res_ptr = s;
+
+ __asm__ __volatile__(
+ "ptrue p0.b\n"
+ "add %x[b_ptr], %x[b_ptr], #0x10\n"
+ "1:" // Column loop
+ "add x22, %x[a_ptr], #0x2\n"
+ "mov z31.b, #0x0\n"
+ "mov x21, %x[nb]\n"
+ "2:" // Block loop
+ "ld1b { z30.b }, p0/Z, [%x[b_ptr]]\n"
+ "ld1b { z29.b }, p0/Z, [%x[b_ptr], #1, MUL VL]\n"
+ "mov z28.s, #0x0\n"
+ "mov z27.s, #0x0\n"
+ "ld1rd { z26.d }, p0/Z, [x22]\n"
+ "ld1b { z25.b }, p0/Z, [%x[b_ptr], #2, MUL VL]\n"
+ "sub x20, x22, #0x2\n"
+ "sub x21, x21, #0x1\n"
+ "ld1b { z24.b }, p0/Z, [%x[b_ptr], #3, MUL VL]\n"
+ "ld1rd { z23.d }, p0/Z, [x22, #8]\n"
+ "lsl z22.b, z30.b, #0x4\n"
+ "lsl z16.b, z29.b, #0x4\n"
+ "and z30.b, z30.b, #0xf0\n"
+ "and z29.b, z29.b, #0xf0\n"
+ "ld1rd { z21.d }, p0/Z, [x22, #16]\n"
+ "ld1rd { z20.d }, p0/Z, [x22, #24]\n"
+ "lsl z19.b, z25.b, #0x4\n"
+ "and z25.b, z25.b, #0xf0\n"
+ "ld1rh { z17.h }, p0/Z, [x20]\n"
+ "ld1h { z18.s }, p0/Z, [%x[b_ptr], #-1, MUL VL]\n"
+ "sdot z28.s, z22.b, z26.b\n"
+ "sdot z27.s, z16.b, z26.b\n"
+ "lsl z16.b, z24.b, #0x4\n"
+ "add x22, x22, #0x22\n"
+ "and z24.b, z24.b, #0xf0\n"
+ "add %x[b_ptr], %x[b_ptr], #0x90\n"
+ "fcvt z17.s, p0/m, z17.h\n"
+ "fcvt z18.s, p0/m, z18.h\n"
+ "sdot z28.s, z19.b, z23.b\n"
+ "sdot z27.s, z16.b, z23.b\n"
+ "fmul z18.s, z18.s, z17.s\n"
+ "sdot z28.s, z30.b, z21.b\n"
+ "sdot z27.s, z29.b, z21.b\n"
+ "sdot z28.s, z25.b, z20.b\n"
+ "sdot z27.s, z24.b, z20.b\n"
+ "uzp1 z17.s, z28.s, z27.s\n"
+ "uzp2 z16.s, z28.s, z27.s\n"
+ "add z17.s, z17.s, z16.s\n"
+ "asr z17.s, z17.s, #0x4\n"
+ "scvtf z17.s, p0/m, z17.s\n"
+ "fmla z31.s, p0/M, z17.s, z18.s\n"
+ "cbnz x21, 2b\n"
+ "sub %x[nc], %x[nc], #0x8\n"
+ "st1w { z31.s }, p0, [%x[res_ptr]]\n"
+ "add %x[res_ptr], %x[res_ptr], #0x20\n"
+ "cbnz %x[nc], 1b\n"
+ : [b_ptr] "+&r" (b_ptr), [res_ptr] "+&r" (res_ptr), [nc] "+&r" (nc)
+ : [a_ptr] "r" (a_ptr), [nb] "r" (nb)
+ : "memory", "p0", "x20", "x21", "x22", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+ return;
+ }
+#endif // #if defined(__ARM_FEATURE_SVE)
+
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
+ ggml_gemv_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 4;
+
+ assert (n % qk == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
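+ // kvalues_iq4nl is the 16-entry non-linear codebook; vqtbl1q_s8 maps each 4-bit index to its int8 value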
+ const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl);
+ const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
+ float * res_ptr = s;
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb);
+
+ float32x4_t sumf = vdupq_n_f32(0);
+ for (int l = 0; l < nb; l++) {
+ uint8x16_t b_0 = vld1q_u8(b_ptr[l].qs + 0);
+ uint8x16_t b_1 = vld1q_u8(b_ptr[l].qs + 16);
+ uint8x16_t b_2 = vld1q_u8(b_ptr[l].qs + 32);
+ uint8x16_t b_3 = vld1q_u8(b_ptr[l].qs + 48);
+
+ int8x16_t b_0_hi = vqtbl1q_s8(kvalues, b_0 >> 4);
+ int8x16_t b_0_lo = vqtbl1q_s8(kvalues, b_0 & 0x0F);
+ int8x16_t b_1_hi = vqtbl1q_s8(kvalues, b_1 >> 4);
+ int8x16_t b_1_lo = vqtbl1q_s8(kvalues, b_1 & 0x0F);
+ int8x16_t b_2_hi = vqtbl1q_s8(kvalues, b_2 >> 4);
+ int8x16_t b_2_lo = vqtbl1q_s8(kvalues, b_2 & 0x0F);
+ int8x16_t b_3_hi = vqtbl1q_s8(kvalues, b_3 >> 4);
+ int8x16_t b_3_lo = vqtbl1q_s8(kvalues, b_3 & 0x0F);
+
+ int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 0);
+ int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16);
+
+ int32x4_t sumi = vdupq_n_s32(0);
+ sumi = vdotq_laneq_s32(sumi, b_0_lo, a_0, 0);
+ sumi = vdotq_laneq_s32(sumi, b_0_hi, a_1, 0);
+ sumi = vdotq_laneq_s32(sumi, b_1_lo, a_0, 1);
+ sumi = vdotq_laneq_s32(sumi, b_1_hi, a_1, 1);
+ sumi = vdotq_laneq_s32(sumi, b_2_lo, a_0, 2);
+ sumi = vdotq_laneq_s32(sumi, b_2_hi, a_1, 2);
+ sumi = vdotq_laneq_s32(sumi, b_3_lo, a_0, 3);
+ sumi = vdotq_laneq_s32(sumi, b_3_hi, a_1, 3);
+
+ float32x4_t a_d = vcvt_f32_f16(vld1_dup_f16((const float16_t *)&a_ptr[l].d));
+ float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d));
+ float32x4_t d = a_d * b_d;
+
+ sumf = vmlaq_f32(sumf, d, vcvtq_f32_s32(sumi));
+ }
+
+ vst1q_f32(res_ptr + x * 4, sumf);
+ }
+ return;
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q4_K_8x4_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ constexpr int col_groups = ncols_interleaved / 4; // 0123 and 4567
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+
+ // 1x8 tile = 2 x 4
+ float32x4_t acc_f32[col_groups];
+
+ const block_q8_K * GGML_RESTRICT q8_ptr = (const block_q8_K *) vy;
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < col_groups; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ float32x4_t q4_d_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d)); // d0 d1 d2 d3
+ float32x4_t q4_d_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d + 4)); // d4 d5 d6 d7
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d);
+ float32x4_t sb_scale_0123 = vmulq_f32(q4_d_0, q8_d);
+ float32x4_t sb_scale_4567 = vmulq_f32(q4_d_1, q8_d);
+ float32x4_t q4_dmin_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin)); // dmin 0..3
+ float32x4_t q4_dmin_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin + 4)); // dmin 4..7
+ float32x4_t sb_min_0123 = vmulq_f32(q4_dmin_0, q8_d);
+ float32x4_t sb_min_4567 = vmulq_f32(q4_dmin_1, q8_d);
+
+ // interleaved bias_acc: [0]->r0 0123, [1]->r0 4567
+ int32x4_t bias_acc[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };
+ int32x4_t acc_lo[col_groups];
+ int32x4_t acc_hi[col_groups];
+
+ // bsums holds 16 partial sums (one per 16 values); pairwise add leaves the 8 per-subblock sums for the entire block
+ const int16x8_t bsums = vpaddq_s16(vld1q_s16(q8_ptr[b].bsums), vld1q_s16(q8_ptr[b].bsums + 8));
+ int16_t bsums_arr[8];
+ vst1q_s16(bsums_arr, bsums);
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ for (int i = 0; i < col_groups; i++) {
+ acc_lo[i] = vdupq_n_s32(0);
+ acc_hi[i] = vdupq_n_s32(0);
+ }
+ // Need scales for the low and high nibbles
+ // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total
+ int16x8_t q4sb_mins[2];
+ int16x8_t q4sb_scales[2];
+ for (int i = 0; i < 2; i++) {
+ int8_t aux_q4sb[8];
+ const int offset = sb * 24 + i * 12;
+ decode_q_Kx8_6bit_scales(&q4_ptr[b].scales[offset], &q4sb_mins[i], aux_q4sb);
+ q4sb_scales[i] = vmovl_s8(vld1_s8(aux_q4sb));
+ }
+
+ int8x16_t q8_qs[64 / 16];
+ for (int i = 0; i < 64 / 16; i++) {
+ q8_qs[i] = vld1q_s8(q8_ptr[b].qs + sb * 64 + i * 16);
+ }
+
+ for (int c = 0; c < col_groups; c++) {
+ uint8x16_t q4_cols[8];
+ for (int i = 0; i < 8; i++) {
+ q4_cols[i] = vld1q_u8(q4_ptr[b].qs + sb * QK_K + i * 32 + 16 * c);
+ }
+
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[0], m4b)), q8_qs[0], 0);
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[1], m4b)), q8_qs[0], 1);
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[2], m4b)), q8_qs[0], 2);
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[3], m4b)), q8_qs[0], 3);
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[4], m4b)), q8_qs[1], 0);
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[5], m4b)), q8_qs[1], 1);
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[6], m4b)), q8_qs[1], 2);
+ acc_lo[c] = vdotq_laneq_s32(acc_lo[c], vreinterpretq_s8_u8(vandq_u8(q4_cols[7], m4b)), q8_qs[1], 3);
+
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[0], 4)), q8_qs[2], 0);
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[1], 4)), q8_qs[2], 1);
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[2], 4)), q8_qs[2], 2);
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[3], 4)), q8_qs[2], 3);
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[4], 4)), q8_qs[3], 0);
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[5], 4)), q8_qs[3], 1);
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[6], 4)), q8_qs[3], 2);
+ acc_hi[c] = vdotq_laneq_s32(acc_hi[c], vreinterpretq_s8_u8(vshrq_n_u8(q4_cols[7], 4)), q8_qs[3], 3);
+ }
+
+ // Scales
+ // row c0123 blk0 and blk1
+ const int16x4_t sc_0123_lo = vget_low_s16(q4sb_scales[0]);
+ const int16x4_t sc_0123_hi = vget_low_s16(q4sb_scales[1]);
+ const float32x4_t sumf_0123 = vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_0123_lo), acc_lo[0]),
+ vmulq_s32(vmovl_s16(sc_0123_hi), acc_hi[0])));
+ acc_f32[0] = vfmaq_f32(acc_f32[0], sb_scale_0123, sumf_0123);
+ // row c4567 blk0 and blk1
+ const int16x4_t sc_4567_lo = vget_high_s16(q4sb_scales[0]);
+ const int16x4_t sc_4567_hi = vget_high_s16(q4sb_scales[1]);
+ const float32x4_t sumf_4567 = vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_4567_lo), acc_lo[1]),
+ vmulq_s32(vmovl_s16(sc_4567_hi), acc_hi[1])));
+ acc_f32[1] = vfmaq_f32(acc_f32[1], sb_scale_4567, sumf_4567);
+
+ // Bias Correction
+ const int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[2 * sb + 0]);
+ const int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[2 * sb + 1]);
+
+ bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_lo, vget_low_s16(q4sb_mins[0]));
+ bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_hi, vget_low_s16(q4sb_mins[1]));
+ bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_lo, vget_high_s16(q4sb_mins[0]));
+ bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_hi, vget_high_s16(q4sb_mins[1]));
+ } // for sb
+
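+ // subtract the mins contribution: dmin * q8.d * sum(bsum_sb * min_sb)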
+ acc_f32[0] = vmlsq_f32(acc_f32[0], vcvtq_f32_s32(bias_acc[0]), sb_min_0123);
+ acc_f32[1] = vmlsq_f32(acc_f32[1], vcvtq_f32_s32(bias_acc[1]), sb_min_4567);
+ } // for b
+
+ int base = x * ncols_interleaved;
+ vst1q_f32(s + base, acc_f32[0]);
+ vst1q_f32(s + base + 4, acc_f32[1]);
+ } // for x
+ return;
+#endif // #if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q4_K_8x4_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q4_K_8x8_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ constexpr int col_pairs = ncols_interleaved / 2;
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+
+ // 1x8 tile = 2 x 4
+ float32x4_t acc_f32[ncols_interleaved / 4];
+
+ const block_q8_K * GGML_RESTRICT q8_ptr = (const block_q8_K *) vy;
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < ncols_interleaved / 4; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ float32x4_t q4_d_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d)); // d0 d1 d2 d3
+ float32x4_t q4_d_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d + 4)); // d4 d5 d6 d7
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d);
+ float32x4_t sb_scale_0 = vmulq_f32(q4_d_0, q8_d);
+ float32x4_t sb_scale_1 = vmulq_f32(q4_d_1, q8_d);
+ float32x4_t q4_dmin_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin)); // dmin 0..3
+ float32x4_t q4_dmin_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin + 4)); // dmin 4..7
+ float32x4_t sb_min_0 = vmulq_f32(q4_dmin_0, q8_d);
+ float32x4_t sb_min_1 = vmulq_f32(q4_dmin_1, q8_d);
+
+ // interleaved bias_acc: [0]->r0 0123, [1]->r0 4567
+ int32x4_t bias_acc[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };
+ // 2 sb each iteration
+ int32x4_t acc_lo[col_pairs];
+ int32x4_t acc_hi[col_pairs];
+
+ // bsums holds 16 partial sums (one per 16 values); pairwise add leaves the 8 per-subblock sums for the entire block
+ const int16x8_t bsums = vpaddq_s16(vld1q_s16(q8_ptr[b].bsums), vld1q_s16(q8_ptr[b].bsums + 8));
+ int16_t bsums_arr[8];
+ vst1q_s16(bsums_arr, bsums);
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ for (int i = 0; i < col_pairs; i++) {
+ acc_lo[i] = vdupq_n_s32(0);
+ acc_hi[i] = vdupq_n_s32(0);
+ }
+ // Need scales for the low and high nibbles
+ // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total
+ int16x8_t q4sb_mins[2]; // int16 as it's needed for bias_acc later
+ int16x8_t q4sb_scales[2];
+ for (int i = 0; i < 2; i++) {
+ int8_t aux_q4sb[8];
+ const int offset = sb * 24 + i * 12;
+ decode_q_Kx8_6bit_scales(&q4_ptr[b].scales[offset], &q4sb_mins[i], aux_q4sb);
+ q4sb_scales[i] = vmovl_s8(vld1_s8(aux_q4sb));
+ }
+
+ const uint8_t * q4_base = q4_ptr[b].qs + sb * QK_K;
+
+ // Load the 64 quants from q8_K duplicated so the vecdots line up with the interleaved columns,
+ // while the low and high nibbles of q4 are still handled separately
+ const int8_t * q8_base = q8_ptr[b].qs + sb * 64;
+ int8x16_t q8_qs[8];
+ for (int i = 0; i < 8; i++) {
+ q8_qs[i] = (int8x16_t) vld1q_dup_s64((const int64_t *) (q8_base + i * 8));
+ }
+
+ // Q4s columns iterated in pairs (01, 23, 45, 67)
+ for (int cp = 0; cp < col_pairs; cp++) {
+ uint8x16_t q4_qs_cp_0 = vld1q_u8(q4_base + 16 * cp);
+ uint8x16_t q4_qs_cp_1 = vld1q_u8(q4_base + 16 * cp + 64);
+ uint8x16_t q4_qs_cp_2 = vld1q_u8(q4_base + 16 * cp + 128);
+ uint8x16_t q4_qs_cp_3 = vld1q_u8(q4_base + 16 * cp + 192);
+
+ acc_lo[cp] =
+ ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_0, m4b)), q8_qs[0]); // 0 .. 7
+ acc_lo[cp] =
+ ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_1, m4b)), q8_qs[1]); // 8 ..15
+ acc_lo[cp] =
+ ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_2, m4b)), q8_qs[2]); // 16..23
+ acc_lo[cp] =
+ ggml_vdotq_s32(acc_lo[cp], vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_3, m4b)), q8_qs[3]); // 24..31
+
+ acc_hi[cp] =
+ ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_0, 4)), q8_qs[4]); // 32..39
+ acc_hi[cp] =
+ ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_1, 4)), q8_qs[5]); // 40..47
+ acc_hi[cp] =
+ ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_2, 4)), q8_qs[6]); // 48..55
+ acc_hi[cp] =
+ ggml_vdotq_s32(acc_hi[cp], vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_3, 4)), q8_qs[7]); // 56..63
+ }
+
+ // Iterate over a pair of column pairs (4 columns) to use a single 128-bit register
+ // p = 0 -> cols 0123, p = 2 -> cols 4567
+ for (int i = 0, p = 0; p < col_pairs; i++, p += 2) {
+ int16x4_t group_scales_lo = p == 0 ? vget_low_s16(q4sb_scales[0]) : vget_high_s16(q4sb_scales[0]);
+ int16x4_t group_scales_hi = p == 0 ? vget_low_s16(q4sb_scales[1]) : vget_high_s16(q4sb_scales[1]);
+ float32x4_t sb_scale = p == 0 ? sb_scale_0 : sb_scale_1;
+
+ // 0123 or 4567
+ float32x4_t sumf_0 =
+ vcvtq_f32_s32(vmulq_s32(vmovl_s16(group_scales_lo), vpaddq_s32(acc_lo[p], acc_lo[p + 1])));
+ acc_f32[i] = vfmaq_f32(acc_f32[i], sb_scale, sumf_0);
+
+ float32x4_t sumf_1 =
+ vcvtq_f32_s32(vmulq_s32(vmovl_s16(group_scales_hi), vpaddq_s32(acc_hi[p], acc_hi[p + 1])));
+ acc_f32[i] = vfmaq_f32(acc_f32[i], sb_scale, sumf_1);
+ }
+
+ // Accumulate the mins contribution from the bsums
+ // Each pair of subblocks shares the same bsum
+ // Load the scalar bsum and broadcast it to a vector (vdup_n_s16)
+ int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[2 * sb + 0]);
+ int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[2 * sb + 1]);
+
+ // cols 0-3 bias
+ bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_lo, vget_low_s16(q4sb_mins[0]));
+ bias_acc[0] = vmlal_s16(bias_acc[0], bsums_vec_hi, vget_low_s16(q4sb_mins[1]));
+
+ // cols 4-7 bias
+ bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_lo, vget_high_s16(q4sb_mins[0]));
+ bias_acc[1] = vmlal_s16(bias_acc[1], bsums_vec_hi, vget_high_s16(q4sb_mins[1]));
+ } // for sb
+
+ acc_f32[0] = vmlsq_f32(acc_f32[0], vcvtq_f32_s32(bias_acc[0]), sb_min_0);
+ acc_f32[1] = vmlsq_f32(acc_f32[1], vcvtq_f32_s32(bias_acc[1]), sb_min_1);
+ } // for b
+
+ int base = x * ncols_interleaved;
+ vst1q_f32(s + base, acc_f32[0]);
+ vst1q_f32(s + base + 4, acc_f32[1]);
+ } // for x
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q5_K_8x8_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ constexpr int col_pairs = ncols_interleaved / 2;
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ const uint8x16_t mone = vdupq_n_u8(1);
+ const uint8x16_t mtwo = vdupq_n_u8(2);
+
+ // 1x8 tile = 2 x 4
+ float32x4_t acc_f32[ncols_interleaved / 4];
+
+ const block_q8_K * GGML_RESTRICT q8_ptr = (const block_q8_K *) vy;
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q5_Kx8 * GGML_RESTRICT q5_ptr = (const block_q5_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < ncols_interleaved / 4; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ float32x4_t q5_d_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q5_ptr[b].d)); // d0 d1 d2 d3
+ float32x4_t q5_d_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q5_ptr[b].d + 4)); // d4 d5 d6 d7
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d);
+ float32x4_t sb_scale_0 = vmulq_f32(q5_d_0, q8_d);
+ float32x4_t sb_scale_1 = vmulq_f32(q5_d_1, q8_d);
+ float32x4_t q5_dmin_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q5_ptr[b].dmin)); // dmin 0..3
+ float32x4_t q5_dmin_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q5_ptr[b].dmin + 4)); // dmin 4..7
+ float32x4_t sb_min_0 = vmulq_f32(q5_dmin_0, q8_d);
+ float32x4_t sb_min_1 = vmulq_f32(q5_dmin_1, q8_d);
+
+ // 2 sb each iteration
+ int32x4_t acc_lo[col_pairs];
+ int32x4_t acc_hi[col_pairs];
+
+ // bsums holds 16 partial sums (one per 16 values); pairwise add leaves the 8 per-subblock sums for the entire block
+ const int16x8_t bsums = vpaddq_s16(vld1q_s16(q8_ptr[b].bsums), vld1q_s16(q8_ptr[b].bsums + 8));
+ int16_t bsums_arr[8];
+ vst1q_s16(bsums_arr, bsums);
+
+ // Load qh once per block and shift after each subblock
+ const uint8_t * qh_base = q5_ptr[b].qh;
+ uint8x16_t qh[col_pairs][4];
+ for (int cp = 0; cp < col_pairs; cp++) {
+ qh[cp][0] = vld1q_u8(qh_base + 16 * cp);
+ qh[cp][1] = vld1q_u8(qh_base + 16 * cp + 64);
+ qh[cp][2] = vld1q_u8(qh_base + 16 * cp + 128);
+ qh[cp][3] = vld1q_u8(qh_base + 16 * cp + 192);
+ }
+
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ for (int i = 0; i < col_pairs; i++) {
+ acc_lo[i] = vdupq_n_s32(0);
+ acc_hi[i] = vdupq_n_s32(0);
+ }
+ // Need scales for the low and high nibbles
+ // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total
+ int16x8_t q5sb_mins[2]; // int16 as it's needed for bias_acc later
+ int16x8_t q5sb_scales[2];
+ for (int i = 0; i < 2; i++) {
+ int8_t aux_q5sb[8];
+ const int offset = sb * 24 + i * 12;
+ decode_q_Kx8_6bit_scales(&q5_ptr[b].scales[offset], &q5sb_mins[i], aux_q5sb);
+ q5sb_scales[i] = vmovl_s8(vld1_s8(aux_q5sb));
+ }
+
+ const uint8_t * qs_base = q5_ptr[b].qs + sb * QK_K;
+
+ // Load the 64 quants from q8K duplicated to use vecdots with the interleaved columns
+ const int8_t * q8_base = q8_ptr[b].qs + sb * 64;
+ int8x16_t q8_qs[8];
+ for (int i = 0; i < 8; i++) {
+ q8_qs[i] = (int8x16_t) vld1q_dup_s64((const int64_t *) (q8_base + i * 8));
+ }
+
+ // Q5s column pair loop unrolled
+ {
+ // Cols 01
+ uint8x16_t qs_0 = vld1q_u8(qs_base);
+ uint8x16_t qs_1 = vld1q_u8(qs_base + 64);
+ uint8x16_t qs_2 = vld1q_u8(qs_base + 128);
+ uint8x16_t qs_3 = vld1q_u8(qs_base + 192);
+
+ uint8x16_t hbit_lo_0 = vandq_u8(qh[0][0], mone);
+ uint8x16_t hbit_lo_1 = vandq_u8(qh[0][1], mone);
+ uint8x16_t hbit_lo_2 = vandq_u8(qh[0][2], mone);
+ uint8x16_t hbit_lo_3 = vandq_u8(qh[0][3], mone);
+ uint8x16_t hbit_hi_0 = vshlq_n_u8(vandq_u8(qh[0][0], mtwo), 3);
+ uint8x16_t hbit_hi_1 = vshlq_n_u8(vandq_u8(qh[0][1], mtwo), 3);
+ uint8x16_t hbit_hi_2 = vshlq_n_u8(vandq_u8(qh[0][2], mtwo), 3);
+ uint8x16_t hbit_hi_3 = vshlq_n_u8(vandq_u8(qh[0][3], mtwo), 3);
+
+ qh[0][0] = vshrq_n_u8(qh[0][0], 2);
+ qh[0][1] = vshrq_n_u8(qh[0][1], 2);
+ qh[0][2] = vshrq_n_u8(qh[0][2], 2);
+ qh[0][3] = vshrq_n_u8(qh[0][3], 2);
+
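+ // vsliq_n_u8 shifts the high bit left by 4 and inserts it above the masked low nibble, giving the full 5-bit quant (0..31)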
+ acc_lo[0] = ggml_vdotq_s32(
+ acc_lo[0], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_0, m4b), hbit_lo_0, 4)), q8_qs[0]);
+ acc_lo[0] = ggml_vdotq_s32(
+ acc_lo[0], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_1, m4b), hbit_lo_1, 4)), q8_qs[1]);
+ acc_lo[0] = ggml_vdotq_s32(
+ acc_lo[0], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_2, m4b), hbit_lo_2, 4)), q8_qs[2]);
+ acc_lo[0] = ggml_vdotq_s32(
+ acc_lo[0], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_3, m4b), hbit_lo_3, 4)), q8_qs[3]);
+ acc_hi[0] = ggml_vdotq_s32(acc_hi[0], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_0, 4), hbit_hi_0)),
+ q8_qs[4]);
+ acc_hi[0] = ggml_vdotq_s32(acc_hi[0], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_1, 4), hbit_hi_1)),
+ q8_qs[5]);
+ acc_hi[0] = ggml_vdotq_s32(acc_hi[0], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_2, 4), hbit_hi_2)),
+ q8_qs[6]);
+ acc_hi[0] = ggml_vdotq_s32(acc_hi[0], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_3, 4), hbit_hi_3)),
+ q8_qs[7]);
+
+ // Cols 23
+ qs_0 = vld1q_u8(qs_base + 16);
+ qs_1 = vld1q_u8(qs_base + 80);
+ qs_2 = vld1q_u8(qs_base + 144);
+ qs_3 = vld1q_u8(qs_base + 208);
+
+ hbit_lo_0 = vandq_u8(qh[1][0], mone);
+ hbit_lo_1 = vandq_u8(qh[1][1], mone);
+ hbit_lo_2 = vandq_u8(qh[1][2], mone);
+ hbit_lo_3 = vandq_u8(qh[1][3], mone);
+ hbit_hi_0 = vshlq_n_u8(vandq_u8(qh[1][0], mtwo), 3);
+ hbit_hi_1 = vshlq_n_u8(vandq_u8(qh[1][1], mtwo), 3);
+ hbit_hi_2 = vshlq_n_u8(vandq_u8(qh[1][2], mtwo), 3);
+ hbit_hi_3 = vshlq_n_u8(vandq_u8(qh[1][3], mtwo), 3);
+
+ qh[1][0] = vshrq_n_u8(qh[1][0], 2);
+ qh[1][1] = vshrq_n_u8(qh[1][1], 2);
+ qh[1][2] = vshrq_n_u8(qh[1][2], 2);
+ qh[1][3] = vshrq_n_u8(qh[1][3], 2);
+
+ acc_lo[1] = ggml_vdotq_s32(
+ acc_lo[1], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_0, m4b), hbit_lo_0, 4)), q8_qs[0]);
+ acc_lo[1] = ggml_vdotq_s32(
+ acc_lo[1], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_1, m4b), hbit_lo_1, 4)), q8_qs[1]);
+ acc_lo[1] = ggml_vdotq_s32(
+ acc_lo[1], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_2, m4b), hbit_lo_2, 4)), q8_qs[2]);
+ acc_lo[1] = ggml_vdotq_s32(
+ acc_lo[1], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_3, m4b), hbit_lo_3, 4)), q8_qs[3]);
+ acc_hi[1] = ggml_vdotq_s32(acc_hi[1], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_0, 4), hbit_hi_0)),
+ q8_qs[4]);
+ acc_hi[1] = ggml_vdotq_s32(acc_hi[1], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_1, 4), hbit_hi_1)),
+ q8_qs[5]);
+ acc_hi[1] = ggml_vdotq_s32(acc_hi[1], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_2, 4), hbit_hi_2)),
+ q8_qs[6]);
+ acc_hi[1] = ggml_vdotq_s32(acc_hi[1], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_3, 4), hbit_hi_3)),
+ q8_qs[7]);
+
+ // Cols 45
+ qs_0 = vld1q_u8(qs_base + 32);
+ qs_1 = vld1q_u8(qs_base + 96);
+ qs_2 = vld1q_u8(qs_base + 160);
+ qs_3 = vld1q_u8(qs_base + 224);
+
+ hbit_lo_0 = vandq_u8(qh[2][0], mone);
+ hbit_lo_1 = vandq_u8(qh[2][1], mone);
+ hbit_lo_2 = vandq_u8(qh[2][2], mone);
+ hbit_lo_3 = vandq_u8(qh[2][3], mone);
+ hbit_hi_0 = vshlq_n_u8(vandq_u8(qh[2][0], mtwo), 3);
+ hbit_hi_1 = vshlq_n_u8(vandq_u8(qh[2][1], mtwo), 3);
+ hbit_hi_2 = vshlq_n_u8(vandq_u8(qh[2][2], mtwo), 3);
+ hbit_hi_3 = vshlq_n_u8(vandq_u8(qh[2][3], mtwo), 3);
+
+ qh[2][0] = vshrq_n_u8(qh[2][0], 2);
+ qh[2][1] = vshrq_n_u8(qh[2][1], 2);
+ qh[2][2] = vshrq_n_u8(qh[2][2], 2);
+ qh[2][3] = vshrq_n_u8(qh[2][3], 2);
+
+ acc_lo[2] = ggml_vdotq_s32(
+ acc_lo[2], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_0, m4b), hbit_lo_0, 4)), q8_qs[0]);
+ acc_lo[2] = ggml_vdotq_s32(
+ acc_lo[2], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_1, m4b), hbit_lo_1, 4)), q8_qs[1]);
+ acc_lo[2] = ggml_vdotq_s32(
+ acc_lo[2], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_2, m4b), hbit_lo_2, 4)), q8_qs[2]);
+ acc_lo[2] = ggml_vdotq_s32(
+ acc_lo[2], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_3, m4b), hbit_lo_3, 4)), q8_qs[3]);
+ acc_hi[2] = ggml_vdotq_s32(acc_hi[2], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_0, 4), hbit_hi_0)),
+ q8_qs[4]);
+ acc_hi[2] = ggml_vdotq_s32(acc_hi[2], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_1, 4), hbit_hi_1)),
+ q8_qs[5]);
+ acc_hi[2] = ggml_vdotq_s32(acc_hi[2], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_2, 4), hbit_hi_2)),
+ q8_qs[6]);
+ acc_hi[2] = ggml_vdotq_s32(acc_hi[2], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_3, 4), hbit_hi_3)),
+ q8_qs[7]);
+
+ // Cols 67
+ qs_0 = vld1q_u8(qs_base + 48);
+ qs_1 = vld1q_u8(qs_base + 112);
+ qs_2 = vld1q_u8(qs_base + 176);
+ qs_3 = vld1q_u8(qs_base + 240);
+
+ hbit_lo_0 = vandq_u8(qh[3][0], mone);
+ hbit_lo_1 = vandq_u8(qh[3][1], mone);
+ hbit_lo_2 = vandq_u8(qh[3][2], mone);
+ hbit_lo_3 = vandq_u8(qh[3][3], mone);
+ hbit_hi_0 = vshlq_n_u8(vandq_u8(qh[3][0], mtwo), 3);
+ hbit_hi_1 = vshlq_n_u8(vandq_u8(qh[3][1], mtwo), 3);
+ hbit_hi_2 = vshlq_n_u8(vandq_u8(qh[3][2], mtwo), 3);
+ hbit_hi_3 = vshlq_n_u8(vandq_u8(qh[3][3], mtwo), 3);
+
+ qh[3][0] = vshrq_n_u8(qh[3][0], 2);
+ qh[3][1] = vshrq_n_u8(qh[3][1], 2);
+ qh[3][2] = vshrq_n_u8(qh[3][2], 2);
+ qh[3][3] = vshrq_n_u8(qh[3][3], 2);
+
+ acc_lo[3] = ggml_vdotq_s32(
+ acc_lo[3], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_0, m4b), hbit_lo_0, 4)), q8_qs[0]);
+ acc_lo[3] = ggml_vdotq_s32(
+ acc_lo[3], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_1, m4b), hbit_lo_1, 4)), q8_qs[1]);
+ acc_lo[3] = ggml_vdotq_s32(
+ acc_lo[3], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_2, m4b), hbit_lo_2, 4)), q8_qs[2]);
+ acc_lo[3] = ggml_vdotq_s32(
+ acc_lo[3], vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_3, m4b), hbit_lo_3, 4)), q8_qs[3]);
+ acc_hi[3] = ggml_vdotq_s32(acc_hi[3], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_0, 4), hbit_hi_0)),
+ q8_qs[4]);
+ acc_hi[3] = ggml_vdotq_s32(acc_hi[3], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_1, 4), hbit_hi_1)),
+ q8_qs[5]);
+ acc_hi[3] = ggml_vdotq_s32(acc_hi[3], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_2, 4), hbit_hi_2)),
+ q8_qs[6]);
+ acc_hi[3] = ggml_vdotq_s32(acc_hi[3], vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_3, 4), hbit_hi_3)),
+ q8_qs[7]);
+ }
+
+ // Prepare bsum vectors for bias computation
+ // Each pair of subblocks shares the same bsum
+ int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[2 * sb + 0]);
+ int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[2 * sb + 1]);
+
+ // Iterate over a pair of column pairs (4 columns) to use a single 128-bit register
+ // p = 0 -> cols 0123, p = 2 -> cols 4567
+ for (int i = 0, p = 0; p < col_pairs; i++, p += 2) {
+ int16x4_t group_scales_lo = p == 0 ? vget_low_s16(q5sb_scales[0]) : vget_high_s16(q5sb_scales[0]);
+ int16x4_t group_scales_hi = p == 0 ? vget_low_s16(q5sb_scales[1]) : vget_high_s16(q5sb_scales[1]);
+ int16x4_t group_mins_lo = p == 0 ? vget_low_s16(q5sb_mins[0]) : vget_high_s16(q5sb_mins[0]);
+ int16x4_t group_mins_hi = p == 0 ? vget_low_s16(q5sb_mins[1]) : vget_high_s16(q5sb_mins[1]);
+ float32x4_t sb_scale = p == 0 ? sb_scale_0 : sb_scale_1;
+ float32x4_t sb_min = p == 0 ? sb_min_0 : sb_min_1;
+
+ // 0123 or 4567
+ float32x4_t sumf_0 =
+ vcvtq_f32_s32(vmulq_s32(vmovl_s16(group_scales_lo), vpaddq_s32(acc_lo[p], acc_lo[p + 1])));
+ acc_f32[i] = vfmaq_f32(acc_f32[i], sb_scale, sumf_0);
+
+ float32x4_t sumf_1 =
+ vcvtq_f32_s32(vmulq_s32(vmovl_s16(group_scales_hi), vpaddq_s32(acc_hi[p], acc_hi[p + 1])));
+ acc_f32[i] = vfmaq_f32(acc_f32[i], sb_scale, sumf_1);
+
+ // FUSED BIAS: Compute and subtract bias immediately
+ // bias = (bsums_lo * mins_lo + bsums_hi * mins_hi) * sb_min
+ int32x4_t bias = vmull_s16(bsums_vec_lo, group_mins_lo);
+ bias = vmlal_s16(bias, bsums_vec_hi, group_mins_hi);
+ float32x4_t bias_f32 = vcvtq_f32_s32(bias);
+ acc_f32[i] = vmlsq_f32(acc_f32[i], sb_min, bias_f32);
+ }
+ } // for sb
+ } // for b
+
+ int base = x * ncols_interleaved;
+ vst1q_f32(s + base, acc_f32[0]);
+ vst1q_f32(s + base + 4, acc_f32[1]);
+ } // for x
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q5_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q6_K_8x4_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 4;
+
+ assert(n % qk == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ constexpr int col_groups = ncols_interleaved / 4;
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ const uint8x16_t mask_lo = vdupq_n_u8(0x03);
+ const uint8x16_t mask_hi = vdupq_n_u8(0x30);
+
+ // 1x8 tile = 2 x 4
+ float32x4_t acc_f32[2];
+
+ const block_q8_K * GGML_RESTRICT q8_ptr = (const block_q8_K *) vy;
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q6_Kx8 * GGML_RESTRICT q6_ptr = (const block_q6_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < col_groups; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ float32x4_t q6_d_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q6_ptr[b].d)); // d0 d1 d2 d3
+ float32x4_t q6_d_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q6_ptr[b].d + 4)); // d4 d5 d6 d7
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d);
+ float32x4_t sb_scale_0 = vmulq_f32(q6_d_0, q8_d);
+ float32x4_t sb_scale_1 = vmulq_f32(q6_d_1, q8_d);
+
+ int32x4_t acc[col_groups];
+ for (int i = 0; i < col_groups; i++) {
+ acc[i] = vdupq_n_s32(0);
+ }
+
+ // Load all 16 scales once and widen to int16 (Q6_K has 16 scales per block)
+ // Reused for bias and dequantization later
+ int16_t q6_scales[16 * 8];
+ for (int i = 0; i < 16; i++) {
+ int16x8_t scales = vmovl_s8(vld1_s8(q6_ptr[b].scales + i * 8));
+ vst1q_s16(q6_scales + i * 8, scales);
+ }
+
+ // Compute bias per column using q8 bsums and preloaded scales to skip the -32 shift
+ int32x4_t bias_lo = vdupq_n_s32(0);
+ int32x4_t bias_hi = vdupq_n_s32(0);
+
+ // Load bsums in chunks of 4 to process with vectorized operations
+ for (int i = 0; i < 16; i += 4) {
+ int16x4_t bsums_vec = vld1_s16(q8_ptr[b].bsums + i);
+ int16x4_t scales_lo_0 = vld1_s16(q6_scales + (i + 0) * 8);
+ int16x4_t scales_hi_0 = vld1_s16(q6_scales + (i + 0) * 8 + 4);
+ int16x4_t scales_lo_1 = vld1_s16(q6_scales + (i + 1) * 8);
+ int16x4_t scales_hi_1 = vld1_s16(q6_scales + (i + 1) * 8 + 4);
+ int16x4_t scales_lo_2 = vld1_s16(q6_scales + (i + 2) * 8);
+ int16x4_t scales_hi_2 = vld1_s16(q6_scales + (i + 2) * 8 + 4);
+ int16x4_t scales_lo_3 = vld1_s16(q6_scales + (i + 3) * 8);
+ int16x4_t scales_hi_3 = vld1_s16(q6_scales + (i + 3) * 8 + 4);
+
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_0, bsums_vec, 0);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_0, bsums_vec, 0);
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_1, bsums_vec, 1);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_1, bsums_vec, 1);
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_2, bsums_vec, 2);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_2, bsums_vec, 2);
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_3, bsums_vec, 3);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_3, bsums_vec, 3);
+ }
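+ // x32: applies the deferred -32 offset of the q6 values via the q8 bsums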
+ bias_lo = vshlq_n_s32(bias_lo, 5);
+ bias_hi = vshlq_n_s32(bias_hi, 5);
+
+ // Process two 128-value halves per superblock
+ for (int half = 0; half < 2; half++) {
+ const uint8_t * ql_base = q6_ptr[b].ql + half * 512;
+ const uint8_t * qh_base = q6_ptr[b].qh + half * 256;
+
+ // A subblock (sb) is a set of weights that share the scale
+ // Since q6_K scales are per 16 elements
+ // num sbs -> 256 elements / (16 elements/scale * 2 elements/byte * 2 halves)
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ const int8_t * q8_base_l = q8_ptr[b].qs + half * 128 + sb * 16;
+ const int8_t * q8_base_h = q8_base_l + 64;
+
+ // Load and duplicate q8 values (each register covers four interleaved columns of q6)
+ int8x16_t q8_l[4];
+ int8x16_t q8_h[4];
+ for (int i = 0; i < 4; i++) {
+ q8_l[i] = (int8x16_t) vld1q_dup_s32((const int32_t *) (q8_base_l + i * 4));
+ q8_h[i] = (int8x16_t) vld1q_dup_s32((const int32_t *) (q8_base_h + i * 4));
+ }
+
+ const int ql_off_base = sb * QK_K / 2;
+ const int qh_off_base = ql_off_base & 255; // wraps after 256 bytes
+
+ // Load 4 vectors at once (64 bytes each for ql_0, ql_1, qh_0, qh_1)
+ uint8x16x4_t q6_ql_0 = vld1q_u8_x4(ql_base + ql_off_base);
+ uint8x16x4_t q6_ql_1 = vld1q_u8_x4(ql_base + ql_off_base + 64);
+ uint8x16x4_t q6_qh_0 = vld1q_u8_x4(qh_base + qh_off_base);
+ uint8x16x4_t q6_qh_1 = vld1q_u8_x4(qh_base + qh_off_base + 64);
+
+ // Adjust qh for subblocks 2 and 3 (shift right by 2)
+ if (sb > 1) {
+ q6_qh_0.val[0] = vshrq_n_u8(q6_qh_0.val[0], 2);
+ q6_qh_0.val[1] = vshrq_n_u8(q6_qh_0.val[1], 2);
+ q6_qh_0.val[2] = vshrq_n_u8(q6_qh_0.val[2], 2);
+ q6_qh_0.val[3] = vshrq_n_u8(q6_qh_0.val[3], 2);
+ q6_qh_1.val[0] = vshrq_n_u8(q6_qh_1.val[0], 2);
+ q6_qh_1.val[1] = vshrq_n_u8(q6_qh_1.val[1], 2);
+ q6_qh_1.val[2] = vshrq_n_u8(q6_qh_1.val[2], 2);
+ q6_qh_1.val[3] = vshrq_n_u8(q6_qh_1.val[3], 2);
+ }
+
+ const uint8x16_t q6_ql[8] = { q6_ql_0.val[0], q6_ql_0.val[1], q6_ql_0.val[2], q6_ql_0.val[3],
+ q6_ql_1.val[0], q6_ql_1.val[1], q6_ql_1.val[2], q6_ql_1.val[3] };
+ const uint8x16_t q6_qh[8] = { q6_qh_0.val[0], q6_qh_0.val[1], q6_qh_0.val[2], q6_qh_0.val[3],
+ q6_qh_1.val[0], q6_qh_1.val[1], q6_qh_1.val[2], q6_qh_1.val[3] };
+
+ // Process column groups (0-3, 4-7)
+ for (int g = 0; g < col_groups; g++) {
+ int32x4_t sb_acc_l = vdupq_n_s32(0);
+ int32x4_t sb_acc_h = vdupq_n_s32(0);
+
+ for (int chunk = 0; chunk < 4; chunk++) {
+ const int idx = chunk * 2 + g;
+
+ const uint8x16_t q6_qs_l = q6_ql[idx];
+ const uint8x16_t q6_qs_h = q6_qh[idx];
+
+ // Extract high 2 bits for upper nibble reconstruction
+ const uint8x16_t q6_qs_hh = vandq_u8(q6_qs_h, mask_hi);
+
+ // q6 = (low4 | high2<<4), without -32 bias (handled via bsums)
+ const int8x16_t q6_l =
+ vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(q6_qs_l, m4b), vandq_u8(q6_qs_h, mask_lo), 4));
+ const int8x16_t q6_h = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6_qs_l, 4), q6_qs_hh));
+
+ sb_acc_l = vdotq_s32(sb_acc_l, q6_l, q8_l[chunk]);
+ sb_acc_h = vdotq_s32(sb_acc_h, q6_h, q8_h[chunk]);
+ }
+
+ const int scale_idx_l = half * 8 + sb;
+ const int scale_idx_h = half * 8 + sb + 4;
+
+ const int32x4_t scale_vec_l = vmovl_s16(vld1_s16(q6_scales + scale_idx_l * 8 + g * 4));
+ const int32x4_t scale_vec_h = vmovl_s16(vld1_s16(q6_scales + scale_idx_h * 8 + g * 4));
+
+ acc[g] = vmlaq_s32(acc[g], sb_acc_l, scale_vec_l);
+ acc[g] = vmlaq_s32(acc[g], sb_acc_h, scale_vec_h);
+ }
+ }
+ } // for half
+
+ // Bias correction
+ acc[0] = vsubq_s32(acc[0], bias_lo);
+ acc[1] = vsubq_s32(acc[1], bias_hi);
+
+ // Apply superblock scale (no mins for q6_K)
+ // acc[g] has [c0, c1, c2, c3]
+ float32x4_t w_0123 = vmulq_f32(vcvtq_f32_s32(acc[0]), sb_scale_0);
+ float32x4_t w_4567 = vmulq_f32(vcvtq_f32_s32(acc[1]), sb_scale_1);
+
+ acc_f32[0] = vaddq_f32(acc_f32[0], w_0123);
+ acc_f32[1] = vaddq_f32(acc_f32[1], w_4567);
+ } // for b
+
+ int base = x * ncols_interleaved;
+ vst1q_f32(s + base, acc_f32[0]);
+ vst1q_f32(s + base + 4, acc_f32[1]);
+ } // for x
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q6_K_8x4_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q6_K_8x8_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ constexpr int col_pairs = ncols_interleaved / 2;
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ const uint8x16_t mask_lo = vdupq_n_u8(0x03);
+ const uint8x16_t mask_hi = vdupq_n_u8(0x30);
+
+ // 1x8 tile = 2 x 4
+ float32x4_t acc_f32[2];
+
+ const block_q8_K * GGML_RESTRICT q8_ptr = (const block_q8_K *) vy;
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q6_Kx8 * GGML_RESTRICT q6_ptr = (const block_q6_Kx8 *) vx + (x * nb);
+
+ acc_f32[0] = vdupq_n_f32(0);
+ acc_f32[1] = vdupq_n_f32(0);
+
+ for (int b = 0; b < nb; b++) {
+ float32x4_t q6_d_0 = vcvt_f32_f16(vld1_f16((const __fp16 *) q6_ptr[b].d)); // d0 d1 d2 d3
+ float32x4_t q6_d_1 = vcvt_f32_f16(vld1_f16((const __fp16 *) q6_ptr[b].d + 4)); // d4 d5 d6 d7
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d);
+ float32x4_t sb_scale_0 = vmulq_f32(q6_d_0, q8_d);
+ float32x4_t sb_scale_1 = vmulq_f32(q6_d_1, q8_d);
+
+ int32x2_t acc[col_pairs];
+ for (int i = 0; i < col_pairs; i++) {
+ acc[i] = vdup_n_s32(0);
+ }
+
+ // Load all 16 scales once and widen to int16 (Q6_K has 16 scales per block)
+ // Reused for bias and dequantization later
+ int16_t q6_scales[16 * 8];
+ for (int i = 0; i < 16; i++) {
+ int16x8_t scales = vmovl_s8(vld1_s8(q6_ptr[b].scales + i * 8));
+ vst1q_s16(q6_scales + i * 8, scales);
+ }
+
+ // Compute bias per column using q8 bsums and preloaded scales to skip the -32 shift
+ int32x4_t bias_lo = vdupq_n_s32(0);
+ int32x4_t bias_hi = vdupq_n_s32(0);
+
+ // Load bsums in chunks of 4 to process with vectorized operations
+ for (int i = 0; i < 16; i += 4) {
+ int16x4_t bsums_vec = vld1_s16(q8_ptr[b].bsums + i);
+ int16x4_t scales_lo_0 = vld1_s16(q6_scales + (i + 0) * 8);
+ int16x4_t scales_hi_0 = vld1_s16(q6_scales + (i + 0) * 8 + 4);
+ int16x4_t scales_lo_1 = vld1_s16(q6_scales + (i + 1) * 8);
+ int16x4_t scales_hi_1 = vld1_s16(q6_scales + (i + 1) * 8 + 4);
+ int16x4_t scales_lo_2 = vld1_s16(q6_scales + (i + 2) * 8);
+ int16x4_t scales_hi_2 = vld1_s16(q6_scales + (i + 2) * 8 + 4);
+ int16x4_t scales_lo_3 = vld1_s16(q6_scales + (i + 3) * 8);
+ int16x4_t scales_hi_3 = vld1_s16(q6_scales + (i + 3) * 8 + 4);
+
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_0, bsums_vec, 0);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_0, bsums_vec, 0);
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_1, bsums_vec, 1);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_1, bsums_vec, 1);
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_2, bsums_vec, 2);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_2, bsums_vec, 2);
+ bias_lo = vmlal_lane_s16(bias_lo, scales_lo_3, bsums_vec, 3);
+ bias_hi = vmlal_lane_s16(bias_hi, scales_hi_3, bsums_vec, 3);
+ }
+ bias_lo = vshlq_n_s32(bias_lo, 5);
+ bias_hi = vshlq_n_s32(bias_hi, 5);
+
+ // Process two 128-value halves per superblock
+ for (int half = 0; half < 2; half++) {
+ const uint8_t * ql_base = q6_ptr[b].ql + half * 512;
+ const uint8_t * qh_base = q6_ptr[b].qh + half * 256;
+
+ // A subblock (sb) is a set of weights that share the scale
+ // Since q6_K scales are per 16 elements
+ // num sbs -> 256 elements / (16 elements/scale * 2 elements/byte * 2 halves)
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ const int8_t * q8_base_l = q8_ptr[b].qs + half * 128 + sb * 16;
+ const int8_t * q8_base_h = q8_base_l + 64;
+
+ // Load and duplicate q8 values (each register covers two interleaved columns of q6)
+ int8x16_t q8_l[2];
+ int8x16_t q8_h[2];
+ for (int i = 0; i < 2; i++) {
+ q8_l[i] = (int8x16_t) vld1q_dup_s64((const int64_t *) (q8_base_l + i * 8));
+ q8_h[i] = (int8x16_t) vld1q_dup_s64((const int64_t *) (q8_base_h + i * 8));
+ }
+
+ const int ql_off_base = sb * QK_K / 2;
+ const int qh_off_base = ql_off_base & 255; // wraps after 256 bytes
+
+ // Load 4 vectors at once (64 bytes each for ql_0, ql_1, qh_0, qh_1)
+ uint8x16x4_t q6_ql_0 = vld1q_u8_x4(ql_base + ql_off_base);
+ uint8x16x4_t q6_ql_1 = vld1q_u8_x4(ql_base + ql_off_base + 64);
+ uint8x16x4_t q6_qh_0 = vld1q_u8_x4(qh_base + qh_off_base);
+ uint8x16x4_t q6_qh_1 = vld1q_u8_x4(qh_base + qh_off_base + 64);
+
+ // Adjust qh for subblocks 2 and 3 (shift right by 2)
+ if (sb > 1) {
+ q6_qh_0.val[0] = vshrq_n_u8(q6_qh_0.val[0], 2);
+ q6_qh_0.val[1] = vshrq_n_u8(q6_qh_0.val[1], 2);
+ q6_qh_0.val[2] = vshrq_n_u8(q6_qh_0.val[2], 2);
+ q6_qh_0.val[3] = vshrq_n_u8(q6_qh_0.val[3], 2);
+ q6_qh_1.val[0] = vshrq_n_u8(q6_qh_1.val[0], 2);
+ q6_qh_1.val[1] = vshrq_n_u8(q6_qh_1.val[1], 2);
+ q6_qh_1.val[2] = vshrq_n_u8(q6_qh_1.val[2], 2);
+ q6_qh_1.val[3] = vshrq_n_u8(q6_qh_1.val[3], 2);
+ }
+
+ // Process column pairs (0-1, 2-3, 4-5, 6-7)
+ for (int cp = 0; cp < col_pairs; cp++) {
+ const uint8x16_t q6_qs_cp_0_l = q6_ql_0.val[cp];
+ const uint8x16_t q6_qs_cp_1_l = q6_ql_1.val[cp];
+ const uint8x16_t q6_qs_cp_0_h = q6_qh_0.val[cp];
+ const uint8x16_t q6_qs_cp_1_h = q6_qh_1.val[cp];
+
+ // Extract high 2 bits for upper nibble reconstruction
+ const uint8x16_t q6_qs_cp_0_hh = vandq_u8(q6_qs_cp_0_h, mask_hi);
+ const uint8x16_t q6_qs_cp_1_hh = vandq_u8(q6_qs_cp_1_h, mask_hi);
+
+ // q6 = (low4 | high2<<4), without -32 bias (handled via bsums)
+ const int8x16_t q6_l0 = vreinterpretq_s8_u8(
+ vsliq_n_u8(vandq_u8(q6_qs_cp_0_l, m4b), vandq_u8(q6_qs_cp_0_h, mask_lo), 4));
+ const int8x16_t q6_l1 = vreinterpretq_s8_u8(
+ vsliq_n_u8(vandq_u8(q6_qs_cp_1_l, m4b), vandq_u8(q6_qs_cp_1_h, mask_lo), 4));
+ const int8x16_t q6_h0 =
+ vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6_qs_cp_0_l, 4), q6_qs_cp_0_hh));
+ const int8x16_t q6_h1 =
+ vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6_qs_cp_1_l, 4), q6_qs_cp_1_hh));
+
+ int32x4_t sb_acc_l = vdupq_n_s32(0);
+ sb_acc_l = vdotq_s32(sb_acc_l, q6_l0, q8_l[0]);
+ sb_acc_l = vdotq_s32(sb_acc_l, q6_l1, q8_l[1]);
+
+ int32x4_t sb_acc_h = vdupq_n_s32(0);
+ sb_acc_h = vdotq_s32(sb_acc_h, q6_h0, q8_h[0]);
+ sb_acc_h = vdotq_s32(sb_acc_h, q6_h1, q8_h[1]);
+
+ // Pairwise add to get per-column sums: [col0, col1]
+ int32x2_t sum_l = vpadd_s32(vget_low_s32(sb_acc_l), vget_high_s32(sb_acc_l));
+ int32x2_t sum_h = vpadd_s32(vget_low_s32(sb_acc_h), vget_high_s32(sb_acc_h));
+
+ const int scale_idx_l = half * 8 + sb;
+ const int scale_idx_h = half * 8 + sb + 4;
+
+ // Access scales using array indexing (scales are interleaved by column)
+ const int32x2_t scale_vec_l = { (int32_t) q6_scales[scale_idx_l * 8 + cp * 2],
+ (int32_t) q6_scales[scale_idx_l * 8 + cp * 2 + 1] };
+ const int32x2_t scale_vec_h = { (int32_t) q6_scales[scale_idx_h * 8 + cp * 2],
+ (int32_t) q6_scales[scale_idx_h * 8 + cp * 2 + 1] };
+
+ // Accumulate scaled results
+ acc[cp] = vmla_s32(acc[cp], sum_l, scale_vec_l);
+ acc[cp] = vmla_s32(acc[cp], sum_h, scale_vec_h);
+ }
+ }
+ } // for half
+
+ // Bias correction
+ acc[0] = vsub_s32(acc[0], vget_low_s32(bias_lo));
+ acc[1] = vsub_s32(acc[1], vget_high_s32(bias_lo));
+ acc[2] = vsub_s32(acc[2], vget_low_s32(bias_hi));
+ acc[3] = vsub_s32(acc[3], vget_high_s32(bias_hi));
+
+ // Apply superblock scale (no mins for q6_K)
+ // acc[cp] has [c0, c1]
+ float32x2_t w_01 = vmul_f32(vcvt_f32_s32(acc[0]), vget_low_f32(sb_scale_0));
+ float32x2_t w_23 = vmul_f32(vcvt_f32_s32(acc[1]), vget_high_f32(sb_scale_0));
+ float32x2_t w_45 = vmul_f32(vcvt_f32_s32(acc[2]), vget_low_f32(sb_scale_1));
+ float32x2_t w_67 = vmul_f32(vcvt_f32_s32(acc[3]), vget_high_f32(sb_scale_1));
+
+ acc_f32[0] = vaddq_f32(acc_f32[0], vcombine_f32(w_01, w_23));
+ acc_f32[1] = vaddq_f32(acc_f32[1], vcombine_f32(w_45, w_67));
+ } // for b
+
+ int base = x * ncols_interleaved;
+ vst1q_f32(s + base, acc_f32[0]);
+ vst1q_f32(s + base + 4, acc_f32[1]);
+ } // for x
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q6_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q8_0_4x4_q8_0(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 4;
+
+ assert(n % qk == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx;
+
+ for (int c = 0; c < nc; c += ncols_interleaved) {
+ const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
+ float32x4_t acc = vdupq_n_f32(0);
+ for (int b = 0; b < nb; b++) {
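+            // block_q8_0x4 interleaves 4 columns in 4-byte groups, so each 16-byte vector of b_low/b_high
+            // holds the same 4 weight positions for all 4 columns; vdotq_laneq then dots one 4-byte chunk
+            // of the activation against all 4 columns at once, giving one int32 partial sum per column.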
+ int8x16x4_t b_low = vld1q_s8_x4((const int8_t *) b_ptr->qs);
+ int8x16x4_t b_high = vld1q_s8_x4((const int8_t *) b_ptr->qs + 64);
+ float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d);
+
+ int8x16x2_t a = vld1q_s8_x2(a_ptr->qs);
+ float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d);
+
+ int32x4_t ret = vdupq_n_s32(0);
+
+ ret = vdotq_laneq_s32(ret, b_low.val[0], a.val[0], 0);
+ ret = vdotq_laneq_s32(ret, b_low.val[1], a.val[0], 1);
+ ret = vdotq_laneq_s32(ret, b_low.val[2], a.val[0], 2);
+ ret = vdotq_laneq_s32(ret, b_low.val[3], a.val[0], 3);
+
+ ret = vdotq_laneq_s32(ret, b_high.val[0], a.val[1], 0);
+ ret = vdotq_laneq_s32(ret, b_high.val[1], a.val[1], 1);
+ ret = vdotq_laneq_s32(ret, b_high.val[2], a.val[1], 2);
+ ret = vdotq_laneq_s32(ret, b_high.val[3], a.val[1], 3);
+
+ acc = vfmaq_f32(acc, vcvtq_f32_s32(ret), vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd)));
+ a_ptr++;
+ b_ptr++;
+ }
+ vst1q_f32(s, acc);
+ s += ncols_interleaved;
+ }
+ return;
+
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q8_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemv_q8_0_4x8_q8_0(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx;
+
+ for (int c = 0; c < nc; c += ncols_interleaved) {
+ const block_q8_0 * a_ptr = (const block_q8_0 *) vy;
+ float32x4_t acc = vdupq_n_f32(0);
+
+ for (int b = 0; b < nb; b++) {
+ int8x16x4_t b_low = vld1q_s8_x4((const int8_t *) b_ptr->qs);
+ int8x16x4_t b_high = vld1q_s8_x4((const int8_t *) b_ptr->qs + 64);
+ float16x4_t bd = vld1_f16((const __fp16 *) b_ptr->d);
+
+ int8x8x4_t a_chunks = vld1_s8_x4(a_ptr->qs);
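+            // Each 8-byte activation chunk is duplicated into both halves of a 128-bit register so that a
+            // single vdotq against a weight vector holding two adjacent interleaved columns yields partial
+            // sums for both columns at once.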
+ int8x16_t a0 = vcombine_s8(a_chunks.val[0], a_chunks.val[0]);
+ int8x16_t a1 = vcombine_s8(a_chunks.val[1], a_chunks.val[1]);
+ int8x16_t a2 = vcombine_s8(a_chunks.val[2], a_chunks.val[2]);
+ int8x16_t a3 = vcombine_s8(a_chunks.val[3], a_chunks.val[3]);
+ float16x4_t ad = vld1_dup_f16((const __fp16 *) &a_ptr->d);
+
+ int32x4_t ret0 = vdupq_n_s32(0);
+ int32x4_t ret1 = vdupq_n_s32(0);
+
+ // 0..7
+ ret0 = vdotq_s32(ret0, b_low.val[0], a0);
+ ret1 = vdotq_s32(ret1, b_low.val[1], a0);
+ // 8..15
+ ret0 = vdotq_s32(ret0, b_low.val[2], a1);
+ ret1 = vdotq_s32(ret1, b_low.val[3], a1);
+ // 16..23
+ ret0 = vdotq_s32(ret0, b_high.val[0], a2);
+ ret1 = vdotq_s32(ret1, b_high.val[1], a2);
+ // 24..31
+ ret0 = vdotq_s32(ret0, b_high.val[2], a3);
+ ret1 = vdotq_s32(ret1, b_high.val[3], a3);
+
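+            // Pairwise add folds the per-column halves of ret0/ret1 into one vector: [col0, col1, col2, col3]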
+ int32x4_t ret = vpaddq_s32(ret0, ret1);
+
+ acc = vfmaq_f32(acc, vcvtq_f32_s32(ret), vmulq_f32(vcvt_f32_f16(ad), vcvt_f32_f16(bd)));
+ a_ptr++;
+ b_ptr++;
+ }
+ vst1q_f32(s, acc);
+ s += ncols_interleaved;
+ }
+ return;
+
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemv_q8_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemm_q4_0_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 4;
+
+ assert (n % qk == 0);
+ assert (nr % 4 == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ const void * b_ptr = vx;
+ const void * a_ptr = vy;
+ float * res_ptr = s;
+ size_t res_stride = bs * sizeof(float);
+
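+    // Hand-written NEON/dotprod kernel. The main path (labels 1-3) walks the activations in blocks of
+    // 16 rows (four q8_0x4 row groups at x25/x23/x22/x21) against 4 interleaved q4_0 columns, keeping
+    // 16 float accumulators live; labels 5-8 handle the remaining rows four at a time, with conditional
+    // stores for a final partial group. The "scvtf ..., #0x4" converts the int32 dot products to float
+    // while dividing by 16 to undo the nibble shift applied when unpacking the 4-bit weights.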
+ __asm__ __volatile__(
+ "mov x10, %x[nr]\n"
+ "mov x9, #0x88\n"
+ "cmp x10, #0x10\n"
+ "mul x9, %x[nb], x9\n"
+ "blt 4f\n"
+ "1:" // Row loop
+ "add x28, %x[b_ptr], #0x8\n"
+ "mov x27, %x[nc]\n"
+ "add x26, %x[res_ptr], %x[res_stride], LSL #4\n"
+ "2:" // Column loop
+ "add x25, %x[a_ptr], #0x8\n"
+ "movi v15.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "mov x24, %x[nb]\n"
+ "add x23, x25, x9\n"
+ "movi v18.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "add x22, x23, x9\n"
+ "movi v11.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "add x21, x22, x9\n"
+ "movi v23.16b, #0x0\n"
+ "movi v16.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v7.16b, #0x0\n"
+ "movi v0.16b, #0x0\n"
+ "movi v4.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "movi v21.16b, #0x0\n"
+ "movi v8.16b, #0x0\n"
+ "movi v1.16b, #0x0\n"
+ "3:" // Block loop
+ "ldr q3, [x28, #0x0]\n"
+ "ldr q31, [x25, #0x0]\n"
+ "movi v28.16b, #0x4\n"
+ "movi v10.4s, #0x0\n"
+ "ldr q22, [x28, #0x10]\n"
+ "ldr q6, [x25, #0x10]\n"
+ "movi v29.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ "ldr q27, [x28, #0x20]\n"
+ "ldr q30, [x28, #0x30]\n"
+ "movi v20.4s, #0x0\n"
+ "movi v24.16b, #0xf0\n"
+ "ldr d2, [x25, #-0x8]\n"
+ "ldr d26, [x23, #-0x8]\n"
+ "sshl v12.16b, v3.16b, v28.16b\n"
+ "sub x20, x28, #0x8\n"
+ "ldr d17, [x20, #0x0]\n"
+ "and v3.16b, v3.16b, v24.16b\n"
+ "subs x24, x24, #0x1\n"
+ "add x28, x28, #0x48\n"
+ ".inst 0x4f9fe18a // sdot v10.4s, v12.16b, v31.4b[0]\n"
+ ".inst 0x4fbfe19d // sdot v29.4s, v12.16b, v31.4b[1]\n"
+ ".inst 0x4f9fe989 // sdot v9.4s, v12.16b, v31.4b[2]\n"
+ ".inst 0x4fbfe994 // sdot v20.4s, v12.16b, v31.4b[3]\n"
+ "sshl v31.16b, v22.16b, v28.16b\n"
+ "and v22.16b, v22.16b, v24.16b\n"
+ "fcvtl v17.4s, v17.4h\n"
+ "fcvtl v2.4s, v2.4h\n"
+ "fcvtl v26.4s, v26.4h\n"
+ ".inst 0x4f86e3ea // sdot v10.4s, v31.16b, v6.4b[0]\n"
+ ".inst 0x4fa6e3fd // sdot v29.4s, v31.16b, v6.4b[1]\n"
+ ".inst 0x4f86ebe9 // sdot v9.4s, v31.16b, v6.4b[2]\n"
+ ".inst 0x4fa6ebf4 // sdot v20.4s, v31.16b, v6.4b[3]\n"
+ "sshl v6.16b, v27.16b, v28.16b\n"
+ "sshl v28.16b, v30.16b, v28.16b\n"
+ "and v27.16b, v27.16b, v24.16b\n"
+ "and v30.16b, v30.16b, v24.16b\n"
+ "ldr q24, [x25, #0x20]\n"
+ ".inst 0x4f98e0ca // sdot v10.4s, v6.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n"
+ ".inst 0x4f98e8c9 // sdot v9.4s, v6.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e8d4 // sdot v20.4s, v6.16b, v24.4b[3]\n"
+ "ldr q24, [x25, #0x30]\n"
+ ".inst 0x4f98e38a // sdot v10.4s, v28.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e39d // sdot v29.4s, v28.16b, v24.4b[1]\n"
+ ".inst 0x4f98eb89 // sdot v9.4s, v28.16b, v24.4b[2]\n"
+ ".inst 0x4fb8eb94 // sdot v20.4s, v28.16b, v24.4b[3]\n"
+ "ldr q24, [x25, #0x40]\n"
+ ".inst 0x4f98e06a // sdot v10.4s, v3.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n"
+ ".inst 0x4f98e869 // sdot v9.4s, v3.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e874 // sdot v20.4s, v3.16b, v24.4b[3]\n"
+ "ldr q24, [x25, #0x50]\n"
+ ".inst 0x4f98e2ca // sdot v10.4s, v22.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e2dd // sdot v29.4s, v22.16b, v24.4b[1]\n"
+ ".inst 0x4f98eac9 // sdot v9.4s, v22.16b, v24.4b[2]\n"
+ ".inst 0x4fb8ead4 // sdot v20.4s, v22.16b, v24.4b[3]\n"
+ "ldr q24, [x25, #0x60]\n"
+ ".inst 0x4f98e36a // sdot v10.4s, v27.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n"
+ ".inst 0x4f98eb69 // sdot v9.4s, v27.16b, v24.4b[2]\n"
+ ".inst 0x4fb8eb74 // sdot v20.4s, v27.16b, v24.4b[3]\n"
+ "ldr q24, [x25, #0x70]\n"
+ "add x25, x25, #0x88\n"
+ ".inst 0x4f98e3ca // sdot v10.4s, v30.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e3dd // sdot v29.4s, v30.16b, v24.4b[1]\n"
+ ".inst 0x4f98ebc9 // sdot v9.4s, v30.16b, v24.4b[2]\n"
+ ".inst 0x4fb8ebd4 // sdot v20.4s, v30.16b, v24.4b[3]\n"
+ "fmul v24.4s, v17.4s, v2.s[0]\n"
+ "scvtf v10.4s, v10.4s, #0x4\n"
+ "scvtf v29.4s, v29.4s, #0x4\n"
+ "scvtf v9.4s, v9.4s, #0x4\n"
+ "scvtf v20.4s, v20.4s, #0x4\n"
+ "fmla v15.4s, v10.4s, v24.4s\n"
+ "ldr q24, [x23, #0x0]\n"
+ "fmul v10.4s, v17.4s, v2.s[1]\n"
+ "fmla v19.4s, v29.4s, v10.4s\n"
+ "ldr q10, [x23, #0x10]\n"
+ "fmul v29.4s, v17.4s, v2.s[2]\n"
+ "fmul v2.4s, v17.4s, v2.s[3]\n"
+ "fmla v18.4s, v9.4s, v29.4s\n"
+ "movi v9.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ ".inst 0x4f98e189 // sdot v9.4s, v12.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e19d // sdot v29.4s, v12.16b, v24.4b[1]\n"
+ "fmla v14.4s, v20.4s, v2.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v2.4s, #0x0\n"
+ ".inst 0x4f98e994 // sdot v20.4s, v12.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n"
+ "ldr q24, [x23, #0x20]\n"
+ ".inst 0x4f8ae3e9 // sdot v9.4s, v31.16b, v10.4b[0]\n"
+ ".inst 0x4faae3fd // sdot v29.4s, v31.16b, v10.4b[1]\n"
+ ".inst 0x4f8aebf4 // sdot v20.4s, v31.16b, v10.4b[2]\n"
+ ".inst 0x4faaebe2 // sdot v2.4s, v31.16b, v10.4b[3]\n"
+ "ldr q10, [x23, #0x30]\n"
+ ".inst 0x4f98e0c9 // sdot v9.4s, v6.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e0dd // sdot v29.4s, v6.16b, v24.4b[1]\n"
+ ".inst 0x4f98e8d4 // sdot v20.4s, v6.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n"
+ "ldr q24, [x23, #0x40]\n"
+ ".inst 0x4f8ae389 // sdot v9.4s, v28.16b, v10.4b[0]\n"
+ ".inst 0x4faae39d // sdot v29.4s, v28.16b, v10.4b[1]\n"
+ ".inst 0x4f8aeb94 // sdot v20.4s, v28.16b, v10.4b[2]\n"
+ ".inst 0x4faaeb82 // sdot v2.4s, v28.16b, v10.4b[3]\n"
+ "ldr q10, [x23, #0x50]\n"
+ ".inst 0x4f98e069 // sdot v9.4s, v3.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e07d // sdot v29.4s, v3.16b, v24.4b[1]\n"
+ ".inst 0x4f98e874 // sdot v20.4s, v3.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n"
+ "ldr q24, [x23, #0x60]\n"
+ ".inst 0x4f8ae2c9 // sdot v9.4s, v22.16b, v10.4b[0]\n"
+ ".inst 0x4faae2dd // sdot v29.4s, v22.16b, v10.4b[1]\n"
+ ".inst 0x4f8aead4 // sdot v20.4s, v22.16b, v10.4b[2]\n"
+ ".inst 0x4faaeac2 // sdot v2.4s, v22.16b, v10.4b[3]\n"
+ "ldr q10, [x23, #0x70]\n"
+ "add x23, x23, #0x88\n"
+ ".inst 0x4f98e369 // sdot v9.4s, v27.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e37d // sdot v29.4s, v27.16b, v24.4b[1]\n"
+ ".inst 0x4f98eb74 // sdot v20.4s, v27.16b, v24.4b[2]\n"
+ ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n"
+ "ldr q24, [x22, #0x0]\n"
+ ".inst 0x4f8ae3c9 // sdot v9.4s, v30.16b, v10.4b[0]\n"
+ ".inst 0x4faae3dd // sdot v29.4s, v30.16b, v10.4b[1]\n"
+ ".inst 0x4f8aebd4 // sdot v20.4s, v30.16b, v10.4b[2]\n"
+ ".inst 0x4faaebc2 // sdot v2.4s, v30.16b, v10.4b[3]\n"
+ "fmul v10.4s, v17.4s, v26.s[0]\n"
+ "scvtf v9.4s, v9.4s, #0x4\n"
+ "scvtf v29.4s, v29.4s, #0x4\n"
+ "scvtf v20.4s, v20.4s, #0x4\n"
+ "scvtf v2.4s, v2.4s, #0x4\n"
+ "fmla v11.4s, v9.4s, v10.4s\n"
+ "ldr q9, [x22, #0x10]\n"
+ "fmul v10.4s, v17.4s, v26.s[1]\n"
+ "fmla v13.4s, v29.4s, v10.4s\n"
+ "ldr d29, [x22, #-0x8]\n"
+ "fmul v10.4s, v17.4s, v26.s[2]\n"
+ "fmul v26.4s, v17.4s, v26.s[3]\n"
+ "fcvtl v29.4s, v29.4h\n"
+ "fmla v23.4s, v20.4s, v10.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "fmla v16.4s, v2.4s, v26.4s\n"
+ "movi v26.4s, #0x0\n"
+ "movi v2.4s, #0x0\n"
+ ".inst 0x4f98e194 // sdot v20.4s, v12.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n"
+ ".inst 0x4f98e99a // sdot v26.4s, v12.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e982 // sdot v2.4s, v12.16b, v24.4b[3]\n"
+ "ldr q24, [x22, #0x20]\n"
+ ".inst 0x4f89e3f4 // sdot v20.4s, v31.16b, v9.4b[0]\n"
+ ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n"
+ ".inst 0x4f89ebfa // sdot v26.4s, v31.16b, v9.4b[2]\n"
+ ".inst 0x4fa9ebe2 // sdot v2.4s, v31.16b, v9.4b[3]\n"
+ "ldr q9, [x22, #0x30]\n"
+ ".inst 0x4f98e0d4 // sdot v20.4s, v6.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e0ca // sdot v10.4s, v6.16b, v24.4b[1]\n"
+ ".inst 0x4f98e8da // sdot v26.4s, v6.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e8c2 // sdot v2.4s, v6.16b, v24.4b[3]\n"
+ "ldr q24, [x22, #0x40]\n"
+ ".inst 0x4f89e394 // sdot v20.4s, v28.16b, v9.4b[0]\n"
+ ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n"
+ ".inst 0x4f89eb9a // sdot v26.4s, v28.16b, v9.4b[2]\n"
+ ".inst 0x4fa9eb82 // sdot v2.4s, v28.16b, v9.4b[3]\n"
+ "ldr q9, [x22, #0x50]\n"
+ ".inst 0x4f98e074 // sdot v20.4s, v3.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e06a // sdot v10.4s, v3.16b, v24.4b[1]\n"
+ ".inst 0x4f98e87a // sdot v26.4s, v3.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e862 // sdot v2.4s, v3.16b, v24.4b[3]\n"
+ "ldr q24, [x22, #0x60]\n"
+ ".inst 0x4f89e2d4 // sdot v20.4s, v22.16b, v9.4b[0]\n"
+ ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n"
+ ".inst 0x4f89eada // sdot v26.4s, v22.16b, v9.4b[2]\n"
+ ".inst 0x4fa9eac2 // sdot v2.4s, v22.16b, v9.4b[3]\n"
+ "ldr q9, [x22, #0x70]\n"
+ "add x22, x22, #0x88\n"
+ ".inst 0x4f98e374 // sdot v20.4s, v27.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e36a // sdot v10.4s, v27.16b, v24.4b[1]\n"
+ ".inst 0x4f98eb7a // sdot v26.4s, v27.16b, v24.4b[2]\n"
+ ".inst 0x4fb8eb62 // sdot v2.4s, v27.16b, v24.4b[3]\n"
+ "ldr q24, [x21, #0x0]\n"
+ ".inst 0x4f89e3d4 // sdot v20.4s, v30.16b, v9.4b[0]\n"
+ ".inst 0x4fa9e3ca // sdot v10.4s, v30.16b, v9.4b[1]\n"
+ ".inst 0x4f89ebda // sdot v26.4s, v30.16b, v9.4b[2]\n"
+ ".inst 0x4fa9ebc2 // sdot v2.4s, v30.16b, v9.4b[3]\n"
+ "fmul v9.4s, v17.4s, v29.s[0]\n"
+ "scvtf v20.4s, v20.4s, #0x4\n"
+ "scvtf v10.4s, v10.4s, #0x4\n"
+ "scvtf v26.4s, v26.4s, #0x4\n"
+ "scvtf v2.4s, v2.4s, #0x4\n"
+ "fmla v25.4s, v20.4s, v9.4s\n"
+ "ldr q9, [x21, #0x10]\n"
+ "fmul v20.4s, v17.4s, v29.s[1]\n"
+ "fmla v7.4s, v10.4s, v20.4s\n"
+ "ldr d20, [x21, #-0x8]\n"
+ "fmul v10.4s, v17.4s, v29.s[2]\n"
+ "fmul v29.4s, v17.4s, v29.s[3]\n"
+ "fcvtl v20.4s, v20.4h\n"
+ "fmla v0.4s, v26.4s, v10.4s\n"
+ "movi v26.4s, #0x0\n"
+ "movi v10.4s, #0x0\n"
+ "fmla v4.4s, v2.4s, v29.4s\n"
+ "movi v2.4s, #0x0\n"
+ "movi v29.4s, #0x0\n"
+ ".inst 0x4f98e19a // sdot v26.4s, v12.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e18a // sdot v10.4s, v12.16b, v24.4b[1]\n"
+ ".inst 0x4f98e982 // sdot v2.4s, v12.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e99d // sdot v29.4s, v12.16b, v24.4b[3]\n"
+ "ldr q12, [x21, #0x20]\n"
+ "fmul v24.4s, v17.4s, v20.s[0]\n"
+ ".inst 0x4f89e3fa // sdot v26.4s, v31.16b, v9.4b[0]\n"
+ ".inst 0x4fa9e3ea // sdot v10.4s, v31.16b, v9.4b[1]\n"
+ ".inst 0x4f89ebe2 // sdot v2.4s, v31.16b, v9.4b[2]\n"
+ ".inst 0x4fa9ebfd // sdot v29.4s, v31.16b, v9.4b[3]\n"
+ "ldr q9, [x21, #0x30]\n"
+ "fmul v31.4s, v17.4s, v20.s[1]\n"
+ ".inst 0x4f8ce0da // sdot v26.4s, v6.16b, v12.4b[0]\n"
+ ".inst 0x4face0ca // sdot v10.4s, v6.16b, v12.4b[1]\n"
+ ".inst 0x4f8ce8c2 // sdot v2.4s, v6.16b, v12.4b[2]\n"
+ ".inst 0x4face8dd // sdot v29.4s, v6.16b, v12.4b[3]\n"
+ "ldr q12, [x21, #0x40]\n"
+ "fmul v6.4s, v17.4s, v20.s[2]\n"
+ "fmul v20.4s, v17.4s, v20.s[3]\n"
+ ".inst 0x4f89e39a // sdot v26.4s, v28.16b, v9.4b[0]\n"
+ ".inst 0x4fa9e38a // sdot v10.4s, v28.16b, v9.4b[1]\n"
+ ".inst 0x4f89eb82 // sdot v2.4s, v28.16b, v9.4b[2]\n"
+ ".inst 0x4fa9eb9d // sdot v29.4s, v28.16b, v9.4b[3]\n"
+ "ldr q9, [x21, #0x50]\n"
+ ".inst 0x4f8ce07a // sdot v26.4s, v3.16b, v12.4b[0]\n"
+ ".inst 0x4face06a // sdot v10.4s, v3.16b, v12.4b[1]\n"
+ ".inst 0x4f8ce862 // sdot v2.4s, v3.16b, v12.4b[2]\n"
+ ".inst 0x4face87d // sdot v29.4s, v3.16b, v12.4b[3]\n"
+ "ldr q12, [x21, #0x60]\n"
+ ".inst 0x4f89e2da // sdot v26.4s, v22.16b, v9.4b[0]\n"
+ ".inst 0x4fa9e2ca // sdot v10.4s, v22.16b, v9.4b[1]\n"
+ ".inst 0x4f89eac2 // sdot v2.4s, v22.16b, v9.4b[2]\n"
+ ".inst 0x4fa9eadd // sdot v29.4s, v22.16b, v9.4b[3]\n"
+ "ldr q17, [x21, #0x70]\n"
+ "add x21, x21, #0x88\n"
+ ".inst 0x4f8ce37a // sdot v26.4s, v27.16b, v12.4b[0]\n"
+ ".inst 0x4face36a // sdot v10.4s, v27.16b, v12.4b[1]\n"
+ ".inst 0x4f8ceb62 // sdot v2.4s, v27.16b, v12.4b[2]\n"
+ ".inst 0x4faceb7d // sdot v29.4s, v27.16b, v12.4b[3]\n"
+ ".inst 0x4f91e3da // sdot v26.4s, v30.16b, v17.4b[0]\n"
+ ".inst 0x4fb1e3ca // sdot v10.4s, v30.16b, v17.4b[1]\n"
+ ".inst 0x4f91ebc2 // sdot v2.4s, v30.16b, v17.4b[2]\n"
+ ".inst 0x4fb1ebdd // sdot v29.4s, v30.16b, v17.4b[3]\n"
+ "scvtf v26.4s, v26.4s, #0x4\n"
+ "scvtf v10.4s, v10.4s, #0x4\n"
+ "fmla v5.4s, v26.4s, v24.4s\n"
+ "scvtf v2.4s, v2.4s, #0x4\n"
+ "scvtf v29.4s, v29.4s, #0x4\n"
+ "fmla v21.4s, v10.4s, v31.4s\n"
+ "fmla v8.4s, v2.4s, v6.4s\n"
+ "fmla v1.4s, v29.4s, v20.4s\n"
+ "bgt 3b\n"
+ "mov x20, %x[res_ptr]\n"
+ "subs x27, x27, #0x4\n"
+ "add %x[res_ptr], %x[res_ptr], #0x10\n"
+ "str q15, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q19, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q18, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q14, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q11, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q13, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q23, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q16, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q25, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q7, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q0, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q4, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q5, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q21, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q8, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q1, [x20, #0x0]\n"
+ "bne 2b\n"
+ "mov x20, #0x4\n"
+ "sub x10, x10, #0x10\n"
+ "cmp x10, #0x10\n"
+ "mov %x[res_ptr], x26\n"
+ "madd %x[a_ptr], x20, x9, %x[a_ptr]\n"
+ "bge 1b\n"
+ "4:" // Row loop skip
+ "cbz x10, 9f\n"
+ "5:" // Row tail: Row loop
+ "add x24, %x[b_ptr], #0x8\n"
+ "mov x23, %x[nc]\n"
+ "add x22, %x[res_ptr], %x[res_stride], LSL #2\n"
+ "6:" // Row tail: Column loop
+ "movi v15.16b, #0x0\n"
+ "movi v19.16b, #0x0\n"
+ "add x25, %x[a_ptr], #0x8\n"
+ "mov x21, %x[nb]\n"
+ "movi v18.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "7:" // Row tail: Block loop
+ "ldr q7, [x24, #0x0]\n"
+ "ldr q5, [x25, #0x0]\n"
+ "movi v9.16b, #0x4\n"
+ "movi v4.4s, #0x0\n"
+ "ldr q3, [x24, #0x10]\n"
+ "ldr q2, [x25, #0x10]\n"
+ "movi v1.4s, #0x0\n"
+ "movi v0.4s, #0x0\n"
+ "ldr q13, [x24, #0x20]\n"
+ "ldr q31, [x25, #0x20]\n"
+ "movi v30.4s, #0x0\n"
+ "movi v29.16b, #0xf0\n"
+ "ldr q28, [x24, #0x30]\n"
+ "ldr q27, [x25, #0x30]\n"
+ "sshl v20.16b, v7.16b, v9.16b\n"
+ "sub x20, x24, #0x8\n"
+ "ldr q26, [x25, #0x40]\n"
+ "ldr q25, [x25, #0x50]\n"
+ "sshl v17.16b, v3.16b, v9.16b\n"
+ "and v7.16b, v7.16b, v29.16b\n"
+ "ldr q24, [x25, #0x60]\n"
+ "ldr q16, [x25, #0x70]\n"
+ "sshl v22.16b, v13.16b, v9.16b\n"
+ "and v3.16b, v3.16b, v29.16b\n"
+ "ldr d21, [x20, #0x0]\n"
+ "ldr d12, [x25, #-0x8]\n"
+ ".inst 0x4f85e284 // sdot v4.4s, v20.16b, v5.4b[0]\n"
+ ".inst 0x4fa5e281 // sdot v1.4s, v20.16b, v5.4b[1]\n"
+ ".inst 0x4f85ea80 // sdot v0.4s, v20.16b, v5.4b[2]\n"
+ ".inst 0x4fa5ea9e // sdot v30.4s, v20.16b, v5.4b[3]\n"
+ "sshl v9.16b, v28.16b, v9.16b\n"
+ "subs x21, x21, #0x1\n"
+ "and v13.16b, v13.16b, v29.16b\n"
+ "and v28.16b, v28.16b, v29.16b\n"
+ "add x25, x25, #0x88\n"
+ "add x24, x24, #0x48\n"
+ "fcvtl v21.4s, v21.4h\n"
+ "fcvtl v12.4s, v12.4h\n"
+ ".inst 0x4f82e224 // sdot v4.4s, v17.16b, v2.4b[0]\n"
+ ".inst 0x4fa2e221 // sdot v1.4s, v17.16b, v2.4b[1]\n"
+ ".inst 0x4f82ea20 // sdot v0.4s, v17.16b, v2.4b[2]\n"
+ ".inst 0x4fa2ea3e // sdot v30.4s, v17.16b, v2.4b[3]\n"
+ "fmul v11.4s, v21.4s, v12.s[0]\n"
+ "fmul v23.4s, v21.4s, v12.s[1]\n"
+ "fmul v17.4s, v21.4s, v12.s[2]\n"
+ ".inst 0x4f9fe2c4 // sdot v4.4s, v22.16b, v31.4b[0]\n"
+ "fmul v6.4s, v21.4s, v12.s[3]\n"
+ ".inst 0x4fbfe2c1 // sdot v1.4s, v22.16b, v31.4b[1]\n"
+ ".inst 0x4f9feac0 // sdot v0.4s, v22.16b, v31.4b[2]\n"
+ ".inst 0x4fbfeade // sdot v30.4s, v22.16b, v31.4b[3]\n"
+ ".inst 0x4f9be124 // sdot v4.4s, v9.16b, v27.4b[0]\n"
+ ".inst 0x4fbbe121 // sdot v1.4s, v9.16b, v27.4b[1]\n"
+ ".inst 0x4f9be920 // sdot v0.4s, v9.16b, v27.4b[2]\n"
+ ".inst 0x4fbbe93e // sdot v30.4s, v9.16b, v27.4b[3]\n"
+ ".inst 0x4f9ae0e4 // sdot v4.4s, v7.16b, v26.4b[0]\n"
+ ".inst 0x4fbae0e1 // sdot v1.4s, v7.16b, v26.4b[1]\n"
+ ".inst 0x4f9ae8e0 // sdot v0.4s, v7.16b, v26.4b[2]\n"
+ ".inst 0x4fbae8fe // sdot v30.4s, v7.16b, v26.4b[3]\n"
+ ".inst 0x4f99e064 // sdot v4.4s, v3.16b, v25.4b[0]\n"
+ ".inst 0x4fb9e061 // sdot v1.4s, v3.16b, v25.4b[1]\n"
+ ".inst 0x4f99e860 // sdot v0.4s, v3.16b, v25.4b[2]\n"
+ ".inst 0x4fb9e87e // sdot v30.4s, v3.16b, v25.4b[3]\n"
+ ".inst 0x4f98e1a4 // sdot v4.4s, v13.16b, v24.4b[0]\n"
+ ".inst 0x4fb8e1a1 // sdot v1.4s, v13.16b, v24.4b[1]\n"
+ ".inst 0x4f98e9a0 // sdot v0.4s, v13.16b, v24.4b[2]\n"
+ ".inst 0x4fb8e9be // sdot v30.4s, v13.16b, v24.4b[3]\n"
+ ".inst 0x4f90e384 // sdot v4.4s, v28.16b, v16.4b[0]\n"
+ ".inst 0x4fb0e381 // sdot v1.4s, v28.16b, v16.4b[1]\n"
+ ".inst 0x4f90eb80 // sdot v0.4s, v28.16b, v16.4b[2]\n"
+ ".inst 0x4fb0eb9e // sdot v30.4s, v28.16b, v16.4b[3]\n"
+ "scvtf v4.4s, v4.4s, #0x4\n"
+ "scvtf v1.4s, v1.4s, #0x4\n"
+ "scvtf v0.4s, v0.4s, #0x4\n"
+ "fmla v15.4s, v4.4s, v11.4s\n"
+ "scvtf v30.4s, v30.4s, #0x4\n"
+ "fmla v19.4s, v1.4s, v23.4s\n"
+ "fmla v18.4s, v0.4s, v17.4s\n"
+ "fmla v14.4s, v30.4s, v6.4s\n"
+ "bgt 7b\n"
+ "mov x20, %x[res_ptr]\n"
+ "cmp x10, #0x1\n"
+ "str q15, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "cmp x10, #0x2\n"
+ "str q19, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "cmp x10, #0x3\n"
+ "str q18, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "str q14, [x20, #0x0]\n"
+ "8:" // Row tail: Accumulator store skip
+ "subs x23, x23, #0x4\n"
+ "add %x[res_ptr], %x[res_ptr], #0x10\n"
+ "bne 6b\n"
+ "subs x10, x10, #0x4\n"
+ "add %x[a_ptr], %x[a_ptr], x9\n"
+ "mov %x[res_ptr], x22\n"
+ "bgt 5b\n"
+ "9:" // Row tail: Row loop skip
+ : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr)
+ : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+ return;
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemm_q4_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemm_q4_0_4x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 8;
+
+ assert (n % qk == 0);
+ assert (nr % 4 == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ const void * b_ptr = vx;
+ const void * a_ptr = vy;
+ float * res_ptr = s;
+ size_t res_stride = bs * sizeof(float);
+
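+    // Same loop structure as the 4x4 kernel above, but built on the i8mm smmla instruction: each smmla
+    // accumulates a 2x2 int32 tile from two 2x8 int8 operands (activation rows x weight columns), and the
+    // uzp1/uzp2 pairs afterwards de-interleave those tiles back into per-row accumulator vectors.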
+ __asm__ __volatile__(
+ "mov x10, %x[nr]\n"
+ "mov x9, #0x88\n"
+ "cmp x10, #0x10\n"
+ "mul x9, %x[nb], x9\n"
+ "blt 4f\n"
+ "1:" // Row loop
+ "add x28, %x[b_ptr], #0x8\n"
+ "mov x27, %x[nc]\n"
+ "add x26, %x[res_ptr], %x[res_stride], LSL #4\n"
+ "2:" // Column loop
+ "add x25, %x[a_ptr], #0x8\n"
+ "movi v2.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "mov x24, %x[nb]\n"
+ "add x23, x25, x9\n"
+ "movi v12.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "add x22, x23, x9\n"
+ "movi v11.16b, #0x0\n"
+ "movi v13.16b, #0x0\n"
+ "add x21, x22, x9\n"
+ "movi v22.16b, #0x0\n"
+ "movi v23.16b, #0x0\n"
+ "movi v25.16b, #0x0\n"
+ "movi v5.16b, #0x0\n"
+ "movi v7.16b, #0x0\n"
+ "movi v4.16b, #0x0\n"
+ "movi v6.16b, #0x0\n"
+ "movi v30.16b, #0x0\n"
+ "movi v24.16b, #0x0\n"
+ "movi v14.16b, #0x0\n"
+ "3:" // Block loop
+ "ldr q21, [x28, #0x0]\n"
+ "ldr q16, [x28, #0x10]\n"
+ "movi v1.16b, #0x4\n"
+ "movi v19.4s, #0x0\n"
+ "ldr q27, [x25, #0x0]\n"
+ "ldr q15, [x25, #0x10]\n"
+ "movi v26.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ "ldr q29, [x28, #0x20]\n"
+ "ldr q3, [x28, #0x30]\n"
+ "movi v17.4s, #0x0\n"
+ "movi v0.16b, #0xf0\n"
+ "ldr d20, [x25, #-0x8]\n"
+ "ldr d9, [x23, #-0x8]\n"
+ "sshl v8.16b, v21.16b, v1.16b\n"
+ "sshl v31.16b, v16.16b, v1.16b\n"
+ "and v21.16b, v21.16b, v0.16b\n"
+ "and v16.16b, v16.16b, v0.16b\n"
+ "sub x20, x28, #0x8\n"
+ "subs x24, x24, #0x1\n"
+ "add x28, x28, #0x48\n"
+ ".inst 0x4e88a773 // smmla v19.4s, v27.16b, v8.16b\n"
+ ".inst 0x4e9fa77a // smmla v26.4s, v27.16b, v31.16b\n"
+ "ldr q27, [x25, #0x20]\n"
+ ".inst 0x4e88a5f2 // smmla v18.4s, v15.16b, v8.16b\n"
+ ".inst 0x4e9fa5f1 // smmla v17.4s, v15.16b, v31.16b\n"
+ "sshl v15.16b, v29.16b, v1.16b\n"
+ "sshl v1.16b, v3.16b, v1.16b\n"
+ "and v29.16b, v29.16b, v0.16b\n"
+ "and v3.16b, v3.16b, v0.16b\n"
+ "ldr q0, [x25, #0x30]\n"
+ "fcvtl v20.4s, v20.4h\n"
+ ".inst 0x4e8fa773 // smmla v19.4s, v27.16b, v15.16b\n"
+ "fcvtl v9.4s, v9.4h\n"
+ ".inst 0x4e81a77a // smmla v26.4s, v27.16b, v1.16b\n"
+ "ldr q27, [x25, #0x40]\n"
+ ".inst 0x4e8fa412 // smmla v18.4s, v0.16b, v15.16b\n"
+ ".inst 0x4e81a411 // smmla v17.4s, v0.16b, v1.16b\n"
+ "ldr q0, [x25, #0x50]\n"
+ ".inst 0x4e95a773 // smmla v19.4s, v27.16b, v21.16b\n"
+ ".inst 0x4e90a77a // smmla v26.4s, v27.16b, v16.16b\n"
+ "ldr q27, [x25, #0x60]\n"
+ ".inst 0x4e95a412 // smmla v18.4s, v0.16b, v21.16b\n"
+ ".inst 0x4e90a411 // smmla v17.4s, v0.16b, v16.16b\n"
+ "ldr q0, [x25, #0x70]\n"
+ "add x25, x25, #0x88\n"
+ ".inst 0x4e9da773 // smmla v19.4s, v27.16b, v29.16b\n"
+ ".inst 0x4e83a77a // smmla v26.4s, v27.16b, v3.16b\n"
+ "ldr d27, [x20, #0x0]\n"
+ ".inst 0x4e9da412 // smmla v18.4s, v0.16b, v29.16b\n"
+ ".inst 0x4e83a411 // smmla v17.4s, v0.16b, v3.16b\n"
+ "fcvtl v27.4s, v27.4h\n"
+ "uzp1 v0.2d, v19.2d, v26.2d\n"
+ "uzp2 v26.2d, v19.2d, v26.2d\n"
+ "fmul v19.4s, v27.4s, v20.s[0]\n"
+ "scvtf v0.4s, v0.4s, #0x4\n"
+ "scvtf v26.4s, v26.4s, #0x4\n"
+ "fmla v2.4s, v0.4s, v19.4s\n"
+ "ldr q19, [x23, #0x0]\n"
+ "uzp1 v0.2d, v18.2d, v17.2d\n"
+ "uzp2 v18.2d, v18.2d, v17.2d\n"
+ "fmul v17.4s, v27.4s, v20.s[1]\n"
+ "scvtf v0.4s, v0.4s, #0x4\n"
+ "scvtf v18.4s, v18.4s, #0x4\n"
+ "fmla v10.4s, v26.4s, v17.4s\n"
+ "ldr q17, [x23, #0x10]\n"
+ "fmul v26.4s, v27.4s, v20.s[2]\n"
+ "fmul v20.4s, v27.4s, v20.s[3]\n"
+ "fmla v12.4s, v0.4s, v26.4s\n"
+ "ldr d0, [x22, #-0x8]\n"
+ "ldr d26, [x21, #-0x8]\n"
+ "fcvtl v0.4s, v0.4h\n"
+ "fmla v28.4s, v18.4s, v20.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n"
+ ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n"
+ "ldr q19, [x23, #0x20]\n"
+ "fcvtl v26.4s, v26.4h\n"
+ ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n"
+ ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n"
+ "ldr q19, [x23, #0x40]\n"
+ ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n"
+ ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n"
+ "ldr q19, [x23, #0x60]\n"
+ ".inst 0x4e9da674 // smmla v20.4s, v19.16b, v29.16b\n"
+ ".inst 0x4e83a672 // smmla v18.4s, v19.16b, v3.16b\n"
+ "uzp1 v19.2d, v20.2d, v18.2d\n"
+ "scvtf v19.4s, v19.4s, #0x4\n"
+ "uzp2 v20.2d, v20.2d, v18.2d\n"
+ "fmul v18.4s, v27.4s, v9.s[0]\n"
+ "scvtf v20.4s, v20.4s, #0x4\n"
+ "fmla v11.4s, v19.4s, v18.4s\n"
+ "ldr q18, [x22, #0x0]\n"
+ "fmul v19.4s, v27.4s, v9.s[1]\n"
+ "fmla v13.4s, v20.4s, v19.4s\n"
+ "movi v19.4s, #0x0\n"
+ "movi v20.4s, #0x0\n"
+ ".inst 0x4e88a633 // smmla v19.4s, v17.16b, v8.16b\n"
+ ".inst 0x4e9fa634 // smmla v20.4s, v17.16b, v31.16b\n"
+ "ldr q17, [x23, #0x30]\n"
+ ".inst 0x4e8fa633 // smmla v19.4s, v17.16b, v15.16b\n"
+ ".inst 0x4e81a634 // smmla v20.4s, v17.16b, v1.16b\n"
+ "ldr q17, [x23, #0x50]\n"
+ ".inst 0x4e95a633 // smmla v19.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e90a634 // smmla v20.4s, v17.16b, v16.16b\n"
+ "ldr q17, [x23, #0x70]\n"
+ "add x23, x23, #0x88\n"
+ ".inst 0x4e9da633 // smmla v19.4s, v17.16b, v29.16b\n"
+ ".inst 0x4e83a634 // smmla v20.4s, v17.16b, v3.16b\n"
+ "uzp1 v17.2d, v19.2d, v20.2d\n"
+ "scvtf v17.4s, v17.4s, #0x4\n"
+ "uzp2 v20.2d, v19.2d, v20.2d\n"
+ "fmul v19.4s, v27.4s, v9.s[2]\n"
+ "fmul v9.4s, v27.4s, v9.s[3]\n"
+ "scvtf v20.4s, v20.4s, #0x4\n"
+ "fmla v22.4s, v17.4s, v19.4s\n"
+ "ldr q17, [x22, #0x10]\n"
+ "movi v19.4s, #0x0\n"
+ ".inst 0x4e88a653 // smmla v19.4s, v18.16b, v8.16b\n"
+ "fmla v23.4s, v20.4s, v9.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v9.4s, #0x0\n"
+ ".inst 0x4e9fa654 // smmla v20.4s, v18.16b, v31.16b\n"
+ "ldr q18, [x22, #0x20]\n"
+ ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n"
+ ".inst 0x4e8fa653 // smmla v19.4s, v18.16b, v15.16b\n"
+ ".inst 0x4e81a654 // smmla v20.4s, v18.16b, v1.16b\n"
+ "ldr q18, [x22, #0x40]\n"
+ ".inst 0x4e95a653 // smmla v19.4s, v18.16b, v21.16b\n"
+ ".inst 0x4e90a654 // smmla v20.4s, v18.16b, v16.16b\n"
+ "ldr q18, [x22, #0x60]\n"
+ ".inst 0x4e9da653 // smmla v19.4s, v18.16b, v29.16b\n"
+ ".inst 0x4e83a654 // smmla v20.4s, v18.16b, v3.16b\n"
+ "movi v18.4s, #0x0\n"
+ ".inst 0x4e9fa632 // smmla v18.4s, v17.16b, v31.16b\n"
+ "ldr q17, [x22, #0x30]\n"
+ ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n"
+ ".inst 0x4e81a632 // smmla v18.4s, v17.16b, v1.16b\n"
+ "ldr q17, [x22, #0x50]\n"
+ ".inst 0x4e95a629 // smmla v9.4s, v17.16b, v21.16b\n"
+ ".inst 0x4e90a632 // smmla v18.4s, v17.16b, v16.16b\n"
+ "ldr q17, [x22, #0x70]\n"
+ "add x22, x22, #0x88\n"
+ ".inst 0x4e9da629 // smmla v9.4s, v17.16b, v29.16b\n"
+ ".inst 0x4e83a632 // smmla v18.4s, v17.16b, v3.16b\n"
+ "uzp1 v17.2d, v19.2d, v20.2d\n"
+ "uzp2 v20.2d, v19.2d, v20.2d\n"
+ "fmul v19.4s, v27.4s, v0.s[0]\n"
+ "scvtf v17.4s, v17.4s, #0x4\n"
+ "scvtf v20.4s, v20.4s, #0x4\n"
+ "fmla v25.4s, v17.4s, v19.4s\n"
+ "ldr q19, [x21, #0x0]\n"
+ "fmul v17.4s, v27.4s, v0.s[1]\n"
+ "fmla v5.4s, v20.4s, v17.4s\n"
+ "ldr q17, [x21, #0x10]\n"
+ "uzp1 v20.2d, v9.2d, v18.2d\n"
+ "uzp2 v9.2d, v9.2d, v18.2d\n"
+ "fmul v18.4s, v27.4s, v0.s[2]\n"
+ "fmul v0.4s, v27.4s, v0.s[3]\n"
+ "scvtf v20.4s, v20.4s, #0x4\n"
+ "scvtf v9.4s, v9.4s, #0x4\n"
+ "fmla v7.4s, v20.4s, v18.4s\n"
+ "movi v20.4s, #0x0\n"
+ "movi v18.4s, #0x0\n"
+ ".inst 0x4e88a674 // smmla v20.4s, v19.16b, v8.16b\n"
+ ".inst 0x4e9fa672 // smmla v18.4s, v19.16b, v31.16b\n"
+ "ldr q19, [x21, #0x20]\n"
+ "fmla v4.4s, v9.4s, v0.4s\n"
+ "movi v9.4s, #0x0\n"
+ "movi v0.4s, #0x0\n"
+ ".inst 0x4e88a629 // smmla v9.4s, v17.16b, v8.16b\n"
+ "fmul v8.4s, v27.4s, v26.s[0]\n"
+ ".inst 0x4e9fa620 // smmla v0.4s, v17.16b, v31.16b\n"
+ "ldr q17, [x21, #0x30]\n"
+ ".inst 0x4e8fa674 // smmla v20.4s, v19.16b, v15.16b\n"
+ "fmul v31.4s, v27.4s, v26.s[1]\n"
+ ".inst 0x4e81a672 // smmla v18.4s, v19.16b, v1.16b\n"
+ "ldr q19, [x21, #0x40]\n"
+ ".inst 0x4e8fa629 // smmla v9.4s, v17.16b, v15.16b\n"
+ "fmul v15.4s, v27.4s, v26.s[2]\n"
+ "fmul v27.4s, v27.4s, v26.s[3]\n"
+ ".inst 0x4e81a620 // smmla v0.4s, v17.16b, v1.16b\n"
+ "ldr q1, [x21, #0x50]\n"
+ ".inst 0x4e95a674 // smmla v20.4s, v19.16b, v21.16b\n"
+ ".inst 0x4e90a672 // smmla v18.4s, v19.16b, v16.16b\n"
+ "ldr q26, [x21, #0x60]\n"
+ ".inst 0x4e95a429 // smmla v9.4s, v1.16b, v21.16b\n"
+ ".inst 0x4e90a420 // smmla v0.4s, v1.16b, v16.16b\n"
+ "ldr q21, [x21, #0x70]\n"
+ "add x21, x21, #0x88\n"
+ ".inst 0x4e9da754 // smmla v20.4s, v26.16b, v29.16b\n"
+ ".inst 0x4e83a752 // smmla v18.4s, v26.16b, v3.16b\n"
+ ".inst 0x4e9da6a9 // smmla v9.4s, v21.16b, v29.16b\n"
+ ".inst 0x4e83a6a0 // smmla v0.4s, v21.16b, v3.16b\n"
+ "uzp1 v29.2d, v20.2d, v18.2d\n"
+ "uzp2 v21.2d, v20.2d, v18.2d\n"
+ "scvtf v29.4s, v29.4s, #0x4\n"
+ "uzp1 v18.2d, v9.2d, v0.2d\n"
+ "uzp2 v16.2d, v9.2d, v0.2d\n"
+ "scvtf v21.4s, v21.4s, #0x4\n"
+ "fmla v6.4s, v29.4s, v8.4s\n"
+ "scvtf v18.4s, v18.4s, #0x4\n"
+ "scvtf v16.4s, v16.4s, #0x4\n"
+ "fmla v30.4s, v21.4s, v31.4s\n"
+ "fmla v24.4s, v18.4s, v15.4s\n"
+ "fmla v14.4s, v16.4s, v27.4s\n"
+ "bgt 3b\n"
+ "mov x20, %x[res_ptr]\n"
+ "subs x27, x27, #0x4\n"
+ "add %x[res_ptr], %x[res_ptr], #0x10\n"
+ "str q2, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q10, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q12, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q28, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q11, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q13, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q22, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q23, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q25, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q5, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q7, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q4, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q6, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q30, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q24, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "str q14, [x20, #0x0]\n"
+ "bne 2b\n"
+ "mov x20, #0x4\n"
+ "sub x10, x10, #0x10\n"
+ "cmp x10, #0x10\n"
+ "mov %x[res_ptr], x26\n"
+ "madd %x[a_ptr], x20, x9, %x[a_ptr]\n"
+ "bge 1b\n"
+ "4:" // Row loop skip
+ "cbz x10, 9f\n"
+ "5:" // Row tail: Row loop
+ "add x24, %x[b_ptr], #0x8\n"
+ "mov x23, %x[nc]\n"
+ "add x22, %x[res_ptr], %x[res_stride], LSL #2\n"
+ "6:" // Row tail: Column loop
+ "movi v2.16b, #0x0\n"
+ "movi v10.16b, #0x0\n"
+ "add x25, %x[a_ptr], #0x8\n"
+ "mov x21, %x[nb]\n"
+ "movi v12.16b, #0x0\n"
+ "movi v28.16b, #0x0\n"
+ "7:" // Row tail: Block loop
+ "ldr q6, [x24, #0x0]\n"
+ "ldr q5, [x24, #0x10]\n"
+ "movi v17.16b, #0x4\n"
+ "movi v8.4s, #0x0\n"
+ "ldr q4, [x25, #0x0]\n"
+ "ldr q13, [x25, #0x10]\n"
+ "movi v27.4s, #0x0\n"
+ "movi v0.4s, #0x0\n"
+ "ldr q31, [x24, #0x20]\n"
+ "ldr q14, [x24, #0x30]\n"
+ "movi v29.4s, #0x0\n"
+ "movi v22.16b, #0xf0\n"
+ "ldr q11, [x25, #0x20]\n"
+ "ldr q23, [x25, #0x30]\n"
+ "sshl v21.16b, v6.16b, v17.16b\n"
+ "sshl v16.16b, v5.16b, v17.16b\n"
+ "ldr q20, [x25, #0x40]\n"
+ "ldr q26, [x25, #0x50]\n"
+ "and v6.16b, v6.16b, v22.16b\n"
+ "and v5.16b, v5.16b, v22.16b\n"
+ "ldr q25, [x25, #0x60]\n"
+ "ldr q3, [x25, #0x70]\n"
+ "sshl v19.16b, v31.16b, v17.16b\n"
+ "sshl v18.16b, v14.16b, v17.16b\n"
+ "ldr d17, [x25, #-0x8]\n"
+ ".inst 0x4e95a488 // smmla v8.4s, v4.16b, v21.16b\n"
+ ".inst 0x4e90a49b // smmla v27.4s, v4.16b, v16.16b\n"
+ "and v31.16b, v31.16b, v22.16b\n"
+ ".inst 0x4e95a5a0 // smmla v0.4s, v13.16b, v21.16b\n"
+ ".inst 0x4e90a5bd // smmla v29.4s, v13.16b, v16.16b\n"
+ "and v14.16b, v14.16b, v22.16b\n"
+ "sub x20, x24, #0x8\n"
+ "ldr d16, [x20, #0x0]\n"
+ "subs x21, x21, #0x1\n"
+ "add x25, x25, #0x88\n"
+ "fcvtl v17.4s, v17.4h\n"
+ "add x24, x24, #0x48\n"
+ ".inst 0x4e93a568 // smmla v8.4s, v11.16b, v19.16b\n"
+ ".inst 0x4e92a57b // smmla v27.4s, v11.16b, v18.16b\n"
+ ".inst 0x4e93a6e0 // smmla v0.4s, v23.16b, v19.16b\n"
+ ".inst 0x4e92a6fd // smmla v29.4s, v23.16b, v18.16b\n"
+ "fcvtl v16.4s, v16.4h\n"
+ ".inst 0x4e86a688 // smmla v8.4s, v20.16b, v6.16b\n"
+ ".inst 0x4e85a69b // smmla v27.4s, v20.16b, v5.16b\n"
+ "fmul v23.4s, v16.4s, v17.s[0]\n"
+ "fmul v21.4s, v16.4s, v17.s[1]\n"
+ "fmul v1.4s, v16.4s, v17.s[2]\n"
+ "fmul v20.4s, v16.4s, v17.s[3]\n"
+ ".inst 0x4e86a740 // smmla v0.4s, v26.16b, v6.16b\n"
+ ".inst 0x4e85a75d // smmla v29.4s, v26.16b, v5.16b\n"
+ ".inst 0x4e9fa728 // smmla v8.4s, v25.16b, v31.16b\n"
+ ".inst 0x4e8ea73b // smmla v27.4s, v25.16b, v14.16b\n"
+ ".inst 0x4e9fa460 // smmla v0.4s, v3.16b, v31.16b\n"
+ ".inst 0x4e8ea47d // smmla v29.4s, v3.16b, v14.16b\n"
+ "uzp1 v19.2d, v8.2d, v27.2d\n"
+ "uzp2 v18.2d, v8.2d, v27.2d\n"
+ "scvtf v19.4s, v19.4s, #0x4\n"
+ "uzp1 v17.2d, v0.2d, v29.2d\n"
+ "uzp2 v16.2d, v0.2d, v29.2d\n"
+ "scvtf v18.4s, v18.4s, #0x4\n"
+ "fmla v2.4s, v19.4s, v23.4s\n"
+ "scvtf v17.4s, v17.4s, #0x4\n"
+ "scvtf v16.4s, v16.4s, #0x4\n"
+ "fmla v10.4s, v18.4s, v21.4s\n"
+ "fmla v12.4s, v17.4s, v1.4s\n"
+ "fmla v28.4s, v16.4s, v20.4s\n"
+ "bgt 7b\n"
+ "mov x20, %x[res_ptr]\n"
+ "cmp x10, #0x1\n"
+ "str q2, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "cmp x10, #0x2\n"
+ "str q10, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "cmp x10, #0x3\n"
+ "str q12, [x20, #0x0]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "str q28, [x20, #0x0]\n"
+ "8:" // Row tail: Accumulator store skip
+ "subs x23, x23, #0x4\n"
+ "add %x[res_ptr], %x[res_ptr], #0x10\n"
+ "bne 6b\n"
+ "subs x10, x10, #0x4\n"
+ "add %x[a_ptr], %x[a_ptr], x9\n"
+ "mov %x[res_ptr], x22\n"
+ "bgt 5b\n"
+ "9:" // Row tail: Row loop skip
+ : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr)
+ : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc)
+ : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31", "x9", "x10", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28"
+ );
+ return;
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ ggml_gemm_q4_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
+void ggml_gemm_q4_0_8x8_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 8;
+ const int blocklen = 8;
+
+ assert (n % qk == 0);
+ assert (nr % 4 == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
+#if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
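+    // The SVE kernel below is only used when ggml_cpu_get_sve_cnt() reports a vector length of
+    // QK8_0 (= 32) bytes, i.e. 256-bit SVE, so one z register spans a full block of 32 int8 quants;
+    // on other vector lengths execution falls through to the generic implementation.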
+ if (ggml_cpu_get_sve_cnt() == QK8_0) {
+ const void * b_ptr = vx;
+ const void * a_ptr = vy;
+ float * res_ptr = s;
+ size_t res_stride = bs * sizeof(float);
+
+ __asm__ __volatile__(
+ "mov x20, #0x4\n"
+ "mov x13, %x[nr]\n"
+ "mov z28.s, #-0x4\n"
+ "mov x12, #0x88\n"
+ "ptrue p1.b\n"
+ "whilelt p0.s, XZR, x20\n"
+ "cmp x13, #0x10\n"
+ "mul x12, %x[nb], x12\n"
+ "blt 4f\n"
+ "1:" // Row loop
+ "add x11, %x[b_ptr], #0x10\n"
+ "mov x10, %x[nc]\n"
+ "add x9, %x[res_ptr], %x[res_stride], LSL #4\n"
+ "2:" // Column loop
+ "add x28, %x[a_ptr], #0x8\n"
+ "mov z24.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "mov x27, %x[nb]\n"
+ "add x26, x28, x12\n"
+ "mov z12.b, #0x0\n"
+ "mov z0.b, #0x0\n"
+ "add x25, x26, x12\n"
+ "mov z13.b, #0x0\n"
+ "mov z1.b, #0x0\n"
+ "add x24, x25, x12\n"
+ "mov z20.b, #0x0\n"
+ "mov z25.b, #0x0\n"
+ "mov z11.b, #0x0\n"
+ "mov z16.b, #0x0\n"
+ "mov z19.b, #0x0\n"
+ "mov z26.b, #0x0\n"
+ "mov z8.b, #0x0\n"
+ "mov z29.b, #0x0\n"
+ "mov z27.b, #0x0\n"
+ "mov z10.b, #0x0\n"
+ "3:" // Block loop
+ "ld1b { z30.b }, p1/Z, [x11]\n"
+ "ld1b { z21.b }, p1/Z, [x11, #1, MUL VL]\n"
+ "mov z18.s, #0x0\n"
+ "mov z7.s, #0x0\n"
+ "ld1rqb { z3.b }, p1/Z, [x28]\n"
+ "ld1rqb { z5.b }, p1/Z, [x28, #16]\n"
+ "mov z9.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ "ld1b { z4.b }, p1/Z, [x11, #2, MUL VL]\n"
+ "ld1b { z17.b }, p1/Z, [x11, #3, MUL VL]\n"
+ "sub x20, x11, #0x10\n"
+ "sub x23, x28, #0x8\n"
+ "lsl z31.b, z30.b, #0x4\n"
+ "lsl z6.b, z21.b, #0x4\n"
+ "ld1h { z23.s }, p1/Z, [x20]\n"
+ "sub x22, x26, #0x8\n"
+ "and z30.b, z30.b, #0xf0\n"
+ "and z21.b, z21.b, #0xf0\n"
+ "sub x21, x25, #0x8\n"
+ "sub x20, x24, #0x8\n"
+ "lsl z14.b, z4.b, #0x4\n"
+ "lsl z2.b, z17.b, #0x4\n"
+ "subs x27, x27, #0x1\n"
+ "add x11, x11, #0x90\n"
+ ".inst 0x451f9872 // smmla z18.s, z3.b, z31.b\n"
+ ".inst 0x45069867 // smmla z7.s, z3.b, z6.b\n"
+ "ld1rqb { z3.b }, p1/Z, [x28, #32]\n"
+ "and z4.b, z4.b, #0xf0\n"
+ ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n"
+ ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n"
+ "ld1rqb { z5.b }, p1/Z, [x28, #48]\n"
+ "and z17.b, z17.b, #0xf0\n"
+ "fcvt z23.s, p1/m, z23.h\n"
+ ".inst 0x450e9872 // smmla z18.s, z3.b, z14.b\n"
+ ".inst 0x45029867 // smmla z7.s, z3.b, z2.b\n"
+ "ld1rqb { z3.b }, p1/Z, [x28, #64]\n"
+ ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n"
+ ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n"
+ "ld1rqb { z5.b }, p1/Z, [x28, #80]\n"
+ "fscale z23.s, p1/m, z23.s, z28.s\n"
+ ".inst 0x451e9872 // smmla z18.s, z3.b, z30.b\n"
+ ".inst 0x45159867 // smmla z7.s, z3.b, z21.b\n"
+ "ld1rqb { z3.b }, p1/Z, [x28, #96]\n"
+ ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n"
+ ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n"
+ "ld1rqb { z5.b }, p1/Z, [x28, #112]\n"
+ "add x28, x28, #0x88\n"
+ ".inst 0x45049872 // smmla z18.s, z3.b, z4.b\n"
+ ".inst 0x45119867 // smmla z7.s, z3.b, z17.b\n"
+ "ld1h { z3.s }, p0/Z, [x23]\n"
+ ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n"
+ ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n"
+ "fcvt z3.s, p1/m, z3.h\n"
+ "uzp1 z5.d, z18.d, z7.d\n"
+ "uzp2 z18.d, z18.d, z7.d\n"
+ "mov z3.q, z3.q[0]\n"
+ "uzp1 z7.d, z9.d, z22.d\n"
+ "uzp2 z22.d, z9.d, z22.d\n"
+ "fmul z9.s, z23.s, z3.s[0]\n"
+ "scvtf z5.s, p1/m, z5.s\n"
+ "scvtf z18.s, p1/m, z18.s\n"
+ "scvtf z7.s, p1/m, z7.s\n"
+ "scvtf z22.s, p1/m, z22.s\n"
+ "fmla z24.s, p1/M, z5.s, z9.s\n"
+ "ld1rqb { z5.b }, p1/Z, [x26]\n"
+ "fmul z9.s, z23.s, z3.s[1]\n"
+ "fmla z15.s, p1/M, z18.s, z9.s\n"
+ "ld1rqb { z18.b }, p1/Z, [x26, #16]\n"
+ "fmul z9.s, z23.s, z3.s[2]\n"
+ "fmul z3.s, z23.s, z3.s[3]\n"
+ "fmla z12.s, p1/M, z7.s, z9.s\n"
+ "mov z9.s, #0x0\n"
+ "ld1h { z7.s }, p0/Z, [x22]\n"
+ ".inst 0x451f98a9 // smmla z9.s, z5.b, z31.b\n"
+ "fmla z0.s, p1/M, z22.s, z3.s\n"
+ "mov z22.s, #0x0\n"
+ "ld1h { z3.s }, p0/Z, [x21]\n"
+ ".inst 0x450698b6 // smmla z22.s, z5.b, z6.b\n"
+ "ld1rqb { z5.b }, p1/Z, [x26, #32]\n"
+ "fcvt z7.s, p1/m, z7.h\n"
+ "fcvt z3.s, p1/m, z3.h\n"
+ ".inst 0x450e98a9 // smmla z9.s, z5.b, z14.b\n"
+ ".inst 0x450298b6 // smmla z22.s, z5.b, z2.b\n"
+ "ld1rqb { z5.b }, p1/Z, [x26, #64]\n"
+ "mov z7.q, z7.q[0]\n"
+ "mov z3.q, z3.q[0]\n"
+ ".inst 0x451e98a9 // smmla z9.s, z5.b, z30.b\n"
+ ".inst 0x451598b6 // smmla z22.s, z5.b, z21.b\n"
+ "ld1rqb { z5.b }, p1/Z, [x26, #96]\n"
+ ".inst 0x450498a9 // smmla z9.s, z5.b, z4.b\n"
+ ".inst 0x451198b6 // smmla z22.s, z5.b, z17.b\n"
+ "uzp1 z5.d, z9.d, z22.d\n"
+ "scvtf z5.s, p1/m, z5.s\n"
+ "uzp2 z22.d, z9.d, z22.d\n"
+ "fmul z9.s, z23.s, z7.s[0]\n"
+ "scvtf z22.s, p1/m, z22.s\n"
+ "fmla z13.s, p1/M, z5.s, z9.s\n"
+ "ld1rqb { z9.b }, p1/Z, [x25]\n"
+ "fmul z5.s, z23.s, z7.s[1]\n"
+ "fmla z1.s, p1/M, z22.s, z5.s\n"
+ "mov z5.s, #0x0\n"
+ "mov z22.s, #0x0\n"
+ ".inst 0x451f9a45 // smmla z5.s, z18.b, z31.b\n"
+ ".inst 0x45069a56 // smmla z22.s, z18.b, z6.b\n"
+ "ld1rqb { z18.b }, p1/Z, [x26, #48]\n"
+ ".inst 0x450e9a45 // smmla z5.s, z18.b, z14.b\n"
+ ".inst 0x45029a56 // smmla z22.s, z18.b, z2.b\n"
+ "ld1rqb { z18.b }, p1/Z, [x26, #80]\n"
+ ".inst 0x451e9a45 // smmla z5.s, z18.b, z30.b\n"
+ ".inst 0x45159a56 // smmla z22.s, z18.b, z21.b\n"
+ "ld1rqb { z18.b }, p1/Z, [x26, #112]\n"
+ "add x26, x26, #0x88\n"
+ ".inst 0x45049a45 // smmla z5.s, z18.b, z4.b\n"
+ ".inst 0x45119a56 // smmla z22.s, z18.b, z17.b\n"
+ "uzp1 z18.d, z5.d, z22.d\n"
+ "scvtf z18.s, p1/m, z18.s\n"
+ "uzp2 z22.d, z5.d, z22.d\n"
+ "fmul z5.s, z23.s, z7.s[2]\n"
+ "fmul z7.s, z23.s, z7.s[3]\n"
+ "scvtf z22.s, p1/m, z22.s\n"
+ "fmla z20.s, p1/M, z18.s, z5.s\n"
+ "ld1rqb { z18.b }, p1/Z, [x25, #16]\n"
+ "ld1h { z5.s }, p0/Z, [x20]\n"
+ "fcvt z5.s, p1/m, z5.h\n"
+ "fmla z25.s, p1/M, z22.s, z7.s\n"
+ "mov z22.s, #0x0\n"
+ "mov z7.s, #0x0\n"
+ ".inst 0x451f9936 // smmla z22.s, z9.b, z31.b\n"
+ ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n"
+ "ld1rqb { z9.b }, p1/Z, [x25, #32]\n"
+ "mov z5.q, z5.q[0]\n"
+ ".inst 0x450e9936 // smmla z22.s, z9.b, z14.b\n"
+ ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n"
+ "ld1rqb { z9.b }, p1/Z, [x25, #64]\n"
+ ".inst 0x451e9936 // smmla z22.s, z9.b, z30.b\n"
+ ".inst 0x45159927 // smmla z7.s, z9.b, z21.b\n"
+ "ld1rqb { z9.b }, p1/Z, [x25, #96]\n"
+ ".inst 0x45049936 // smmla z22.s, z9.b, z4.b\n"
+ ".inst 0x45119927 // smmla z7.s, z9.b, z17.b\n"
+ "uzp1 z9.d, z22.d, z7.d\n"
+ "scvtf z9.s, p1/m, z9.s\n"
+ "uzp2 z22.d, z22.d, z7.d\n"
+ "fmul z7.s, z23.s, z3.s[0]\n"
+ "scvtf z22.s, p1/m, z22.s\n"
+ "fmla z11.s, p1/M, z9.s, z7.s\n"
+ "ld1rqb { z9.b }, p1/Z, [x24]\n"
+ "fmul z7.s, z23.s, z3.s[1]\n"
+ "fmla z16.s, p1/M, z22.s, z7.s\n"
+ "mov z22.s, #0x0\n"
+ "mov z7.s, #0x0\n"
+ ".inst 0x451f9a56 // smmla z22.s, z18.b, z31.b\n"
+ ".inst 0x45069a47 // smmla z7.s, z18.b, z6.b\n"
+ "ld1rqb { z18.b }, p1/Z, [x25, #48]\n"
+ ".inst 0x450e9a56 // smmla z22.s, z18.b, z14.b\n"
+ ".inst 0x45029a47 // smmla z7.s, z18.b, z2.b\n"
+ "ld1rqb { z18.b }, p1/Z, [x25, #80]\n"
+ ".inst 0x451e9a56 // smmla z22.s, z18.b, z30.b\n"
+ ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n"
+ "ld1rqb { z18.b }, p1/Z, [x25, #112]\n"
+ "add x25, x25, #0x88\n"
+ ".inst 0x45049a56 // smmla z22.s, z18.b, z4.b\n"
+ ".inst 0x45119a47 // smmla z7.s, z18.b, z17.b\n"
+ "uzp1 z18.d, z22.d, z7.d\n"
+ "scvtf z18.s, p1/m, z18.s\n"
+ "uzp2 z7.d, z22.d, z7.d\n"
+ "fmul z22.s, z23.s, z3.s[2]\n"
+ "fmul z3.s, z23.s, z3.s[3]\n"
+ "scvtf z7.s, p1/m, z7.s\n"
+ "fmla z19.s, p1/M, z18.s, z22.s\n"
+ "ld1rqb { z18.b }, p1/Z, [x24, #16]\n"
+ "fmul z22.s, z23.s, z5.s[0]\n"
+ "fmla z26.s, p1/M, z7.s, z3.s\n"
+ "mov z3.s, #0x0\n"
+ "mov z7.s, #0x0\n"
+ ".inst 0x451f9923 // smmla z3.s, z9.b, z31.b\n"
+ ".inst 0x45069927 // smmla z7.s, z9.b, z6.b\n"
+ "ld1rqb { z9.b }, p1/Z, [x24, #32]\n"
+ ".inst 0x450e9923 // smmla z3.s, z9.b, z14.b\n"
+ ".inst 0x45029927 // smmla z7.s, z9.b, z2.b\n"
+ "mov z9.s, #0x0\n"
+ ".inst 0x451f9a49 // smmla z9.s, z18.b, z31.b\n"
+ "mov z31.s, #0x0\n"
+ ".inst 0x45069a5f // smmla z31.s, z18.b, z6.b\n"
+ "ld1rqb { z6.b }, p1/Z, [x24, #48]\n"
+ "ld1rqb { z18.b }, p1/Z, [x24, #64]\n"
+ ".inst 0x450e98c9 // smmla z9.s, z6.b, z14.b\n"
+ "fmul z14.s, z23.s, z5.s[1]\n"
+ ".inst 0x450298df // smmla z31.s, z6.b, z2.b\n"
+ "ld1rqb { z6.b }, p1/Z, [x24, #80]\n"
+ "fmul z2.s, z23.s, z5.s[2]\n"
+ "fmul z23.s, z23.s, z5.s[3]\n"
+ ".inst 0x451e9a43 // smmla z3.s, z18.b, z30.b\n"
+ ".inst 0x45159a47 // smmla z7.s, z18.b, z21.b\n"
+ "ld1rqb { z5.b }, p1/Z, [x24, #96]\n"
+ ".inst 0x451e98c9 // smmla z9.s, z6.b, z30.b\n"
+ ".inst 0x451598df // smmla z31.s, z6.b, z21.b\n"
+ "ld1rqb { z18.b }, p1/Z, [x24, #112]\n"
+ "add x24, x24, #0x88\n"
+ ".inst 0x450498a3 // smmla z3.s, z5.b, z4.b\n"
+ ".inst 0x451198a7 // smmla z7.s, z5.b, z17.b\n"
+ ".inst 0x45049a49 // smmla z9.s, z18.b, z4.b\n"
+ ".inst 0x45119a5f // smmla z31.s, z18.b, z17.b\n"
+ "uzp1 z18.d, z3.d, z7.d\n"
+ "uzp2 z5.d, z3.d, z7.d\n"
+ "scvtf z18.s, p1/m, z18.s\n"
+ "uzp1 z6.d, z9.d, z31.d\n"
+ "uzp2 z9.d, z9.d, z31.d\n"
+ "scvtf z5.s, p1/m, z5.s\n"
+ "fmla z8.s, p1/M, z18.s, z22.s\n"
+ "scvtf z6.s, p1/m, z6.s\n"
+ "scvtf z9.s, p1/m, z9.s\n"
+ "fmla z29.s, p1/M, z5.s, z14.s\n"
+ "fmla z27.s, p1/M, z6.s, z2.s\n"
+ "fmla z10.s, p1/M, z9.s, z23.s\n"
+ "bgt 3b\n"
+ "mov x20, %x[res_ptr]\n"
+ "subs x10, x10, #0x8\n"
+ "add %x[res_ptr], %x[res_ptr], #0x20\n"
+ "st1w { z24.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z15.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z12.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z0.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z13.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z1.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z20.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z25.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z11.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z16.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z19.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z26.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z8.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z29.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z27.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "st1w { z10.s }, p1, [x20]\n"
+ "bne 2b\n"
+ "mov x20, #0x4\n"
+ "sub x13, x13, #0x10\n"
+ "cmp x13, #0x10\n"
+ "mov %x[res_ptr], x9\n"
+ "madd %x[a_ptr], x20, x12, %x[a_ptr]\n"
+ "bge 1b\n"
+ "4:" // Row loop skip
+ "cbz x13, 9f\n"
+ "5:" // Row tail: Row loop
+ "add x25, %x[b_ptr], #0x10\n"
+ "mov x24, %x[nc]\n"
+ "add x23, %x[res_ptr], %x[res_stride], LSL #2\n"
+ "6:" // Row tail: Column loop
+ "mov z24.b, #0x0\n"
+ "mov z15.b, #0x0\n"
+ "add x28, %x[a_ptr], #0x8\n"
+ "mov x22, %x[nb]\n"
+ "mov z12.b, #0x0\n"
+ "mov z0.b, #0x0\n"
+ "7:" // Row tail: Block loop
+ "ld1b { z3.b }, p1/Z, [x25]\n"
+ "ld1b { z6.b }, p1/Z, [x25, #1, MUL VL]\n"
+ "mov z2.s, #0x0\n"
+ "mov z25.s, #0x0\n"
+ "ld1rqb { z26.b }, p1/Z, [x28]\n"
+ "ld1rqb { z21.b }, p1/Z, [x28, #16]\n"
+ "mov z27.s, #0x0\n"
+ "mov z19.s, #0x0\n"
+ "ld1b { z29.b }, p1/Z, [x25, #2, MUL VL]\n"
+ "ld1b { z16.b }, p1/Z, [x25, #3, MUL VL]\n"
+ "sub x21, x25, #0x10\n"
+ "sub x20, x28, #0x8\n"
+ "lsl z20.b, z3.b, #0x4\n"
+ "lsl z4.b, z6.b, #0x4\n"
+ "ld1rqb { z10.b }, p1/Z, [x28, #32]\n"
+ "ld1rqb { z23.b }, p1/Z, [x28, #48]\n"
+ "and z3.b, z3.b, #0xf0\n"
+ "and z6.b, z6.b, #0xf0\n"
+ "ld1rqb { z11.b }, p1/Z, [x28, #64]\n"
+ "ld1rqb { z7.b }, p1/Z, [x28, #80]\n"
+ "lsl z8.b, z29.b, #0x4\n"
+ "lsl z14.b, z16.b, #0x4\n"
+ "ld1rqb { z18.b }, p1/Z, [x28, #96]\n"
+ "ld1rqb { z30.b }, p1/Z, [x28, #112]\n"
+ ".inst 0x45149b42 // smmla z2.s, z26.b, z20.b\n"
+ ".inst 0x45049b59 // smmla z25.s, z26.b, z4.b\n"
+ "and z29.b, z29.b, #0xf0\n"
+ "ld1h { z17.s }, p1/Z, [x21]\n"
+ ".inst 0x45149abb // smmla z27.s, z21.b, z20.b\n"
+ ".inst 0x45049ab3 // smmla z19.s, z21.b, z4.b\n"
+ "and z16.b, z16.b, #0xf0\n"
+ "ld1h { z4.s }, p0/Z, [x20]\n"
+ "subs x22, x22, #0x1\n"
+ "add x28, x28, #0x88\n"
+ "fcvt z17.s, p1/m, z17.h\n"
+ "add x25, x25, #0x90\n"
+ ".inst 0x45089942 // smmla z2.s, z10.b, z8.b\n"
+ ".inst 0x450e9959 // smmla z25.s, z10.b, z14.b\n"
+ "fcvt z4.s, p1/m, z4.h\n"
+ ".inst 0x45089afb // smmla z27.s, z23.b, z8.b\n"
+ ".inst 0x450e9af3 // smmla z19.s, z23.b, z14.b\n"
+ "fscale z17.s, p1/m, z17.s, z28.s\n"
+ "mov z4.q, z4.q[0]\n"
+ ".inst 0x45039962 // smmla z2.s, z11.b, z3.b\n"
+ ".inst 0x45069979 // smmla z25.s, z11.b, z6.b\n"
+ "fmul z23.s, z17.s, z4.s[0]\n"
+ "fmul z9.s, z17.s, z4.s[1]\n"
+ "fmul z21.s, z17.s, z4.s[2]\n"
+ "fmul z4.s, z17.s, z4.s[3]\n"
+ ".inst 0x450398fb // smmla z27.s, z7.b, z3.b\n"
+ ".inst 0x450698f3 // smmla z19.s, z7.b, z6.b\n"
+ ".inst 0x451d9a42 // smmla z2.s, z18.b, z29.b\n"
+ ".inst 0x45109a59 // smmla z25.s, z18.b, z16.b\n"
+ ".inst 0x451d9bdb // smmla z27.s, z30.b, z29.b\n"
+ ".inst 0x45109bd3 // smmla z19.s, z30.b, z16.b\n"
+ "uzp1 z31.d, z2.d, z25.d\n"
+ "uzp2 z13.d, z2.d, z25.d\n"
+ "scvtf z31.s, p1/m, z31.s\n"
+ "uzp1 z17.d, z27.d, z19.d\n"
+ "uzp2 z18.d, z27.d, z19.d\n"
+ "scvtf z13.s, p1/m, z13.s\n"
+ "fmla z24.s, p1/M, z31.s, z23.s\n"
+ "scvtf z17.s, p1/m, z17.s\n"
+ "scvtf z18.s, p1/m, z18.s\n"
+ "fmla z15.s, p1/M, z13.s, z9.s\n"
+ "fmla z12.s, p1/M, z17.s, z21.s\n"
+ "fmla z0.s, p1/M, z18.s, z4.s\n"
+ "bgt 7b\n"
+ "mov x20, %x[res_ptr]\n"
+ "cmp x13, #0x1\n"
+ "st1w { z24.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "cmp x13, #0x2\n"
+ "st1w { z15.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "cmp x13, #0x3\n"
+ "st1w { z12.s }, p1, [x20]\n"
+ "add x20, x20, %x[res_stride]\n"
+ "ble 8f\n"
+ "st1w { z0.s }, p1, [x20]\n"
+ "8:" // Row tail: Accumulator store skip
+ "subs x24, x24, #0x8\n"
+ "add %x[res_ptr], %x[res_ptr], #0x20\n"
+ "bne 6b\n"
+ "subs x13, x13, #0x4\n"
+ "add %x[a_ptr], %x[a_ptr], x12\n"
+ "mov %x[res_ptr], x23\n"
+ "bgt 5b\n"
+ "9:" // Row tail: Row loop skip
+ : [a_ptr] "+&r" (a_ptr), [res_ptr] "+&r" (res_ptr)
+ : [b_ptr] "r" (b_ptr), [nr] "r" (nr), [nb] "r" (nb), [res_stride] "r" (res_stride), [nc] "r" (nc)
+ : "cc", "memory", "p0", "p1", "x9", "x10", "x11", "x12", "x13", "x20", "x21", "x22", "x23", "x24", "x25", "x26", "x27", "x28", "z0", "z1", "z2", "z3", "z4", "z5", "z6", "z7", "z8", "z9", "z10", "z11", "z12", "z13", "z14", "z15", "z16", "z17", "z18", "z19", "z20", "z21", "z22", "z23", "z24", "z25", "z26", "z27", "z28", "z29", "z30", "z31"
+ );
+ return;
+ }
+#endif // #if defined(__ARM_FEATURE_SVE) && defined(__ARM_FEATURE_MATMUL_INT8)
+
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__)
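+    // No SIMD path above was taken; fall back to the scalar reference kernel.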
+ ggml_gemm_q4_0_8x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
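+// IQ4_NL x Q8_0 GEMM micro-kernel (4 rows x 4 interleaved columns per tile): 4-bit indices are
+// expanded through the IQ4_NL codebook with a vqtbl1q_s8 table lookup and accumulated with SDOT.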
+void ggml_gemm_iq4_nl_4x4_q8_0(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 4;
+
+ assert (n % qk == 0);
+ assert (nr % 4 == 0);
+ assert (nc % ncols_interleaved == 0);
+
+ UNUSED(s);
+ UNUSED(bs);
+ UNUSED(vx);
+ UNUSED(vy);
+ UNUSED(nr);
+ UNUSED(nc);
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ const int8x16_t kvalues = vld1q_s8(kvalues_iq4nl);
+
+ for (int y = 0; y < nr / 4; y++) {
+ const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_iq4_nlx4 * b_ptr = (const block_iq4_nlx4 *) vx + (x * nb);
+
+ float32x4_t sumf[4];
+ for (int m = 0; m < 4; m++) {
+ sumf[m] = vdupq_n_f32(0);
+ }
+
+ for (int l = 0; l < nb; l++) {
+ float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *)a_ptr[l].d));
+ float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *)b_ptr[l].d));
+
+ int32x4_t sumi_0 = vdupq_n_s32(0);
+ int32x4_t sumi_1 = vdupq_n_s32(0);
+ int32x4_t sumi_2 = vdupq_n_s32(0);
+ int32x4_t sumi_3 = vdupq_n_s32(0);
+
+ for (int k = 0; k < 4; k++) {
+ int8x16_t a_0 = vld1q_s8(a_ptr[l].qs + 16 * k + 0);
+ int8x16_t a_1 = vld1q_s8(a_ptr[l].qs + 16 * k + 64);
+
+ uint8x16_t b = vld1q_u8(b_ptr[l].qs + 16 * k);
+ int8x16_t b_hi = vqtbl1q_s8(kvalues, b >> 4);
+ int8x16_t b_lo = vqtbl1q_s8(kvalues, b & 0xF);
+
+ sumi_0 = vdotq_laneq_s32(sumi_0, b_lo, a_0, 0);
+ sumi_1 = vdotq_laneq_s32(sumi_1, b_lo, a_0, 1);
+ sumi_2 = vdotq_laneq_s32(sumi_2, b_lo, a_0, 2);
+ sumi_3 = vdotq_laneq_s32(sumi_3, b_lo, a_0, 3);
+ sumi_0 = vdotq_laneq_s32(sumi_0, b_hi, a_1, 0);
+ sumi_1 = vdotq_laneq_s32(sumi_1, b_hi, a_1, 1);
+ sumi_2 = vdotq_laneq_s32(sumi_2, b_hi, a_1, 2);
+ sumi_3 = vdotq_laneq_s32(sumi_3, b_hi, a_1, 3);
+ }
+
+ sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0));
+ sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1));
+ sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2));
+ sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3));
+ }
+
+ for (int m = 0; m < 4; m++) {
+ vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]);
+ }
+ }
+ }
+ return;
+#endif // #if ! ((defined(_MSC_VER)) && ! defined(__clang__)) && defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemm_iq4_nl_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
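+// Q4_K x Q8_K GEMM micro-kernel (4 rows x 8 interleaved columns per tile, SDOT): subblock dot
+// products are scaled by the decoded 6-bit scales and corrected by the mins via the Q8 bsums.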
+void ggml_gemm_q4_K_8x4_q8_K(int n, float * GGML_RESTRICT s, size_t bs, const void * GGML_RESTRICT vx, const void * GGML_RESTRICT vy, int nr, int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 4;
+
+ assert(n % qk == 0);
+ assert(nr % 4 == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ constexpr int q8_k_blocklen = 4;
+    constexpr int acc_size = 2 * 4; // 4 q8 rows × 2 col groups
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+
+    // 8 accumulators: 4 q8 rows × 2 col groups (0-3, 4-7)
+ float32x4_t acc_f32[acc_size];
+
+ for (int y = 0; y < nr / q8_k_blocklen; y++) {
+ const block_q8_Kx4 * GGML_RESTRICT q8_ptr = (const block_q8_Kx4 *) vy + (y * nb);
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < acc_size; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ // d4 0 1 2 3, 4 5 6 7
+ float32x4_t q4_d_0123 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d));
+ float32x4_t q4_d_4567 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].d + 4));
+ // d8 0 1 2 3
+ float32x4_t q8_d_0123 = vld1q_f32(q8_ptr[b].d);
+ // mins
+ float32x4_t q4_dmin_0123 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin));
+ float32x4_t q4_dmin_4567 = vcvt_f32_f16(vld1_f16((const __fp16 *) q4_ptr[b].dmin + 4));
+
+ // Precomputation of scales and mins
+ float32x4_t sbd_scale_0123[q8_k_blocklen];
+ float32x4_t sbd_scale_4567[q8_k_blocklen];
+ float32x4_t sbd_min_0123[q8_k_blocklen];
+ float32x4_t sbd_min_4567[q8_k_blocklen];
+
+ sbd_scale_0123[0] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 0);
+ sbd_scale_4567[0] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 0);
+ sbd_min_0123[0] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 0);
+ sbd_min_4567[0] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 0);
+
+ sbd_scale_0123[1] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 1);
+ sbd_scale_4567[1] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 1);
+ sbd_min_0123[1] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 1);
+ sbd_min_4567[1] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 1);
+
+ sbd_scale_0123[2] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 2);
+ sbd_scale_4567[2] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 2);
+ sbd_min_0123[2] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 2);
+ sbd_min_4567[2] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 2);
+
+ sbd_scale_0123[3] = vmulq_laneq_f32(q4_d_0123, q8_d_0123, 3);
+ sbd_scale_4567[3] = vmulq_laneq_f32(q4_d_4567, q8_d_0123, 3);
+ sbd_min_0123[3] = vmulq_laneq_f32(q4_dmin_0123, q8_d_0123, 3);
+ sbd_min_4567[3] = vmulq_laneq_f32(q4_dmin_4567, q8_d_0123, 3);
+
+                // Precompute bsums: each vpaddq folds one row's 16 per-16-element sums into 8 per-32-element sums
+ const int16x8_t bsums[q8_k_blocklen] = {
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 0), vld1q_s16(q8_ptr[b].bsums + 16 * 0 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 1), vld1q_s16(q8_ptr[b].bsums + 16 * 1 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 2), vld1q_s16(q8_ptr[b].bsums + 16 * 2 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 3), vld1q_s16(q8_ptr[b].bsums + 16 * 3 + 8)),
+ };
+ int16_t bsums_arr[QK_K / 64][8];
+ for (int q8_row = 0; q8_row < 4; q8_row++) {
+ vst1q_s16(bsums_arr[q8_row], bsums[q8_row]);
+ }
+
+                // interleaved bias_acc: [0]->r0 c0123, [1]->r0 c4567, [2]->r1 c0123, [3]->r1 c4567, ..
+ int32x4_t bias_acc[acc_size];
+ for (int i = 0; i < acc_size; i++) {
+ bias_acc[i] = vdupq_n_s32(0);
+ }
+
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ // Int accumulators for qs vecdot (4 row x 2 col quartets)
+ int32x4_t acc_lo[acc_size];
+ int32x4_t acc_hi[acc_size];
+ for (int i = 0; i < acc_size; i++) {
+ acc_lo[i] = vdupq_n_s32(0);
+ acc_hi[i] = vdupq_n_s32(0);
+ }
+ // Need scales for the low and high nibbles
+ // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total
+ int16x8_t q4sb_scales[2];
+ int16x8_t q4sb_mins[2];
+ for (int i = 0; i < 2; i++) {
+ int8_t aux_q4sb[8];
+ const int offset = sb * 24 + i * 12;
+ decode_q_Kx8_6bit_scales(&q4_ptr[b].scales[offset], &q4sb_mins[i], aux_q4sb);
+ q4sb_scales[i] = vmovl_s8(vld1_s8(aux_q4sb));
+ }
+
+ constexpr int reads_per_sb = 8; // 8 * 16 bytes each => 32 qs * 4 rows
+ for (int k = 0; k < reads_per_sb; k++) {
+ const int8x16_t q8_blk0 = vld1q_s8(q8_ptr[b].qs + sb * 256 + 16 * k);
+ const int8x16_t q8_blk1 = vld1q_s8(q8_ptr[b].qs + sb * 256 + 16 * k + 128);
+
+ // 0..3 & 32..35
+ const uint8x16_t q4_0123 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 32 * k);
+ const uint8x16_t q4_4567 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 32 * k + 16);
+
+ const int8x16_t q4_0123_lo = vreinterpretq_s8_u8(vandq_u8(q4_0123, m4b));
+ const int8x16_t q4_0123_hi = vreinterpretq_s8_u8(vshrq_n_u8(q4_0123, 4));
+
+ acc_lo[0] = vdotq_laneq_s32(acc_lo[0], q4_0123_lo, q8_blk0, 0); // 0..3 r0 c0123
+ acc_lo[1] = vdotq_laneq_s32(acc_lo[1], q4_0123_lo, q8_blk0, 1); // 0..3 r1 c0123
+ acc_lo[2] = vdotq_laneq_s32(acc_lo[2], q4_0123_lo, q8_blk0, 2); // 0..3 r2 c0123
+ acc_lo[3] = vdotq_laneq_s32(acc_lo[3], q4_0123_lo, q8_blk0, 3); // 0..3 r3 c0123
+
+ acc_hi[0] = vdotq_laneq_s32(acc_hi[0], q4_0123_hi, q8_blk1, 0); // 32..35 r0 c0123
+ acc_hi[1] = vdotq_laneq_s32(acc_hi[1], q4_0123_hi, q8_blk1, 1); // 32..35 r1 c0123
+ acc_hi[2] = vdotq_laneq_s32(acc_hi[2], q4_0123_hi, q8_blk1, 2); // 32..35 r2 c0123
+ acc_hi[3] = vdotq_laneq_s32(acc_hi[3], q4_0123_hi, q8_blk1, 3); // 32..35 r3 c0123
+
+ const int8x16_t q4_4567_lo = vreinterpretq_s8_u8(vandq_u8(q4_4567, m4b));
+ const int8x16_t q4_4567_hi = vreinterpretq_s8_u8(vshrq_n_u8(q4_4567, 4));
+
+ acc_lo[4] = vdotq_laneq_s32(acc_lo[4], q4_4567_lo, q8_blk0, 0); // 0..3 r0 c4567
+ acc_lo[5] = vdotq_laneq_s32(acc_lo[5], q4_4567_lo, q8_blk0, 1); // 0..3 r1 c4567
+ acc_lo[6] = vdotq_laneq_s32(acc_lo[6], q4_4567_lo, q8_blk0, 2); // 0..3 r2 c4567
+ acc_lo[7] = vdotq_laneq_s32(acc_lo[7], q4_4567_lo, q8_blk0, 3); // 0..3 r3 c4567
+
+ acc_hi[4] = vdotq_laneq_s32(acc_hi[4], q4_4567_hi, q8_blk1, 0); // 32..35 r0 c4567
+ acc_hi[5] = vdotq_laneq_s32(acc_hi[5], q4_4567_hi, q8_blk1, 1); // 32..35 r1 c4567
+ acc_hi[6] = vdotq_laneq_s32(acc_hi[6], q4_4567_hi, q8_blk1, 2); // 32..35 r2 c4567
+ acc_hi[7] = vdotq_laneq_s32(acc_hi[7], q4_4567_hi, q8_blk1, 3); // 32..35 r3 c4567
+ }
+
+ // Scale and bias application
+ // acc is stored interleaved to match output layout
+ const int16x4_t sc_0123_lo = vget_low_s16(q4sb_scales[0]);
+ const int16x4_t sc_4567_lo = vget_high_s16(q4sb_scales[0]);
+ const int16x4_t sc_0123_hi = vget_low_s16(q4sb_scales[1]);
+ const int16x4_t sc_4567_hi = vget_high_s16(q4sb_scales[1]);
+ for (int row = 0; row < q8_k_blocklen; row++) {
+                        // Apply subblock scales
+ // row c0123 blk0 and blk1
+ const float32x4_t sumf_0123 =
+ vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_0123_lo), acc_lo[row]),
+ vmulq_s32(vmovl_s16(sc_0123_hi), acc_hi[row])));
+ acc_f32[2 * row] = vfmaq_f32(acc_f32[2 * row], sbd_scale_0123[row], sumf_0123);
+
+ // row c4567 blk0 and blk1
+ const float32x4_t sumf_4567 =
+ vcvtq_f32_s32(vaddq_s32(vmulq_s32(vmovl_s16(sc_4567_lo), acc_lo[row + 4]),
+ vmulq_s32(vmovl_s16(sc_4567_hi), acc_hi[row + 4])));
+ acc_f32[2 * row + 1] = vfmaq_f32(acc_f32[2 * row + 1], sbd_scale_4567[row], sumf_4567);
+
+ // Bias
+ const int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[sb][row * 2]);
+ const int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[sb][row * 2 + 1]);
+
+ // row c0123 blk0 and blk1
+ bias_acc[2 * row] = vmlal_s16(bias_acc[2 * row], bsums_vec_lo, vget_low_s16(q4sb_mins[0]));
+ bias_acc[2 * row] = vmlal_s16(bias_acc[2 * row], bsums_vec_hi, vget_low_s16(q4sb_mins[1]));
+
+ // row c4567 blk0 and blk1
+ bias_acc[2 * row + 1] =
+ vmlal_s16(bias_acc[2 * row + 1], bsums_vec_lo, vget_high_s16(q4sb_mins[0]));
+ bias_acc[2 * row + 1] =
+ vmlal_s16(bias_acc[2 * row + 1], bsums_vec_hi, vget_high_s16(q4sb_mins[1]));
+ }
+ } // for sb
+
+ for (int row = 0; row < q8_k_blocklen; row++) {
+ acc_f32[2 * row] = vmlsq_f32(acc_f32[2 * row], vcvtq_f32_s32(bias_acc[2 * row]), sbd_min_0123[row]);
+ acc_f32[2 * row + 1] =
+ vmlsq_f32(acc_f32[2 * row + 1], vcvtq_f32_s32(bias_acc[2 * row + 1]), sbd_min_4567[row]);
+ }
+ } // for b
+
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ int row = y * q8_k_blocklen + i;
+ for (int j = 0; j < 2; j++) {
+ int col = x * ncols_interleaved + j * 4;
+ int offset = row * bs + col;
+ vst1q_f32(s + offset, acc_f32[2 * i + j]);
+ }
+ }
+ } // for x
+ } // for y
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemm_q4_K_8x4_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
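+// Q4_K x Q8_K GEMM micro-kernel (4 rows x 8 interleaved columns per tile, i8mm): SMMLA consumes
+// row pairs against column pairs, producing 2x2 int32 tiles that are re-interleaved into the
+// output layout before the superblock scales and mins are applied.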
+void ggml_gemm_q4_K_8x8_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nr % 4 == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ constexpr int q8_k_blocklen = 4;
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+
+    // 8 accumulators: 4 q8 rows × 2 col groups (0-3, 4-7)
+ float32x4_t acc_f32[blocklen];
+
+ for (int y = 0; y < nr / q8_k_blocklen; y++) {
+ const block_q8_Kx4 * GGML_RESTRICT q8_ptr = (const block_q8_Kx4 *) vy + (y * nb);
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q4_Kx8 * GGML_RESTRICT q4_ptr = (const block_q4_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < blocklen; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+                // bsums pairs belong to the same q8_K subblock, so fold each pair with vpaddq
+ const int16x8_t bsums[4]{
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 0), vld1q_s16(q8_ptr[b].bsums + 16 * 0 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 1), vld1q_s16(q8_ptr[b].bsums + 16 * 1 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 2), vld1q_s16(q8_ptr[b].bsums + 16 * 2 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 3), vld1q_s16(q8_ptr[b].bsums + 16 * 3 + 8)),
+ };
+ int16_t bsums_arr[4][8];
+ for (int q8_row = 0; q8_row < 4; q8_row++) {
+ vst1q_s16(bsums_arr[q8_row], bsums[q8_row]);
+ }
+
+ int32x4_t sb_acc[4]; // Aux accumulators to store subblock (partial) results
+ int32x4_t acc[8]; // rows 01 stored in [0][1][2][3] rows 23 stored in [4][5][6][7]
+ int32x4_t bias_acc[8]; // interleaved bias_acc: [0]->r0 0123, [1]->r0 4567, [2]->r1 0123 ...
+ for (int i = 0; i < 8; i++) {
+ acc[i] = vdupq_n_s32(0);
+ bias_acc[i] = vdupq_n_s32(0);
+ }
+
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ // Need scales for the low and high nibbles
+ // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total
+ int8_t q4sb_scales[2][8];
+                    int16x8_t q4sb_mins[2]; // int16, as it is needed for bias_acc later
+ for (int i = 0; i < 2; i++) {
+ const int offset = sb * 24 + i * 12;
+ decode_q_Kx8_6bit_scales(&q4_ptr[b].scales[offset], &q4sb_mins[i], q4sb_scales[i]);
+ }
+
+ // q8_ptr[b].qs has interleaved Q8 rows (01, 23)
+ const int8_t * q8_base = q8_ptr[b].qs + sb * 256;
+
+ int8x16_t q8_qs_01[8];
+ int8x16_t q8_qs_23[8];
+
+                    // Load 32 bytes per row pair, one subblock at a time
+ for (int i = 0; i < 8; i++) {
+ const int offset = i * 32; // 16 for row 01, 16 for row 23
+ q8_qs_01[i] = vld1q_s8(q8_base + offset);
+ q8_qs_23[i] = vld1q_s8(q8_base + offset + 16);
+ }
+
+ const int8x16_t q8s[2][8] = {
+ { q8_qs_01[0], q8_qs_01[1], q8_qs_01[2], q8_qs_01[3],
+ q8_qs_01[4], q8_qs_01[5], q8_qs_01[6], q8_qs_01[7] },
+ { q8_qs_23[0], q8_qs_23[1], q8_qs_23[2], q8_qs_23[3],
+ q8_qs_23[4], q8_qs_23[5], q8_qs_23[6], q8_qs_23[7] },
+ };
+
+ // Q4s columns iterated in pairs (01, 23, 45, 67)
+ for (int cp = 0; cp < ncols_interleaved / 2; cp++) {
+ for (int i = 0; i < 4; i++) {
+ sb_acc[i] = vdupq_n_s32(0);
+ }
+
+ uint8x16_t q4_qs_cp_0 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 0); // 0 .. 7 & 32..39
+ uint8x16_t q4_qs_cp_1 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 64); // 8 ..15 & 40..47
+ uint8x16_t q4_qs_cp_2 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 128); // 16..23 & 48..55
+ uint8x16_t q4_qs_cp_3 = vld1q_u8(q4_ptr[b].qs + sb * QK_K + 16 * cp + 192); // 24..31 & 56..63
+ const int8x16_t q4_nibbles[2][4] = {
+ {
+ vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_0, m4b)),
+ vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_1, m4b)),
+ vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_2, m4b)),
+ vreinterpretq_s8_u8(vandq_u8(q4_qs_cp_3, m4b)),
+ },
+ {
+ vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_0, 4)),
+ vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_1, 4)),
+ vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_2, 4)),
+ vreinterpretq_s8_u8(vshrq_n_u8(q4_qs_cp_3, 4)),
+ }
+ };
+
+                        // Compute the Qs multiply-accumulate for each q8 row pair (rp = rows 01 and 23)
+                        // and each internal 32-qs half of the subblock (blk)
+ for (int rp = 0; rp < 2; rp++) {
+ for (int blk = 0; blk < 2; blk++) {
+ const int8x16_t * q8 = &q8s[rp][4 * blk];
+ const int8x16_t * q4 = q4_nibbles[blk];
+ int32x4_t acc = sb_acc[2 * rp + blk];
+ // mul add for each qs in the same subblock
+ for (int qs_offset = 0; qs_offset < 4; qs_offset++) {
+ acc = vmmlaq_s32(acc, q4[qs_offset], q8[qs_offset]);
+ }
+ sb_acc[2 * rp + blk] = acc;
+ }
+ }
+
+ // Scales[i] corresponds to column i
+ const int scale_offset = cp * 2;
+ const int32_t scale_00 = q4sb_scales[0][scale_offset];
+ const int32_t scale_01 = q4sb_scales[0][scale_offset + 1];
+ const int32_t scale_10 = q4sb_scales[1][scale_offset];
+ const int32_t scale_11 = q4sb_scales[1][scale_offset + 1];
+ const int32x4_t block_scale_0 = vcombine_s32(vdup_n_s32(scale_00), vdup_n_s32(scale_01));
+ const int32x4_t block_scale_1 = vcombine_s32(vdup_n_s32(scale_10), vdup_n_s32(scale_11));
+
+ acc[cp] = vmlaq_s32(acc[cp], sb_acc[0], block_scale_0);
+ acc[cp + 4] = vmlaq_s32(acc[cp + 4], sb_acc[2], block_scale_0);
+ acc[cp] = vmlaq_s32(acc[cp], sb_acc[1], block_scale_1);
+ acc[cp + 4] = vmlaq_s32(acc[cp + 4], sb_acc[3], block_scale_1);
+ }
+
+                    // Accumulate the bias terms: bsums multiplied by the subblock mins
+ for (int q8_row = 0; q8_row < 4; q8_row++) {
+                        // Each pair of subblocks shares the same bsums
+                        // Load the scalar bsum and broadcast it to a vector (vdup_n_s16).
+ int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[sb][q8_row * 2]);
+ int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[sb][q8_row * 2 + 1]);
+
+ bias_acc[2 * q8_row] =
+ vmlal_s16(bias_acc[2 * q8_row], bsums_vec_lo, vget_low_s16(q4sb_mins[0]));
+ bias_acc[2 * q8_row] =
+ vmlal_s16(bias_acc[2 * q8_row], bsums_vec_hi, vget_low_s16(q4sb_mins[1]));
+ bias_acc[2 * q8_row + 1] =
+ vmlal_s16(bias_acc[2 * q8_row + 1], bsums_vec_lo, vget_high_s16(q4sb_mins[0]));
+ bias_acc[2 * q8_row + 1] =
+ vmlal_s16(bias_acc[2 * q8_row + 1], bsums_vec_hi, vget_high_s16(q4sb_mins[1]));
+ }
+ } // for sb
+
+                // Reorder the i8mm 2x2 tile output to match the bias and output layout
+ for (int i = 0; i < 8; i++) {
+ int32x2x2_t aux = vzip_s32(vget_low_s32(acc[i]), vget_high_s32(acc[i]));
+ acc[i] = vcombine_s32(aux.val[0], aux.val[1]);
+ }
+ int32x4_t reorder_acc[8] = {
+ vcombine_s32(vget_low_s32(acc[0]), vget_low_s32(acc[1])),
+ vcombine_s32(vget_low_s32(acc[2]), vget_low_s32(acc[3])),
+ vcombine_s32(vget_high_s32(acc[0]), vget_high_s32(acc[1])),
+ vcombine_s32(vget_high_s32(acc[2]), vget_high_s32(acc[3])),
+ vcombine_s32(vget_low_s32(acc[4]), vget_low_s32(acc[5])),
+ vcombine_s32(vget_low_s32(acc[6]), vget_low_s32(acc[7])),
+ vcombine_s32(vget_high_s32(acc[4]), vget_high_s32(acc[5])),
+ vcombine_s32(vget_high_s32(acc[6]), vget_high_s32(acc[7])),
+ };
+
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ for (int j = 0; j < 2; j++) {
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d[i]);
+ float32x4_t q4_dmin = vcvt_f32_f16(vld1_f16((const __fp16 *) (q4_ptr[b].dmin + j * 4)));
+ const float32x4_t dmins = vmulq_f32(q4_dmin, q8_d);
+
+ float32x4_t q4_d = vcvt_f32_f16(vld1_f16((const __fp16 *) (q4_ptr[b].d + j * 4)));
+ const float32x4_t scale = vmulq_f32(q4_d, q8_d);
+
+ acc_f32[2 * i + j] = vmlsq_f32(acc_f32[2 * i + j], vcvtq_f32_s32(bias_acc[2 * i + j]), dmins);
+ acc_f32[2 * i + j] =
+ vmlaq_f32(acc_f32[2 * i + j], vcvtq_f32_s32(reorder_acc[2 * i + j]), scale);
+ }
+ }
+ } // for b
+
+ // With the previous reorder, the tile is already in the correct memory layout.
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ int row = y * q8_k_blocklen + i;
+ for (int j = 0; j < 2; j++) {
+ int col = x * ncols_interleaved + j * 4;
+ int offset = row * bs + col;
+ vst1q_f32(s + offset, acc_f32[2 * i + j]);
+ }
+ }
+ } // for x
+ } // for y
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ ggml_gemm_q4_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
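+// Q5_K x Q8_K GEMM micro-kernel (4 rows x 8 interleaved columns per tile, i8mm): same flow as the
+// Q4_K kernel above, except the fifth (high) bit is pulled from qh and inserted into each nibble
+// before the SMMLA accumulation.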
+void ggml_gemm_q5_K_8x8_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nr % 4 == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ constexpr int q8_k_blocklen = 4;
+ constexpr int col_pairs = ncols_interleaved / 2;
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ const uint8x16_t mone = vdupq_n_u8(1);
+ const uint8x16_t mtwo = vdupq_n_u8(2);
+
+    // 8 accumulators: 4 q8 rows × 2 col groups (0-3, 4-7)
+ float32x4_t acc_f32[blocklen];
+
+ for (int y = 0; y < nr / q8_k_blocklen; y++) {
+ const block_q8_Kx4 * GGML_RESTRICT q8_ptr = (const block_q8_Kx4 *) vy + (y * nb);
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q5_Kx8 * GGML_RESTRICT q5_ptr = (const block_q5_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < blocklen; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+                // bsums pairs belong to the same q8_K subblock, so fold each pair with vpaddq
+ const int16x8_t bsums[4]{
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 0), vld1q_s16(q8_ptr[b].bsums + 16 * 0 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 1), vld1q_s16(q8_ptr[b].bsums + 16 * 1 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 2), vld1q_s16(q8_ptr[b].bsums + 16 * 2 + 8)),
+ vpaddq_s16(vld1q_s16(q8_ptr[b].bsums + 16 * 3), vld1q_s16(q8_ptr[b].bsums + 16 * 3 + 8)),
+ };
+ int16_t bsums_arr[4][8];
+ for (int q8_row = 0; q8_row < 4; q8_row++) {
+ vst1q_s16(bsums_arr[q8_row], bsums[q8_row]);
+ }
+
+ int32x4_t sb_acc[4]; // Aux accumulators to store subblock (partial) results
+ int32x4_t acc[8]; // rows 01 stored in [0][1][2][3] rows 23 stored in [4][5][6][7]
+ int32x4_t bias_acc[8]; // interleaved bias_acc: [0]->r0 0123, [1]->r0 4567, [2]->r1 0123 ...
+ for (int i = 0; i < 8; i++) {
+ acc[i] = vdupq_n_s32(0);
+ bias_acc[i] = vdupq_n_s32(0);
+ }
+
+ // Load qh once per block and shift after each subblock
+ const uint8_t * qh_base = q5_ptr[b].qh;
+ uint8x16_t qh[col_pairs][4];
+ for (int cp = 0; cp < col_pairs; cp++) {
+ qh[cp][0] = vld1q_u8(qh_base + 16 * cp);
+ qh[cp][1] = vld1q_u8(qh_base + 16 * cp + 64);
+ qh[cp][2] = vld1q_u8(qh_base + 16 * cp + 128);
+ qh[cp][3] = vld1q_u8(qh_base + 16 * cp + 192);
+ }
+
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ // Need scales for the low and high nibbles
+ // 2 * 12 = 24 bytes per subblock, 4 sbs -> 4 * 24 = 96 bytes total
+ int8_t q5sb_scales[2][8];
+                    int16x8_t q5sb_mins[2]; // int16, as it is needed for bias_acc later
+ for (int i = 0; i < 2; i++) {
+ const int offset = sb * 24 + i * 12;
+ decode_q_Kx8_6bit_scales(&q5_ptr[b].scales[offset], &q5sb_mins[i], q5sb_scales[i]);
+ }
+
+ // q8_ptr[b].qs has interleaved Q8 rows (01, 23)
+ const int8_t * q8_base = q8_ptr[b].qs + sb * 256;
+
+ int8x16_t q8_qs_01[8];
+ int8x16_t q8_qs_23[8];
+
+                    // Load 32 bytes per row pair, one subblock at a time
+ for (int i = 0; i < 8; i++) {
+ const int offset = i * 32; // 16 for row 01, 16 for row 23
+ q8_qs_01[i] = vld1q_s8(q8_base + offset);
+ q8_qs_23[i] = vld1q_s8(q8_base + offset + 16);
+ }
+
+ const int8x16_t q8s[2][8] = {
+ { q8_qs_01[0], q8_qs_01[1], q8_qs_01[2], q8_qs_01[3], q8_qs_01[4], q8_qs_01[5], q8_qs_01[6],
+ q8_qs_01[7] },
+ { q8_qs_23[0], q8_qs_23[1], q8_qs_23[2], q8_qs_23[3], q8_qs_23[4], q8_qs_23[5], q8_qs_23[6],
+ q8_qs_23[7] },
+ };
+
+ // Q5s columns iterated in pairs (01, 23, 45, 67)
+ for (int cp = 0; cp < col_pairs; cp++) {
+ for (int i = 0; i < 4; i++) {
+ sb_acc[i] = vdupq_n_s32(0);
+ }
+
+ uint8x16_t qs_cp_0 = vld1q_u8(q5_ptr[b].qs + sb * QK_K + 16 * cp + 0); // 0 .. 7 & 32..39
+ uint8x16_t qs_cp_1 = vld1q_u8(q5_ptr[b].qs + sb * QK_K + 16 * cp + 64); // 8 ..15 & 40..47
+ uint8x16_t qs_cp_2 = vld1q_u8(q5_ptr[b].qs + sb * QK_K + 16 * cp + 128); // 16..23 & 48..55
+ uint8x16_t qs_cp_3 = vld1q_u8(q5_ptr[b].qs + sb * QK_K + 16 * cp + 192); // 24..31 & 56..63
+
+                        // This is the only part of the algorithm that differs from Q4_K:
+                        // extract the high bits and pack them into 5-bit weights
+ uint8x16_t hbit_lo_0 = vandq_u8(qh[cp][0], mone);
+ uint8x16_t hbit_hi_0 = vshlq_n_u8(vandq_u8(qh[cp][0], mtwo), 3);
+ qh[cp][0] = vshrq_n_u8(qh[cp][0], 2);
+                        // Same as Q4_K from here: i8mm multiply-accumulate on the reconstructed weights.
+ const int8x16_t qs_lo_0 = vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_cp_0, m4b), hbit_lo_0, 4));
+ int32x4_t acc_0 = sb_acc[0];
+ acc_0 = vmmlaq_s32(acc_0, qs_lo_0, q8s[0][0]);
+ int32x4_t acc_2 = sb_acc[2];
+ acc_2 = vmmlaq_s32(acc_2, qs_lo_0, q8s[1][0]);
+ const int8x16_t qs_hi_0 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_cp_0, 4), hbit_hi_0));
+ int32x4_t acc_1 = sb_acc[1];
+ acc_1 = vmmlaq_s32(acc_1, qs_hi_0, q8s[0][4]);
+ int32x4_t acc_3 = sb_acc[3];
+ acc_3 = vmmlaq_s32(acc_3, qs_hi_0, q8s[1][4]);
+
+                        // Repeat for the remaining three 16-byte chunks (elements 8..15, 16..23, 24..31)
+ uint8x16_t hbit_hi_1 = vshlq_n_u8(vandq_u8(qh[cp][1], mtwo), 3);
+ uint8x16_t hbit_lo_1 = vandq_u8(qh[cp][1], mone);
+ qh[cp][1] = vshrq_n_u8(qh[cp][1], 2);
+ const int8x16_t qs_lo_1 = vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_cp_1, m4b), hbit_lo_1, 4));
+ acc_0 = vmmlaq_s32(acc_0, qs_lo_1, q8s[0][1]);
+ acc_2 = vmmlaq_s32(acc_2, qs_lo_1, q8s[1][1]);
+ const int8x16_t qs_hi_1 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_cp_1, 4), hbit_hi_1));
+ acc_1 = vmmlaq_s32(acc_1, qs_hi_1, q8s[0][5]);
+ acc_3 = vmmlaq_s32(acc_3, qs_hi_1, q8s[1][5]);
+
+ uint8x16_t hbit_hi_2 = vshlq_n_u8(vandq_u8(qh[cp][2], mtwo), 3);
+ uint8x16_t hbit_lo_2 = vandq_u8(qh[cp][2], mone);
+ qh[cp][2] = vshrq_n_u8(qh[cp][2], 2);
+ const int8x16_t qs_lo_2 = vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_cp_2, m4b), hbit_lo_2, 4));
+ acc_0 = vmmlaq_s32(acc_0, qs_lo_2, q8s[0][2]);
+ acc_2 = vmmlaq_s32(acc_2, qs_lo_2, q8s[1][2]);
+ const int8x16_t qs_hi_2 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_cp_2, 4), hbit_hi_2));
+ acc_1 = vmmlaq_s32(acc_1, qs_hi_2, q8s[0][6]);
+ acc_3 = vmmlaq_s32(acc_3, qs_hi_2, q8s[1][6]);
+
+ uint8x16_t hbit_lo_3 = vandq_u8(qh[cp][3], mone);
+ uint8x16_t hbit_hi_3 = vshlq_n_u8(vandq_u8(qh[cp][3], mtwo), 3);
+ qh[cp][3] = vshrq_n_u8(qh[cp][3], 2);
+ const int8x16_t qs_lo_3 = vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(qs_cp_3, m4b), hbit_lo_3, 4));
+ acc_0 = vmmlaq_s32(acc_0, qs_lo_3, q8s[0][3]);
+ sb_acc[0] = acc_0;
+ acc_2 = vmmlaq_s32(acc_2, qs_lo_3, q8s[1][3]);
+ sb_acc[2] = acc_2;
+
+ // Scales[i] corresponds to column i
+ const int scale_offset = cp * 2;
+ const int32_t s0 = q5sb_scales[0][scale_offset];
+ const int32_t s1 = q5sb_scales[0][scale_offset + 1];
+ const int32x4_t block_scale = vcombine_s32(vdup_n_s32(s0), vdup_n_s32(s1));
+ acc[cp] = vmlaq_s32(acc[cp], sb_acc[0], block_scale);
+ acc[cp + 4] = vmlaq_s32(acc[cp + 4], sb_acc[2], block_scale);
+
+ const int8x16_t qs_hi_3 = vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(qs_cp_3, 4), hbit_hi_3));
+ acc_1 = vmmlaq_s32(acc_1, qs_hi_3, q8s[0][7]);
+ sb_acc[1] = acc_1;
+ acc_3 = vmmlaq_s32(acc_3, qs_hi_3, q8s[1][7]);
+ sb_acc[3] = acc_3;
+
+ const int32_t s2 = q5sb_scales[1][scale_offset];
+ const int32_t s3 = q5sb_scales[1][scale_offset + 1];
+ const int32x4_t block_scale2 = vcombine_s32(vdup_n_s32(s2), vdup_n_s32(s3));
+ acc[cp] = vmlaq_s32(acc[cp], sb_acc[1], block_scale2);
+ acc[cp + 4] = vmlaq_s32(acc[cp + 4], sb_acc[3], block_scale2);
+ }
+
+                    // Accumulate the bias terms: bsums multiplied by the subblock mins
+ for (int q8_row = 0; q8_row < 4; q8_row++) {
+                        // Each pair of subblocks shares the same bsums
+                        // Load the scalar bsum and broadcast it to a vector (vdup_n_s16).
+ int16x4_t bsums_vec_lo = vdup_n_s16(bsums_arr[sb][q8_row * 2]);
+ int16x4_t bsums_vec_hi = vdup_n_s16(bsums_arr[sb][q8_row * 2 + 1]);
+
+ bias_acc[2 * q8_row] =
+ vmlal_s16(bias_acc[2 * q8_row], bsums_vec_lo, vget_low_s16(q5sb_mins[0]));
+ bias_acc[2 * q8_row] =
+ vmlal_s16(bias_acc[2 * q8_row], bsums_vec_hi, vget_low_s16(q5sb_mins[1]));
+ bias_acc[2 * q8_row + 1] =
+ vmlal_s16(bias_acc[2 * q8_row + 1], bsums_vec_lo, vget_high_s16(q5sb_mins[0]));
+ bias_acc[2 * q8_row + 1] =
+ vmlal_s16(bias_acc[2 * q8_row + 1], bsums_vec_hi, vget_high_s16(q5sb_mins[1]));
+ }
+ } // for sb
+
+                // Reorder the i8mm 2x2 tile output to match the bias and output layout
+ for (int i = 0; i < 8; i++) {
+ int32x2x2_t aux = vzip_s32(vget_low_s32(acc[i]), vget_high_s32(acc[i]));
+ acc[i] = vcombine_s32(aux.val[0], aux.val[1]);
+ }
+ int32x4_t reorder_acc[8] = {
+ vcombine_s32(vget_low_s32(acc[0]), vget_low_s32(acc[1])),
+ vcombine_s32(vget_low_s32(acc[2]), vget_low_s32(acc[3])),
+ vcombine_s32(vget_high_s32(acc[0]), vget_high_s32(acc[1])),
+ vcombine_s32(vget_high_s32(acc[2]), vget_high_s32(acc[3])),
+ vcombine_s32(vget_low_s32(acc[4]), vget_low_s32(acc[5])),
+ vcombine_s32(vget_low_s32(acc[6]), vget_low_s32(acc[7])),
+ vcombine_s32(vget_high_s32(acc[4]), vget_high_s32(acc[5])),
+ vcombine_s32(vget_high_s32(acc[6]), vget_high_s32(acc[7])),
+ };
+
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ for (int j = 0; j < 2; j++) {
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d[i]);
+ float32x4_t q5_dmin = vcvt_f32_f16(vld1_f16((const __fp16 *) (q5_ptr[b].dmin + j * 4)));
+ const float32x4_t dmins = vmulq_f32(q5_dmin, q8_d);
+
+ float32x4_t q5_d = vcvt_f32_f16(vld1_f16((const __fp16 *) (q5_ptr[b].d + j * 4)));
+ const float32x4_t scale = vmulq_f32(q5_d, q8_d);
+
+ acc_f32[2 * i + j] = vmlsq_f32(acc_f32[2 * i + j], vcvtq_f32_s32(bias_acc[2 * i + j]), dmins);
+ acc_f32[2 * i + j] =
+ vmlaq_f32(acc_f32[2 * i + j], vcvtq_f32_s32(reorder_acc[2 * i + j]), scale);
+ }
+ }
+ } // for b
+
+ // With the previous reorder, the tile is already in the correct memory layout.
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ int row = y * q8_k_blocklen + i;
+ for (int j = 0; j < 2; j++) {
+ int col = x * ncols_interleaved + j * 4;
+ int offset = row * bs + col;
+ vst1q_f32(s + offset, acc_f32[2 * i + j]);
+ }
+ }
+ } // for x
+ } // for y
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ ggml_gemm_q5_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
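+// Q6_K x Q8_K GEMM micro-kernel (4 rows x 8 interleaved columns per tile, SDOT): 6-bit weights are
+// rebuilt from the low nibbles (ql) plus two high bits (qh) and shifted to signed range by
+// subtracting 32; q6_K has per-16-element scales and no mins, so no bsums bias correction is needed.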
+void ggml_gemm_q6_K_8x4_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 4;
+
+ assert(n % qk == 0);
+ assert(nr % 4 == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ constexpr int q8_k_blocklen = 4;
+ constexpr int col_groups = ncols_interleaved / 4;
+ constexpr int acc_size = q8_k_blocklen * col_groups; // 4 rows, 2 column groups
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ const uint8x16_t mask_lo = vdupq_n_u8(0x03);
+ const uint8x16_t mask_hi = vdupq_n_u8(0x30);
+ const int8x16_t m32s = vdupq_n_s8(32);
+
+ float32x4_t acc_f32[acc_size];
+
+ for (int y = 0; y < nr / q8_k_blocklen; y++) {
+ const block_q8_Kx4 * GGML_RESTRICT q8_ptr = (const block_q8_Kx4 *) vy + (y * nb);
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q6_Kx8 * GGML_RESTRICT q6_ptr = (const block_q6_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < acc_size; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ float32x4_t q6_d_0123 = vcvt_f32_f16(vld1_f16((const __fp16 *) q6_ptr[b].d));
+ float32x4_t q6_d_4567 = vcvt_f32_f16(vld1_f16((const __fp16 *) q6_ptr[b].d + 4));
+ float32x4_t q8_d_0123 = vld1q_f32(q8_ptr[b].d);
+
+ float32x4_t sbd_scale_0123[q8_k_blocklen];
+ float32x4_t sbd_scale_4567[q8_k_blocklen];
+
+ sbd_scale_0123[0] = vmulq_laneq_f32(q6_d_0123, q8_d_0123, 0);
+ sbd_scale_4567[0] = vmulq_laneq_f32(q6_d_4567, q8_d_0123, 0);
+ sbd_scale_0123[1] = vmulq_laneq_f32(q6_d_0123, q8_d_0123, 1);
+ sbd_scale_4567[1] = vmulq_laneq_f32(q6_d_4567, q8_d_0123, 1);
+ sbd_scale_0123[2] = vmulq_laneq_f32(q6_d_0123, q8_d_0123, 2);
+ sbd_scale_4567[2] = vmulq_laneq_f32(q6_d_4567, q8_d_0123, 2);
+ sbd_scale_0123[3] = vmulq_laneq_f32(q6_d_0123, q8_d_0123, 3);
+ sbd_scale_4567[3] = vmulq_laneq_f32(q6_d_4567, q8_d_0123, 3);
+
+ int32x4_t acc_s32[acc_size];
+ for (int i = 0; i < acc_size; i++) {
+ acc_s32[i] = vdupq_n_s32(0);
+ }
+
+ int16_t q6_scales[8 * 16];
+ for (int i = 0; i < 16; i++) {
+ int16x8_t scales = vmovl_s8(vld1_s8(q6_ptr[b].scales + i * 8));
+ vst1q_s16(q6_scales + i * 8, scales);
+ }
+
+ for (int half = 0; half < 2; half++) {
+ const uint8_t * ql_base = q6_ptr[b].ql + half * 512;
+ const uint8_t * qh_base = q6_ptr[b].qh + half * 256;
+
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+ int32x4_t acc_lo[acc_size];
+ int32x4_t acc_hi[acc_size];
+ for (int i = 0; i < acc_size; i++) {
+ acc_lo[i] = vdupq_n_s32(0);
+ acc_hi[i] = vdupq_n_s32(0);
+ }
+
+ const int8_t * q8_base_l = q8_ptr[b].qs + half * 512 + sb * 64;
+ const int8_t * q8_base_h = q8_ptr[b].qs + half * 512 + 256 + sb * 64;
+
+ // 4 rows * 16 elements per scale
+ // 4 reads of 16 bytes each
+ constexpr int reads_per_sb = 4;
+ int8x16_t q8_l[reads_per_sb];
+ int8x16_t q8_h[reads_per_sb];
+ for (int k = 0; k < reads_per_sb; k++) {
+ q8_l[k] = vld1q_s8(q8_base_l + 16 * k);
+ q8_h[k] = vld1q_s8(q8_base_h + 16 * k);
+ }
+
+ const int ql_off_base = sb * QK_K / 2;
+ const int qh_off_base = ql_off_base & 255;
+
+ uint8x16_t q6_ql_0123[reads_per_sb];
+ uint8x16_t q6_ql_4567[reads_per_sb];
+ uint8x16_t q6_qh_0123[reads_per_sb];
+ uint8x16_t q6_qh_4567[reads_per_sb];
+
+ for (int k = 0; k < reads_per_sb; k++) {
+ q6_ql_0123[k] = vld1q_u8(ql_base + ql_off_base + k * 32);
+ q6_ql_4567[k] = vld1q_u8(ql_base + ql_off_base + k * 32 + 16);
+ q6_qh_0123[k] = vld1q_u8(qh_base + qh_off_base + k * 32);
+ q6_qh_4567[k] = vld1q_u8(qh_base + qh_off_base + k * 32 + 16);
+ }
+
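+                    // Subblocks 2 and 3 reuse the same qh bytes as subblocks 0 and 1 but take the
+                    // next bit pair, hence the shift right by 2.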
+ if (sb > 1) {
+ for (int k = 0; k < reads_per_sb; k++) {
+ q6_qh_0123[k] = vshrq_n_u8(q6_qh_0123[k], 2);
+ q6_qh_4567[k] = vshrq_n_u8(q6_qh_4567[k], 2);
+ }
+ }
+
+ for (int k = 0; k < reads_per_sb; k++) {
+ // q = (ql | qh) - 32
+ const uint8x16_t hbit_lo_0123 = vandq_u8(q6_qh_0123[k], mask_lo);
+ const uint8x16_t hbit_hi_0123 = vandq_u8(q6_qh_0123[k], mask_hi);
+ const uint8x16_t hbit_lo_4567 = vandq_u8(q6_qh_4567[k], mask_lo);
+ const uint8x16_t hbit_hi_4567 = vandq_u8(q6_qh_4567[k], mask_hi);
+
+ const int8x16_t q6_0123_lo = vsubq_s8(
+ vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(q6_ql_0123[k], m4b), hbit_lo_0123, 4)), m32s);
+ const int8x16_t q6_0123_hi = vsubq_s8(
+ vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6_ql_0123[k], 4), hbit_hi_0123)), m32s);
+
+ acc_lo[0] = vdotq_laneq_s32(acc_lo[0], q6_0123_lo, q8_l[k], 0); // 0..3 r0 c0123
+ acc_lo[1] = vdotq_laneq_s32(acc_lo[1], q6_0123_lo, q8_l[k], 1); // 0..3 r1 c0123
+ acc_lo[2] = vdotq_laneq_s32(acc_lo[2], q6_0123_lo, q8_l[k], 2); // 0..3 r2 c0123
+ acc_lo[3] = vdotq_laneq_s32(acc_lo[3], q6_0123_lo, q8_l[k], 3); // 0..3 r3 c0123
+
+ acc_hi[0] = vdotq_laneq_s32(acc_hi[0], q6_0123_hi, q8_h[k], 0); // 64..67 r0 c0123
+ acc_hi[1] = vdotq_laneq_s32(acc_hi[1], q6_0123_hi, q8_h[k], 1); // 64..67 r1 c0123
+ acc_hi[2] = vdotq_laneq_s32(acc_hi[2], q6_0123_hi, q8_h[k], 2); // 64..67 r2 c0123
+ acc_hi[3] = vdotq_laneq_s32(acc_hi[3], q6_0123_hi, q8_h[k], 3); // 64..67 r3 c0123
+
+ const int8x16_t q6_4567_lo = vsubq_s8(
+ vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(q6_ql_4567[k], m4b), hbit_lo_4567, 4)), m32s);
+ const int8x16_t q6_4567_hi = vsubq_s8(
+ vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6_ql_4567[k], 4), hbit_hi_4567)), m32s);
+
+ acc_lo[4] = vdotq_laneq_s32(acc_lo[4], q6_4567_lo, q8_l[k], 0); // 0..3 r0 c4567
+ acc_lo[5] = vdotq_laneq_s32(acc_lo[5], q6_4567_lo, q8_l[k], 1); // 0..3 r1 c4567
+ acc_lo[6] = vdotq_laneq_s32(acc_lo[6], q6_4567_lo, q8_l[k], 2); // 0..3 r2 c4567
+ acc_lo[7] = vdotq_laneq_s32(acc_lo[7], q6_4567_lo, q8_l[k], 3); // 0..3 r3 c4567
+
+ acc_hi[4] = vdotq_laneq_s32(acc_hi[4], q6_4567_hi, q8_h[k], 0); // 64..67 r0 c4567
+ acc_hi[5] = vdotq_laneq_s32(acc_hi[5], q6_4567_hi, q8_h[k], 1); // 64..67 r1 c4567
+ acc_hi[6] = vdotq_laneq_s32(acc_hi[6], q6_4567_hi, q8_h[k], 2); // 64..67 r2 c4567
+ acc_hi[7] = vdotq_laneq_s32(acc_hi[7], q6_4567_hi, q8_h[k], 3); // 64..67 r3 c4567
+ }
+
+ // Scale and bias
+ const int scale_idx_l = half * 8 + sb;
+ const int scale_idx_h = half * 8 + sb + 4;
+
+ for (int g = 0; g < col_groups; g++) {
+ const int16x4_t scales_l16 = vld1_s16(q6_scales + scale_idx_l * 8 + g * 4);
+ const int16x4_t scales_h16 = vld1_s16(q6_scales + scale_idx_h * 8 + g * 4);
+ const int32x4_t scale_vec_l = vmovl_s16(scales_l16);
+ const int32x4_t scale_vec_h = vmovl_s16(scales_h16);
+ const int acc_offset = g * q8_k_blocklen;
+
+ for (int row = 0; row < q8_k_blocklen; row++) {
+ const int idx = row * 2 + g;
+ acc_s32[idx] = vmlaq_s32(acc_s32[idx], acc_lo[acc_offset + row], scale_vec_l);
+ acc_s32[idx] = vmlaq_s32(acc_s32[idx], acc_hi[acc_offset + row], scale_vec_h);
+ }
+ }
+ }
+ }
+
+ // Finally we apply the superblock scales
+ for (int row = 0; row < q8_k_blocklen; row++) {
+ const int idx0 = 2 * row;
+ const int idx1 = 2 * row + 1;
+ const int32x4_t acc_0123 = acc_s32[idx0];
+ const int32x4_t acc_4567 = acc_s32[idx1];
+
+ acc_f32[idx0] = vmlaq_f32(acc_f32[idx0], vcvtq_f32_s32(acc_0123), sbd_scale_0123[row]);
+ acc_f32[idx1] = vmlaq_f32(acc_f32[idx1], vcvtq_f32_s32(acc_4567), sbd_scale_4567[row]);
+ }
+ } // for b
+
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ int row = y * q8_k_blocklen + i;
+ for (int j = 0; j < 2; j++) {
+ int col = x * ncols_interleaved + j * 4;
+ int offset = row * bs + col;
+ vst1q_f32(s + offset, acc_f32[2 * i + j]);
+ }
+ }
+ } // for x
+ } // for y
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemm_q6_K_8x4_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
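+// Q6_K x Q8_K GEMM micro-kernel (4 rows x 8 interleaved columns per tile, i8mm): same 6-bit
+// reconstruction as above, with row/column pairs fed to SMMLA and the resulting 2x2 tiles
+// re-interleaved before the superblock scales are applied.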
+void ggml_gemm_q6_K_8x8_q8_K(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ constexpr int qk = QK_K;
+ const int nb = n / qk;
+
+ constexpr int ncols_interleaved = 8;
+ constexpr int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nr % 4 == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ constexpr int q8_k_blocklen = 4;
+ const uint8x16_t m4b = vdupq_n_u8(0x0f);
+ const uint8x16_t mask_lo = vdupq_n_u8(0x03);
+ const uint8x16_t mask_hi = vdupq_n_u8(0x30);
+ const int8x16_t m32s = vdupq_n_s8(32);
+
+ // 8 accumulators: 4 q8 rows × 2 col groups (0-3, 4-7)
+ float32x4_t acc_f32[blocklen];
+
+ for (int y = 0; y < nr / q8_k_blocklen; y++) {
+ const block_q8_Kx4 * GGML_RESTRICT q8_ptr = (const block_q8_Kx4 *) vy + (y * nb);
+
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q6_Kx8 * GGML_RESTRICT q6_ptr = (const block_q6_Kx8 *) vx + (x * nb);
+
+ for (int i = 0; i < blocklen; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ int32x4_t acc[8]; // rows 01 stored in [0][1][2][3], rows 23 stored in [4][5][6][7]
+ for (int i = 0; i < 8; i++) {
+ acc[i] = vdupq_n_s32(0);
+ }
+
+                    // Q6_K has plain 8-bit scales, 16 per superblock (one per 16 values);
+                    // widen them to int16 once so they can be reused below
+ int16_t q6_scales[16 * 8];
+ for (int i = 0; i < 16; ++i) {
+ int16x8_t s16 = vmovl_s8(vld1_s8(q6_ptr[b].scales + i * 8));
+ vst1q_s16(q6_scales + i * 8, s16);
+ }
+
+ // Process two 128-value halves per superblock
+ for (int half = 0; half < 2; half++) {
+
+ const uint8_t * ql_base = q6_ptr[b].ql + half * 512;
+ const uint8_t * qh_base = q6_ptr[b].qh + half * 256;
+
+                    // A subblock (sb) is a set of weights that shares a scale.
+                    // Since q6_K scales cover 16 elements each,
+                    // num sbs = 256 elements / (16 elements/scale * 2 elements/byte * 2 halves) = 4
+ for (int sb = 0; sb < QK_K / 64; sb++) {
+                    // Q6_K weight indices advance by 64 instead of 32, so the matching q8 data
+                    // has to be loaded from two separate regions
+ const int8_t * q8_base_l = q8_ptr[b].qs + half * 512 + sb * 64;
+ const int8_t * q8_base_h = q8_ptr[b].qs + half * 512 + 256 + sb * 64;
+
+ int8x16_t q8_l_01[2];
+ int8x16_t q8_l_23[2];
+ for (int i = 0; i < 2; i++) {
+ const int offset = i * 32;
+ q8_l_01[i] = vld1q_s8(q8_base_l + offset); // 0..7 & 8..15 (r01)
+ q8_l_23[i] = vld1q_s8(q8_base_l + offset + 16); // 0..7 & 8..15 (r23)
+ }
+
+ int8x16_t q8_h_01[2];
+ int8x16_t q8_h_23[2];
+ for (int i = 0; i < 2; i++) {
+ const int offset = i * 32;
+ q8_h_01[i] = vld1q_s8(q8_base_h + offset);
+ q8_h_23[i] = vld1q_s8(q8_base_h + offset + 16);
+ }
+
+ const int ql_off_base = sb * QK_K / 2;
+
+ uint8x16_t q6_ql_0[4];
+ uint8x16_t q6_ql_1[4];
+ for (int k = 0; k < 4; k++) {
+ q6_ql_0[k] = vld1q_u8(ql_base + ql_off_base + 16 * k);
+ q6_ql_1[k] = vld1q_u8(ql_base + ql_off_base + 64 + 16 * k);
+ }
+
+ const int qh_off_base = (sb * QK_K / 2) & 255; // wrap after 256 bytes
+ uint8x16_t q6_qh_0[4];
+ uint8x16_t q6_qh_1[4];
+ for (int k = 0; k < 4; k++) {
+ q6_qh_0[k] = vld1q_u8(qh_base + qh_off_base + 16 * k);
+ q6_qh_1[k] = vld1q_u8(qh_base + qh_off_base + 64 + 16 * k);
+ }
+
+ // Adjust for the proper high bits (Sb 2 and 3)
+ if (sb > 1) {
+ for (int k = 0; k < 4; k++) {
+ q6_qh_0[k] = vshrq_n_u8(q6_qh_0[k], 2);
+ q6_qh_1[k] = vshrq_n_u8(q6_qh_1[k], 2);
+ }
+ }
+
+ // Process column pairs (0-1, 2-3, 4-5, 6-7)
+ for (int cp = 0; cp < ncols_interleaved / 2; cp++) {
+ const uint8x16_t q6_qs_cp_0_l = q6_ql_0[cp];
+ const uint8x16_t q6_qs_cp_1_l = q6_ql_1[cp];
+ const uint8x16_t q6_qs_cp_0_h = q6_qh_0[cp];
+ const uint8x16_t q6_qs_cp_1_h = q6_qh_1[cp];
+
+ // Extract high 2 bits for upper nibble reconstruction
+ const uint8x16_t q6_qs_cp_0_hh = vandq_u8(q6_qs_cp_0_h, mask_hi);
+ const uint8x16_t q6_qs_cp_1_hh = vandq_u8(q6_qs_cp_1_h, mask_hi);
+
+ // q6 = (low4 | high2<<4) - 32
+ // Use vsliq_n_u8 to combine shift-left-insert in one instruction (like Q5_K)
+ const int8x16_t q6_l0 = vsubq_s8(
+ vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(q6_qs_cp_0_l, m4b), vandq_u8(q6_qs_cp_0_h, mask_lo), 4)),
+ m32s);
+ const int8x16_t q6_l1 = vsubq_s8(
+ vreinterpretq_s8_u8(vsliq_n_u8(vandq_u8(q6_qs_cp_1_l, m4b), vandq_u8(q6_qs_cp_1_h, mask_lo), 4)),
+ m32s);
+ const int8x16_t q6_h0 = vsubq_s8(
+ vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6_qs_cp_0_l, 4), q6_qs_cp_0_hh)), m32s);
+ const int8x16_t q6_h1 = vsubq_s8(
+ vreinterpretq_s8_u8(vorrq_u8(vshrq_n_u8(q6_qs_cp_1_l, 4), q6_qs_cp_1_hh)), m32s);
+
+ // row pair 0, base_l
+ int32x4_t sb_acc_0l = vmmlaq_s32(vdupq_n_s32(0), q6_l0, q8_l_01[0]);
+ sb_acc_0l = vmmlaq_s32(sb_acc_0l, q6_l1, q8_l_01[1]);
+ // row pair 0, base_h
+ int32x4_t sb_acc_0h = vmmlaq_s32(vdupq_n_s32(0), q6_h0, q8_h_01[0]);
+ sb_acc_0h = vmmlaq_s32(sb_acc_0h, q6_h1, q8_h_01[1]);
+ // row pair 1, base_l
+ int32x4_t sb_acc_1l = vmmlaq_s32(vdupq_n_s32(0), q6_l0, q8_l_23[0]);
+ sb_acc_1l = vmmlaq_s32(sb_acc_1l, q6_l1, q8_l_23[1]);
+ // row pair 1, base_h
+ int32x4_t sb_acc_1h = vmmlaq_s32(vdupq_n_s32(0), q6_h0, q8_h_23[0]);
+ sb_acc_1h = vmmlaq_s32(sb_acc_1h, q6_h1, q8_h_23[1]);
+
+ const int scale_idx_l = half * 8 + sb;
+ const int scale_idx_h = half * 8 + sb + 4;
+
+ const int32x4_t scale_vec_l = {
+ q6_scales[scale_idx_l * 8 + cp * 2 + 0],
+ q6_scales[scale_idx_l * 8 + cp * 2 + 0],
+ q6_scales[scale_idx_l * 8 + cp * 2 + 1],
+ q6_scales[scale_idx_l * 8 + cp * 2 + 1],
+ };
+ const int32x4_t scale_vec_h = {
+ q6_scales[scale_idx_h * 8 + cp * 2 + 0],
+ q6_scales[scale_idx_h * 8 + cp * 2 + 0],
+ q6_scales[scale_idx_h * 8 + cp * 2 + 1],
+ q6_scales[scale_idx_h * 8 + cp * 2 + 1],
+ };
+
+ acc[cp] = vmlaq_s32(acc[cp], sb_acc_0l, scale_vec_l);
+ acc[cp] = vmlaq_s32(acc[cp], sb_acc_0h, scale_vec_h);
+ acc[cp + 4] = vmlaq_s32(acc[cp + 4], sb_acc_1l, scale_vec_l);
+ acc[cp + 4] = vmlaq_s32(acc[cp + 4], sb_acc_1h, scale_vec_h);
+ }
+ }
+ } // for half
+
+ // Reorder i8mm output to match memory layout
+ for (int i = 0; i < 8; i++) {
+ int32x2x2_t aux = vzip_s32(vget_low_s32(acc[i]), vget_high_s32(acc[i]));
+ acc[i] = vcombine_s32(aux.val[0], aux.val[1]);
+ }
+ int32x4_t reorder_acc[8] = {
+ vcombine_s32(vget_low_s32(acc[0]), vget_low_s32(acc[1])),
+ vcombine_s32(vget_low_s32(acc[2]), vget_low_s32(acc[3])),
+ vcombine_s32(vget_high_s32(acc[0]), vget_high_s32(acc[1])),
+ vcombine_s32(vget_high_s32(acc[2]), vget_high_s32(acc[3])),
+ vcombine_s32(vget_low_s32(acc[4]), vget_low_s32(acc[5])),
+ vcombine_s32(vget_low_s32(acc[6]), vget_low_s32(acc[7])),
+ vcombine_s32(vget_high_s32(acc[4]), vget_high_s32(acc[5])),
+ vcombine_s32(vget_high_s32(acc[6]), vget_high_s32(acc[7])),
+ };
+
+ // Apply superblock scale (no mins for q6_K)
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ for (int j = 0; j < 2; j++) {
+ float32x4_t q8_d = vdupq_n_f32(q8_ptr[b].d[i]);
+ float32x4_t q6_d = vcvt_f32_f16(vld1_f16((const __fp16 *) (q6_ptr[b].d + j * 4)));
+ const float32x4_t scale = vmulq_f32(q6_d, q8_d);
+
+ acc_f32[2 * i + j] =
+ vmlaq_f32(acc_f32[2 * i + j], vcvtq_f32_s32(reorder_acc[2 * i + j]), scale);
+ }
+ }
+ } // for b
+
+ // Store results
+ for (int i = 0; i < q8_k_blocklen; i++) {
+ int row = y * q8_k_blocklen + i;
+ for (int j = 0; j < 2; j++) {
+ int col = x * ncols_interleaved + j * 4;
+ int offset = row * bs + col;
+ vst1q_f32(s + offset, acc_f32[2 * i + j]);
+ }
+ }
+ } // for x
+ } // for y
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ ggml_gemm_q6_K_8x8_q8_K_generic(n, s, bs, vx, vy, nr, nc);
+}
+
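+// Q8_0 x Q8_0 GEMM micro-kernel (4 rows x 4 interleaved columns per tile, SDOT): plain int8 dot
+// products over 32-element blocks, scaled by the product of the per-block fp16 deltas.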
+void ggml_gemm_q8_0_4x4_q8_0(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 4;
+
+ assert(n % qk == 0);
+ assert(nr % 4 == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ for (int y = 0; y < nr / 4; y++) {
+ const block_q8_0x4 * a_ptr = (const block_q8_0x4 *) vy + (y * nb);
+ for (int x = 0; x < nc / ncols_interleaved; x++) {
+ const block_q8_0x4 * b_ptr = (const block_q8_0x4 *) vx + (x * nb);
+
+ float32x4_t sumf[4];
+ for (int m = 0; m < 4; m++) {
+ sumf[m] = vdupq_n_f32(0);
+ }
+
+ for (int l = 0; l < nb; l++) {
+ float32x4_t a_d = vcvt_f32_f16(vld1_f16((const float16_t *) a_ptr[l].d));
+ float32x4_t b_d = vcvt_f32_f16(vld1_f16((const float16_t *) b_ptr[l].d));
+
+ int32x4_t sumi_0 = vdupq_n_s32(0);
+ int32x4_t sumi_1 = vdupq_n_s32(0);
+ int32x4_t sumi_2 = vdupq_n_s32(0);
+ int32x4_t sumi_3 = vdupq_n_s32(0);
+
+ for (int k_group = 0; k_group < 8; k_group += 4) {
+ int8x16x4_t a = vld1q_s8_x4(a_ptr[l].qs + 16 * k_group);
+ int8x16x4_t b = vld1q_s8_x4(b_ptr[l].qs + 16 * k_group);
+
+ for (int k = 0; k < 4; k++) {
+ sumi_0 = vdotq_laneq_s32(sumi_0, b.val[k], a.val[k], 0);
+ sumi_1 = vdotq_laneq_s32(sumi_1, b.val[k], a.val[k], 1);
+ sumi_2 = vdotq_laneq_s32(sumi_2, b.val[k], a.val[k], 2);
+ sumi_3 = vdotq_laneq_s32(sumi_3, b.val[k], a.val[k], 3);
+ }
+ }
+
+ sumf[0] = vmlaq_f32(sumf[0], vmulq_laneq_f32(b_d, a_d, 0), vcvtq_f32_s32(sumi_0));
+ sumf[1] = vmlaq_f32(sumf[1], vmulq_laneq_f32(b_d, a_d, 1), vcvtq_f32_s32(sumi_1));
+ sumf[2] = vmlaq_f32(sumf[2], vmulq_laneq_f32(b_d, a_d, 2), vcvtq_f32_s32(sumi_2));
+ sumf[3] = vmlaq_f32(sumf[3], vmulq_laneq_f32(b_d, a_d, 3), vcvtq_f32_s32(sumi_3));
+ }
+
+ for (int m = 0; m < 4; m++) {
+ vst1q_f32(s + (y * 4 + m) * bs + x * 4, sumf[m]);
+ }
+ }
+ }
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_DOTPROD)
+ ggml_gemm_q8_0_4x4_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}
+
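+// Q8_0 x Q8_0 GEMM micro-kernel (4 rows x 4 interleaved columns per tile, i8mm): SMMLA produces
+// 2x2 tiles that are rearranged to row-major before the fp32 scales are applied.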
+void ggml_gemm_q8_0_4x8_q8_0(int n,
+ float * GGML_RESTRICT s,
+ size_t bs,
+ const void * GGML_RESTRICT vx,
+ const void * GGML_RESTRICT vy,
+ int nr,
+ int nc) {
+ const int qk = QK8_0;
+ const int nb = n / qk;
+ const int ncols_interleaved = 4;
+ const int blocklen = 8;
+
+ assert(n % qk == 0);
+ assert(nr % 4 == 0);
+ assert(nc % ncols_interleaved == 0);
+
+ UNUSED(nb);
+ UNUSED(ncols_interleaved);
+ UNUSED(blocklen);
+
+#if defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ const block_q8_0x4 * b_ptr_base = (const block_q8_0x4 *) vx;
+
+ for (int y = 0; y < nr; y += 4) {
+ const block_q8_0x4 * a_ptr_base = (const block_q8_0x4 *) vy + (y / 4) * nb;
+
+ for (int x = 0; x < nc; x += ncols_interleaved) {
+ const block_q8_0x4 * b_ptr = b_ptr_base + (x / 4) * nb;
+ const block_q8_0x4 * a_ptr = a_ptr_base;
+
+ float32x4_t acc_f32[4];
+ for (int i = 0; i < 4; i++) {
+ acc_f32[i] = vdupq_n_f32(0);
+ }
+
+ for (int b = 0; b < nb; b++) {
+ int32x4_t acc[4];
+ for (int i = 0; i < 4; i++) {
+ acc[i] = vdupq_n_s32(0);
+ }
+
+ // Process 4 chunks of 8 positions each
+ for (int chunk = 0; chunk < 4; chunk++) {
+ int8x16_t a01 = vld1q_s8(a_ptr->qs + chunk * 32);
+ int8x16_t a23 = vld1q_s8(a_ptr->qs + chunk * 32 + 16);
+ int8x16_t b01 = vld1q_s8(b_ptr->qs + chunk * 32);
+ int8x16_t b23 = vld1q_s8(b_ptr->qs + chunk * 32 + 16);
+
+ acc[0] = vmmlaq_s32(acc[0], a01, b01);
+ acc[1] = vmmlaq_s32(acc[1], a01, b23);
+ acc[2] = vmmlaq_s32(acc[2], a23, b01);
+ acc[3] = vmmlaq_s32(acc[3], a23, b23);
+ }
+
+ // Reorder outputs from 2×2 tiles to row-major
+ // acc[0] = [r0c0, r0c1, r1c0, r1c1]
+ // acc[1] = [r0c2, r0c3, r1c2, r1c3]
+ // acc[2] = [r2c0, r2c1, r3c0, r3c1]
+ // acc[3] = [r2c2, r2c3, r3c2, r3c3]
+ int32x4_t row0 = vcombine_s32(vget_low_s32(acc[0]), vget_low_s32(acc[1]));
+ int32x4_t row1 = vcombine_s32(vget_high_s32(acc[0]), vget_high_s32(acc[1]));
+ int32x4_t row2 = vcombine_s32(vget_low_s32(acc[2]), vget_low_s32(acc[3]));
+ int32x4_t row3 = vcombine_s32(vget_high_s32(acc[2]), vget_high_s32(acc[3]));
+
+ // Scales
+ float32x4_t a_d = vcvt_f32_f16(vld1_f16((const __fp16 *) a_ptr->d));
+ float32x4_t b_d = vcvt_f32_f16(vld1_f16((const __fp16 *) b_ptr->d));
+
+ acc_f32[0] = vfmaq_f32(acc_f32[0], vcvtq_f32_s32(row0), vmulq_laneq_f32(b_d, a_d, 0));
+ acc_f32[1] = vfmaq_f32(acc_f32[1], vcvtq_f32_s32(row1), vmulq_laneq_f32(b_d, a_d, 1));
+ acc_f32[2] = vfmaq_f32(acc_f32[2], vcvtq_f32_s32(row2), vmulq_laneq_f32(b_d, a_d, 2));
+ acc_f32[3] = vfmaq_f32(acc_f32[3], vcvtq_f32_s32(row3), vmulq_laneq_f32(b_d, a_d, 3));
+
+ a_ptr++;
+ b_ptr++;
+ }
+
+ for (int row = 0; row < 4; row++) {
+ vst1q_f32(s + (y + row) * bs + x, acc_f32[row]);
+ }
+ }
+ }
+ return;
+#endif // defined(__aarch64__) && defined(__ARM_NEON) && defined(__ARM_FEATURE_MATMUL_INT8)
+ ggml_gemm_q8_0_4x8_q8_0_generic(n, s, bs, vx, vy, nr, nc);
+}