summary refs log tree commit diff
path: root/llama.cpp/common/ngram-mod.h
diff options
context:
space:
mode:
author Mitja Felicijan <mitja.felicijan@gmail.com> 2026-02-12 20:57:17 +0100
committer Mitja Felicijan <mitja.felicijan@gmail.com> 2026-02-12 20:57:17 +0100
commit b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree 211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/common/ngram-mod.h
download llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz
Engage!
Diffstat (limited to 'llama.cpp/common/ngram-mod.h')
-rw-r--r-- llama.cpp/common/ngram-mod.h 38
1 file changed, 38 insertions, 0 deletions
diff --git a/llama.cpp/common/ngram-mod.h b/llama.cpp/common/ngram-mod.h
new file mode 100644
index 0000000..7af92e9
--- /dev/null
+++ b/llama.cpp/common/ngram-mod.h
@@ -0,0 +1,38 @@
+#pragma once
+
+#include <cstdint>
+#include <vector>
+#include <cstddef>
+
+//
+// common_ngram_mod
+// ref: https://github.com/ggml-org/llama.cpp/pull/19164
+//
+
+// basic n-gram hasher: fixed-capacity table keyed by n consecutive tokens (declarations only; impl in the .cpp)
+struct common_ngram_mod {
+    using entry_t = int32_t; // token-id type; also the element type stored in `entries`
+
+    static constexpr entry_t EMPTY = -1; // sentinel for an unoccupied slot / lookup miss (matches get()'s -1)
+
+    common_ngram_mod(uint16_t n, size_t size); // n: ngram length in tokens; size: table capacity — TODO confirm against the .cpp
+
+    size_t idx(const entry_t * tokens) const; // presumably hashes the n tokens at `tokens` to a slot index; impl not shown
+    void add(const entry_t * tokens); // insert the ngram at `tokens` (impl not shown)
+    entry_t get(const entry_t * tokens) const; // return -1 if not found
+
+    void reset(); // presumably clears all slots and `used`; impl not shown
+
+    size_t get_n() const; // accessor for the ngram length `n`
+    size_t get_used() const; // accessor for `used` (occupied-slot count)
+
+    size_t size() const; // slot count — presumably entries.size()
+    size_t size_bytes() const; // presumably storage footprint of `entries` in bytes
+
+private:
+    size_t n; // ngram size to hash
+
+    size_t used; // number of occupied entries — presumably maintained by add()/reset()
+
+    std::vector<entry_t> entries; // flat table storage, one entry_t per slot
+};