author     Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
committer  Mitja Felicijan <mitja.felicijan@gmail.com>  2026-02-12 20:57:17 +0100
commit     b333b06772c89d96aacb5490d6a219fba7c09cc6 (patch)
tree       211df60083a5946baa2ed61d33d8121b7e251b06 /llama.cpp/tests/test-autorelease.cpp
download   llmnpc-b333b06772c89d96aacb5490d6a219fba7c09cc6.tar.gz
Engage!
Diffstat (limited to 'llama.cpp/tests/test-autorelease.cpp')
-rw-r--r--  llama.cpp/tests/test-autorelease.cpp  24
1 file changed, 24 insertions(+), 0 deletions(-)
diff --git a/llama.cpp/tests/test-autorelease.cpp b/llama.cpp/tests/test-autorelease.cpp
new file mode 100644
index 0000000..ca87c56
--- /dev/null
+++ b/llama.cpp/tests/test-autorelease.cpp
@@ -0,0 +1,24 @@
+// ref: https://github.com/ggml-org/llama.cpp/issues/4952#issuecomment-1892864763
+
+#include <cstdio>
+#include <string>
+#include <thread>
+
+#include "llama.h"
+#include "get-model.h"
+
+// This creates a new context inside a worker thread (std::thread) and then tries to exit cleanly.
+int main(int argc, char ** argv) {
+ auto * model_path = get_model_or_exit(argc, argv);
+
+ std::thread([&model_path]() {
+ llama_backend_init();
+ auto * model = llama_model_load_from_file(model_path, llama_model_default_params());
+ auto * ctx = llama_init_from_model(model, llama_context_default_params());
+ llama_free(ctx);
+ llama_model_free(model);
+ llama_backend_free();
+ }).join();
+
+ return 0;
+}
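
Note: the test exercises the pattern from the linked issue: the backend is initialized, a model is loaded, and a context is created entirely inside a worker thread, then everything is torn down before the thread exits, so that process shutdown afterwards is clean. A minimal standalone sketch of the same pattern, with basic error checks added, is shown below. It takes the model path directly from argv[1] rather than through get-model.h; that command-line handling is an assumption for illustration, not part of the test.

// Sketch: thread-scoped llama.cpp init/teardown with null checks.
// Uses only the API calls already present in the test above (llama.h).
#include <cstdio>
#include <thread>

#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <model.gguf>\n", argv[0]);
        return 1;
    }
    const char * model_path = argv[1];

    int rc = 0;
    std::thread([&]() {
        llama_backend_init();

        llama_model * model = llama_model_load_from_file(model_path, llama_model_default_params());
        if (model == nullptr) {
            fprintf(stderr, "failed to load model: %s\n", model_path);
            rc = 1;
        } else {
            llama_context * ctx = llama_init_from_model(model, llama_context_default_params());
            if (ctx == nullptr) {
                fprintf(stderr, "failed to create context\n");
                rc = 1;
            } else {
                llama_free(ctx);
            }
            llama_model_free(model);
        }

        llama_backend_free();
    }).join();

    return rc;
}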