summaryrefslogtreecommitdiff
path: root/models.h
diff options
context:
space:
mode:
authorMitja Felicijan <mitja.felicijan@gmail.com>2026-02-13 18:07:45 +0100
committerMitja Felicijan <mitja.felicijan@gmail.com>2026-02-13 18:07:45 +0100
commit2a6fd554998c64733e2d97aecf653f6e48e0f8b4 (patch)
treef53c2f0b4dc42825f426ada0ebb591dbb80c89e9 /models.h
parenta1a595a3305727d30e16e856f4faf95980643e1c (diff)
downloadllmnpc-2a6fd554998c64733e2d97aecf653f6e48e0f8b4.tar.gz
Store context documents to Vector Database
Diffstat (limited to 'models.h')
-rw-r--r--  models.h | 110
1 file changed, 55 insertions, 55 deletions
diff --git a/models.h b/models.h
index ac242fc..e296971 100644
--- a/models.h
+++ b/models.h
@@ -6,64 +6,64 @@
#include <string.h>
typedef struct {
- const char *name;
- const char *filepath;
- int n_gpu_layers;
- bool use_mmap;
- int n_ctx;
- int n_batch;
- bool embeddings;
- float temperature;
- float min_p;
- uint32_t seed;
-} model_config;
+ const char *name;
+ const char *filepath;
+ int n_gpu_layers;
+ bool use_mmap;
+ int n_ctx;
+ int n_batch;
+ bool embeddings;
+ float temperature;
+ float min_p;
+ uint32_t seed;
+} ModelConfig;
-model_config models[] = {
- {
- .name = "flan-t5-small",
- .filepath = "models/flan-t5-small.F16.gguf",
- .n_gpu_layers = 0,
- .use_mmap = false,
- .n_ctx = 512,
- .n_batch = 512,
- .embeddings = false,
- .temperature = 0.8f,
- .min_p = 0.05f,
- .seed = LLAMA_DEFAULT_SEED,
- },
- {
- .name = "phi-4-mini-instruct",
- .filepath = "models/Phi-4-mini-instruct.Q2_K.gguf",
- .n_gpu_layers = 0,
- .use_mmap = false,
- .n_ctx = 131072,
- .n_batch = 4096,
- .embeddings = false,
- .temperature = 0.8f,
- .min_p = 0.05f,
- .seed = LLAMA_DEFAULT_SEED,
- },
- {
- .name = "tinyllama-1",
- .filepath = "models/TinyLlama-1.1B-intermediate-step-1431k-3T-Q2_K.gguf",
- .n_gpu_layers = 0,
- .use_mmap = false,
- .n_ctx = 2048,
- .n_batch = 4096,
- .embeddings = false,
- .temperature = 0.8f,
- .min_p = 0.05f,
- .seed = LLAMA_DEFAULT_SEED,
- },
+ModelConfig models[] = {
+ {
+ .name = "tinyllama-1",
+ .filepath = "models/TinyLlama-1.1B-intermediate-step-1431k-3T-Q2_K.gguf",
+ .n_gpu_layers = 0,
+ .use_mmap = false,
+ .n_ctx = 2048,
+ .n_batch = 4096,
+ .embeddings = false,
+ .temperature = 0.8f,
+ .min_p = 0.05f,
+ .seed = LLAMA_DEFAULT_SEED,
+ },
+ {
+ .name = "flan-t5-small",
+ .filepath = "models/flan-t5-small.F16.gguf",
+ .n_gpu_layers = 0,
+ .use_mmap = false,
+ .n_ctx = 512,
+ .n_batch = 512,
+ .embeddings = false,
+ .temperature = 0.8f,
+ .min_p = 0.05f,
+ .seed = LLAMA_DEFAULT_SEED,
+ },
+ {
+ .name = "phi-4-mini-instruct",
+ .filepath = "models/Phi-4-mini-instruct.Q2_K.gguf",
+ .n_gpu_layers = 0,
+ .use_mmap = false,
+ .n_ctx = 131072,
+ .n_batch = 4096,
+ .embeddings = false,
+ .temperature = 0.8f,
+ .min_p = 0.05f,
+ .seed = LLAMA_DEFAULT_SEED,
+ },
};
-const model_config *get_model_by_name(const char *name) {
- for (size_t i = 0; i < sizeof(models) / sizeof(models[0]); i++) {
- if (models[i].name != NULL && strcmp(models[i].name, name) == 0) {
- return &models[i];
- }
- }
- return NULL;
+const ModelConfig *get_model_by_name(const char *name) {
+ for (size_t i = 0; i < sizeof(models) / sizeof(models[0]); i++) {
+ if (models[i].name != NULL && strcmp(models[i].name, name) == 0) {
+ return &models[i];
+ }
+ }
+ return NULL;
}
#endif