summaryrefslogtreecommitdiff
path: root/models.h
blob: e2969717ff0af0c53493895db4ac9d6cba2907bf (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
#ifndef MODELS_H
#define MODELS_H

#include "llama.h"
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Static configuration for one llama.cpp model: where to find the
 * weights plus the load / context / sampling parameters to use.
 * Field names appear to mirror llama.cpp's llama_model_params and
 * llama_context_params (see llama.h) — confirm against the loader code.
 */
typedef struct {
	const char *name;      /* human-readable identifier; lookup key for get_model_by_name() */
	const char *filepath;  /* path to the .gguf weights file (relative to the working dir) */
	int n_gpu_layers;      /* layers to offload to GPU; 0 = CPU only */
	bool use_mmap;         /* memory-map the model file instead of reading it into RAM */
	int n_ctx;             /* context window size, in tokens */
	int n_batch;           /* max batch size per decode call */
	bool embeddings;       /* run in embedding-extraction mode rather than generation */
	float temperature;     /* sampling temperature */
	float min_p;           /* min-p sampling cutoff */
	uint32_t seed;         /* sampler RNG seed; LLAMA_DEFAULT_SEED requests a random seed */
} ModelConfig;

/*
 * Built-in model registry.
 *
 * Declared `static const`: a non-static definition in a header produces
 * duplicate-symbol errors as soon as two translation units include this
 * file, and the table is reference data that must not be mutated at run
 * time.  Each including TU gets its own private copy.
 */
static const ModelConfig models[] = {
	{
		.name = "tinyllama-1",
		.filepath = "models/TinyLlama-1.1B-intermediate-step-1431k-3T-Q2_K.gguf",
		.n_gpu_layers = 0,
		.use_mmap = false,
		.n_ctx = 2048,
		.n_batch = 4096,
		.embeddings = false,
		.temperature = 0.8f,
		.min_p = 0.05f,
		.seed = LLAMA_DEFAULT_SEED,
	},
	{
		.name = "flan-t5-small",
		.filepath = "models/flan-t5-small.F16.gguf",
		.n_gpu_layers = 0,
		.use_mmap = false,
		.n_ctx = 512,
		.n_batch = 512,
		.embeddings = false,
		.temperature = 0.8f,
		.min_p = 0.05f,
		.seed = LLAMA_DEFAULT_SEED,
	},
	{
		.name = "phi-4-mini-instruct",
		.filepath = "models/Phi-4-mini-instruct.Q2_K.gguf",
		.n_gpu_layers = 0,
		.use_mmap = false,
		.n_ctx = 131072,
		.n_batch = 4096,
		.embeddings = false,
		.temperature = 0.8f,
		.min_p = 0.05f,
		.seed = LLAMA_DEFAULT_SEED,
	},
};

/*
 * Look up a model configuration by its `name` field.
 *
 * Returns a pointer into the static `models` table (do not free), or
 * NULL when `name` is NULL or no entry matches.
 *
 * `static inline` makes the definition header-safe: each including
 * translation unit gets its own copy instead of clashing at link time.
 */
static inline const ModelConfig *get_model_by_name(const char *name) {
	if (name == NULL) {
		return NULL; /* strcmp(x, NULL) is undefined behavior — guard it */
	}
	for (size_t i = 0; i < sizeof(models) / sizeof(models[0]); i++) {
		if (models[i].name != NULL && strcmp(models[i].name, name) == 0) {
			return &models[i];
		}
	}
	return NULL;
}

#endif