From a1a595a3305727d30e16e856f4faf95980643e1c Mon Sep 17 00:00:00 2001
From: Mitja Felicijan
Date: Fri, 13 Feb 2026 03:29:25 +0100
Subject: Simple Vector Database

---
 Dockerfile  |   3 +-
 Makefile    |   6 +-
 context.txt |  33 +++--
 prompt.c    | 408 ++++++++++++++++++++++++++++++++++++++++++++++++++-------
 vectordb.c  |  92 ++++++++++++++
 vectordb.h  |  29 +++++
 6 files changed, 515 insertions(+), 56 deletions(-)
 create mode 100644 vectordb.c
 create mode 100644 vectordb.h

diff --git a/Dockerfile b/Dockerfile
index 8f438fa..7f1b700 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -4,6 +4,7 @@
 RUN apt-get update
 RUN apt-get install -y libstdc++6
 COPY prompt /app/prompt
+COPY context.txt /app/context.txt
 COPY models/ /app/models/
 
-# ENTRYPOINT ["bash"]
+ENTRYPOINT ["bash"]

diff --git a/Makefile b/Makefile
index 7267ff9..94f009d 100644
--- a/Makefile
+++ b/Makefile
@@ -12,8 +12,8 @@ LDFLAGS = -L$(LLAMA_DIR)/build/src -L$(LLAMA_DIR)/build/ggml/src \
 
 help: .help
 
-prompt: prompt.c models.h # Build prompt binary for testing
-	$(CC) $(CFLAGS) prompt.c -o prompt $(LDFLAGS)
+prompt: prompt.c vectordb.c models.h # Build prompt binary for testing
+	$(CC) $(CFLAGS) prompt.c vectordb.c -o prompt $(LDFLAGS)
 
 llamacpp: .assure # Build llama.cpp libraries
 	mkdir $(LLAMA_DIR)/build && \
@@ -27,7 +27,7 @@ fetchmodels: .assure # Fetch GGUF models
 
 docker: .assure # Runs prompt in Docker container
 	docker build -t promptd .
-	docker run -it promptd bash
+	docker run -it promptd
 
 clean: # Cleans up all the build artefacts
 	-rm -f prompt

diff --git a/context.txt b/context.txt
index 12d3073..1d97eb3 100644
--- a/context.txt
+++ b/context.txt
@@ -1,9 +1,24 @@
-Gandalf: wizard, Lord of the Rings, grey beard, staff, Istari, Grey Pilgrim, Mithrandir, fought Sauron, helped destroy One Ring.
-
-Frodo: hobbit, Lord of the Rings, Bilbo's nephew, Shire, carried One Ring to Mount Doom, Fellowship of the Ring.
-
-Example: Who is Gandalf? Gandalf is a wizard from The Lord of the Rings.
-Example: Who is Frodo? Frodo is a hobbit from The Lord of the Rings.
-Example: Who is Harry Potter? I don't have that information.
-
-Answer this question. Use only the facts from above. If unknown, say "I don't have that information." Just give the answer, no prefix:
+Gandalf is a wizard in The Lord of the Rings with a grey beard and a staff.
+Gandalf is one of the Istari and is called the Grey Pilgrim and Mithrandir.
+Gandalf fought Sauron and helped destroy the One Ring.
+Frodo Baggins is a hobbit in The Lord of the Rings and is Bilbo's nephew.
+Frodo is from the Shire and carried the One Ring to Mount Doom.
+Frodo is a member of the Fellowship of the Ring.
+Samwise Gamgee is a hobbit from the Shire in The Lord of the Rings.
+Samwise is Frodo's loyal companion and a member of the Fellowship of the Ring.
+Aragorn is a man in The Lord of the Rings and is known as Strider.
+Aragorn is a ranger, a leader of Men, and a member of the Fellowship of the Ring.
+Legolas is an elf in The Lord of the Rings and a skilled archer.
+Legolas is a member of the Fellowship of the Ring.
+Gimli is a dwarf in The Lord of the Rings and a warrior.
+Gimli is a member of the Fellowship of the Ring.
+Boromir is a man from Gondor in The Lord of the Rings.
+Boromir is a member of the Fellowship of the Ring.
+The One Ring is a powerful ring in The Lord of the Rings that was created by Sauron.
+The One Ring corrupts its bearer and must be destroyed in Mount Doom.
+Sauron is the Dark Lord in The Lord of the Rings and created the One Ring.
+Sauron is an enemy of the free peoples of Middle-earth.
+Mordor is the realm of Sauron in The Lord of the Rings and contains Mount Doom.
+Mount Doom is a volcano in Mordor in The Lord of the Rings where the One Ring was destroyed.
+The Shire is the homeland of hobbits in The Lord of the Rings and the home of Frodo and Samwise.
+Gondor is a kingdom of Men in The Lord of the Rings and the home of Boromir.
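Each line of the rewritten context.txt is one self-contained fact, because the retrieval code added below embeds the file line by line: one line becomes one document in the vector store, and only whole lines are ever retrieved. A minimal sketch of that consumption pattern (illustrative only; load_facts is a hypothetical helper, and the real loop lives in generate_context() in prompt.c below):

    #include <stdio.h>
    #include <string.h>
    #include "vectordb.h"

    /* Sketch: one fact-file line -> one embedded document. */
    static void load_facts(VectorDB *db, const char *path) {
        FILE *fp = fopen(path, "r");
        if (fp == NULL) return;
        char line[1024];
        while (fgets(line, sizeof(line), fp) != NULL) {
            line[strcspn(line, "\r\n")] = '\0';   /* strip line ending */
            if (line[0] != '\0') {
                vdb_add_document(db, line);       /* embeds immediately */
            }
        }
        fclose(fp);
    }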
diff --git a/prompt.c b/prompt.c
index 23f3d7c..be6f85f 100644
--- a/prompt.c
+++ b/prompt.c
@@ -1,55 +1,113 @@
 #include "llama.h"
+#include "vectordb.h"
 #include "models.h"
+
 #include <stdio.h>
 #include <stdlib.h>
 #include <string.h>
 #include <getopt.h>
+#include <ctype.h>
 
-static void show_help(const char *prog) {
-    printf("Usage: %s [OPTIONS]\n", prog);
-    printf("Options:\n");
-    printf("  -m, --model   Specify model to use (default: first model)\n");
-    printf("  -p, --prompt  Specify prompt text (default: \"What is 2+2?\")\n");
-    printf("  -h, --help    Show this help message\n");
-}
+#define MAX_TOKENS 512
+#define MAX_TOKEN_LEN 32
 
-int main(int argc, char **argv) {
-    const char *model_name = NULL;
-    const char *prompt = NULL;
-
-    int n_predict = 64;
+static const char *refusal_text = "I don't have that information.";
 
-    static struct option long_options[] = {
-        {"model", required_argument, 0, 'm'},
-        {"prompt", required_argument, 0, 'p'},
-        {"help", no_argument, 0, 'h'},
-        {0, 0, 0, 0}
+static void llama_log_callback(enum ggml_log_level level, const char *text, void *user_data) {
+    (void)level;
+    (void)user_data;
+    (void)text;
+}
+
+static int is_stopword(const char *token, size_t len) {
+    static const char *stopwords[] = {
+        "a", "an", "the", "is", "are", "was", "were", "of", "to", "in", "on",
+        "for", "with", "and", "or", "not", "if", "then", "else", "from", "by",
+        "as", "at", "it", "its", "this", "that", "these", "those", "who", "what",
+        "when", "where", "why", "how", "which", "about", "into", "over", "under",
+        "be", "been", "being", "do", "does", "did", "but", "so", "than"
     };
+    for (size_t i = 0; i < sizeof(stopwords) / sizeof(stopwords[0]); i++) {
+        if (strlen(stopwords[i]) == len && strncmp(stopwords[i], token, len) == 0) {
+            return 1;
+        }
+    }
+    return 0;
+}
 
-    int opt;
-    int option_index = 0;
-    while ((opt = getopt_long(argc, argv, "m:p:h", long_options, &option_index)) != -1) {
-        switch (opt) {
-        case 'm':
-            model_name = optarg;
-            break;
-        case 'p':
-            prompt = optarg;
+static int token_exists(char tokens[MAX_TOKENS][MAX_TOKEN_LEN], int count, const char *token) {
+    for (int i = 0; i < count; i++) {
+        if (strcmp(tokens[i], token) == 0) {
+            return 1;
+        }
+    }
+    return 0;
+}
+
+static int collect_tokens(const char *text, char tokens[MAX_TOKENS][MAX_TOKEN_LEN]) {
+    int count = 0;
+    char buf[MAX_TOKEN_LEN];
+    int len = 0;
+    for (const unsigned char *p = (const unsigned char *)text; ; p++) {
+        if (isalnum(*p)) {
+            if (len < MAX_TOKEN_LEN - 1) {
+                buf[len++] = (char)tolower(*p);
+            }
+        } else {
+            if (len > 0) {
+                buf[len] = '\0';
+                if (len >= 4 && !is_stopword(buf, (size_t)len)) {
+                    if (!token_exists(tokens, count, buf) && count < MAX_TOKENS) {
+                        strncpy(tokens[count], buf, MAX_TOKEN_LEN - 1);
+                        tokens[count][MAX_TOKEN_LEN - 1] = '\0';
+                        count++;
+                    }
+                }
+                len = 0;
+            }
+            if (*p == '\0') {
                 break;
-            case 'h':
-                show_help(argv[0]);
-                return 0;
-            default:
-                fprintf(stderr, "Usage: %s [-m model] [-p prompt] [-h]\n", argv[0]);
-                return 1;
+            }
         }
     }
+    return count;
+}
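collect_tokens() reduces free text to a deduplicated bag of lowercase content words: alphanumeric runs only, truncated to MAX_TOKEN_LEN - 1 characters, kept only if at least 4 characters long and not on the stopword list. A small worked example (hypothetical harness, assuming the declarations above):

    char tokens[MAX_TOKENS][MAX_TOKEN_LEN];
    int n = collect_tokens("Who is Gandalf the Grey?", tokens);
    /* n == 2; tokens = { "gandalf", "grey" }.
       "Who", "is", "the" are stopwords (and mostly under 4 chars);
       "Grey" is lowercased to "grey"; repeats would be deduplicated. */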
 
-    if (prompt == NULL) {
-        printf("Prompt must be provided. Exiting...");
-        return 1;
+static int has_overlap(const char *a, const char *b) {
+    if (a == NULL || b == NULL) {
+        return 0;
     }
+    char tokens[MAX_TOKENS][MAX_TOKEN_LEN];
+    int token_count = collect_tokens(b, tokens);
+    if (token_count == 0) {
+        return 0;
+    }
+    char buf[MAX_TOKEN_LEN];
+    int len = 0;
+    for (const unsigned char *p = (const unsigned char *)a; ; p++) {
+        if (isalnum(*p)) {
+            if (len < MAX_TOKEN_LEN - 1) {
+                buf[len++] = (char)tolower(*p);
+            }
+        } else {
+            if (len > 0) {
+                buf[len] = '\0';
+                if (len >= 4 && !is_stopword(buf, (size_t)len)) {
+                    if (token_exists(tokens, token_count, buf)) {
+                        return 1;
+                    }
+                }
+                len = 0;
+            }
+            if (*p == '\0') {
+                break;
+            }
+        }
+    }
+    return 0;
+}
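has_overlap() is the cheap relevance gate: it tokenizes the context once, then scans the other string for any shared content word. execute_prompt() below uses it twice — before generation, to refuse when the question shares no word with the retrieved context, and after generation, to replace an answer that drifted away from the context. Illustrative behaviour (assuming the context facts shown earlier):

    const char *ctx = "Gandalf is a wizard in The Lord of the Rings.";
    has_overlap("Who is Gandalf?", ctx);       /* 1 -> proceed to generate */
    has_overlap("Who is Harry Potter?", ctx);  /* 0 -> print refusal_text and
                                                  skip loading the generation
                                                  model entirely */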
+
+static int execute_prompt(const char *model_name, const char *prompt, const char *context, int n_predict) {
     const model_config *cfg = NULL;
     if (model_name != NULL) {
         cfg = get_model_by_name(model_name);
@@ -61,6 +119,12 @@ int main(int argc, char **argv) {
         cfg = &models[0];
     }
 
+    if (!has_overlap(prompt, context)) {
+        printf("------------ Prompt: %s\n", prompt);
+        printf("------------ Response: %s\n", refusal_text);
+        return 0;
+    }
+
     ggml_backend_load_all();
 
     struct llama_model_params model_params = llama_model_default_params();
@@ -75,10 +139,27 @@ int main(int argc, char **argv) {
 
     const struct llama_vocab *vocab = llama_model_get_vocab(model);
 
-    int n_prompt = -llama_tokenize(vocab, prompt, strlen(prompt), NULL, 0, true, true);
+    const char *system_prefix = "System: Answer using only the Context. If the answer is not explicitly stated in Context, respond exactly: I don't have that information.\n\n";
+    const char *context_prefix = "Context:\n";
+    const char *prompt_prefix = "\n\nQuestion:\n";
+    const char *answer_prefix = "\n\nAnswer:\n";
+    size_t context_len = context ? strlen(context) : 0;
+    size_t prompt_len = strlen(prompt);
+    size_t full_len = strlen(system_prefix) + strlen(context_prefix) + context_len + strlen(prompt_prefix) + prompt_len + strlen(answer_prefix) + 1;
+    char *full_prompt = (char *)malloc(full_len);
+    if (full_prompt == NULL) {
+        fprintf(stderr, "Error: failed to allocate prompt buffer\n");
+        llama_model_free(model);
+        return 1;
+    }
+    snprintf(full_prompt, full_len, "%s%s%s%s%s", system_prefix, context_prefix, context ? context : "", prompt_prefix, prompt);
+    strncat(full_prompt, answer_prefix, full_len - strlen(full_prompt) - 1);
+
+    int n_prompt = -llama_tokenize(vocab, full_prompt, strlen(full_prompt), NULL, 0, true, true);
     llama_token *prompt_tokens = (llama_token *)malloc(n_prompt * sizeof(llama_token));
-    if (llama_tokenize(vocab, prompt, strlen(prompt), prompt_tokens, n_prompt, true, true) < 0) {
+    if (llama_tokenize(vocab, full_prompt, strlen(full_prompt), prompt_tokens, n_prompt, true, true) < 0) {
         fprintf(stderr, "Error: failed to tokenize the prompt\n");
+        free(full_prompt);
         free(prompt_tokens);
         llama_model_free(model);
         return 1;
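Assembled, full_prompt always has the same shape; for "Who is Frodo?" with two retrieved facts it would look roughly like this (illustrative — the actual Context lines depend on the embedding search):

    System: Answer using only the Context. If the answer is not explicitly stated in Context, respond exactly: I don't have that information.

    Context:
    Frodo Baggins is a hobbit in The Lord of the Rings and is Bilbo's nephew.
    Frodo is from the Shire and carried the One Ring to Mount Doom.

    Question:
    Who is Frodo?

    Answer: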
@@ -92,6 +173,7 @@ int main(int argc, char **argv) {
     struct llama_context *ctx = llama_init_from_model(model, ctx_params);
     if (ctx == NULL) {
         fprintf(stderr, "Error: failed to create the llama_context\n");
+        free(full_prompt);
         free(prompt_tokens);
         llama_model_free(model);
         return 1;
@@ -104,10 +186,15 @@ int main(int argc, char **argv) {
     llama_sampler_chain_add(smpl, llama_sampler_init_dist(cfg->seed));
 
     struct llama_batch batch = llama_batch_get_one(prompt_tokens, n_prompt);
-
+
     if (llama_model_has_encoder(model)) {
         if (llama_encode(ctx, batch)) {
             fprintf(stderr, "Error: failed to encode prompt\n");
+            llama_sampler_free(smpl);
+            free(full_prompt);
+            free(prompt_tokens);
+            llama_free(ctx);
+            llama_model_free(model);
             return 1;
         }
 
@@ -118,12 +205,25 @@ int main(int argc, char **argv) {
         batch = llama_batch_get_one(&decoder_start, 1);
     }
 
-    printf("Prompt: %s\n", prompt);
-    printf("Response: ");
+    printf("------------ Prompt: %s\n", prompt);
+    printf("------------ Response: ");
     fflush(stdout);
 
     int n_pos = 0;
     llama_token new_token_id;
+    size_t out_cap = 256;
+    size_t out_len = 0;
+    char *out = (char *)malloc(out_cap);
+    if (out == NULL) {
+        fprintf(stderr, "Error: failed to allocate output buffer\n");
+        free(full_prompt);
+        free(prompt_tokens);
+        llama_sampler_free(smpl);
+        llama_free(ctx);
+        llama_model_free(model);
+        return 1;
+    }
+    out[0] = '\0';
 
     while (n_pos + batch.n_tokens < n_prompt + n_predict) {
         if (llama_decode(ctx, batch)) {
@@ -145,18 +245,240 @@ int main(int argc, char **argv) {
             fprintf(stderr, "Error: failed to convert token to piece\n");
             break;
         }
 
-        printf("%.*s", n, buf);
-        fflush(stdout);
+        int stop_at = n;
+        for (int i = 0; i < n; i++) {
+            if (buf[i] == '\n') {
+                stop_at = i;
+                break;
+            }
+        }
+        if (out_len + (size_t)stop_at + 1 > out_cap) {
+            while (out_len + (size_t)stop_at + 1 > out_cap) {
+                out_cap *= 2;
+            }
+            char *next = (char *)realloc(out, out_cap);
+            if (next == NULL) {
+                fprintf(stderr, "Error: failed to grow output buffer\n");
+                break;
+            }
+            out = next;
+        }
+        memcpy(out + out_len, buf, (size_t)stop_at);
+        out_len += (size_t)stop_at;
+        out[out_len] = '\0';
+
+        if (stop_at != n) {
+            break;
+        }
 
         batch = llama_batch_get_one(&new_token_id, 1);
     }
 
-    printf("\n");
+    if (!has_overlap(out, context)) {
+        strcpy(out, refusal_text);
+        out_len = strlen(out);
+    }
+    printf("%s\n", out);
+
+    free(full_prompt);
     free(prompt_tokens);
+    free(out);
     llama_sampler_free(smpl);
     llama_free(ctx);
     llama_model_free(model);
 
     return 0;
 }
+
+static char *generate_context(const char *model_name, const char *context_file, const char *prompt) {
+    FILE *context_fp = fopen(context_file, "r");
+    if (context_fp == NULL) {
+        fprintf(stderr, "Error: unable to open context file %s\n", context_file);
+        return NULL;
+    }
+
+    llama_backend_init();
+
+    const model_config *cfg = NULL;
+    if (model_name != NULL) {
+        cfg = get_model_by_name(model_name);
+        if (cfg == NULL) {
+            fprintf(stderr, "Error: unknown model '%s'\n", model_name);
+            fclose(context_fp);
+            llama_backend_free();
+            return NULL;
+        }
+    } else {
+        cfg = &models[0];
+    }
+
+    /* struct llama_model *model = llama_load_model_from_file(cfg->filepath, llama_model_default_params()); */
+    struct llama_model *model = llama_model_load_from_file(cfg->filepath, llama_model_default_params());
+    if (model == NULL) {
+        fprintf(stderr, "Error: unable to load embedding model\n");
+        fclose(context_fp);
+        llama_backend_free();
+        return NULL;
+    }
+
+    struct llama_context_params cparams = llama_context_default_params();
+    cparams.embeddings = true;
+
+    /* struct llama_context *embed_ctx = llama_new_context_with_model(model, cparams); */
+    struct llama_context *embed_ctx = llama_init_from_model(model, cparams);
+    if (embed_ctx == NULL) {
+        fprintf(stderr, "Error: failed to create embedding context\n");
+        llama_model_free(model);
+        fclose(context_fp);
+        llama_backend_free();
+        return NULL;
+    }
+
+    VectorDB db;
+    vdb_init(&db, embed_ctx);
+
+    char line[1024];
+    while (fgets(line, sizeof(line), context_fp) != NULL) {
+        size_t len = strlen(line);
+        while (len > 0 && (line[len - 1] == '\n' || line[len - 1] == '\r')) {
+            line[len - 1] = '\0';
+            len--;
+        }
+        if (len == 0) {
+            continue;
+        }
+        vdb_add_document(&db, line);
+    }
+
+    float query[VDB_EMBED_SIZE];
+    int results[3];
+
+    vdb_embed_query(&db, prompt, query);
+    vdb_search(&db, query, 3, results);
+
+    size_t context_cap = 1024;
+    size_t context_len = 0;
+    char *context = (char *)malloc(context_cap);
+    if (context == NULL) {
+        fprintf(stderr, "Error: failed to allocate context buffer\n");
+        fclose(context_fp);
+        llama_free(embed_ctx);
+        llama_model_free(model);
+        llama_backend_free();
+        return NULL;
+    }
+    context[0] = '\0';
+
+    for (int i = 0; i < 3; i++) {
+        if (results[i] < 0) {
+            continue;
+        }
+        const char *text = db.docs[results[i]].text;
+        size_t text_len = strlen(text);
+        size_t need = context_len + text_len + 2;
+        if (need > context_cap) {
+            while (need > context_cap) {
+                context_cap *= 2;
+            }
+            char *next = (char *)realloc(context, context_cap);
+            if (next == NULL) {
+                fprintf(stderr, "Error: failed to grow context buffer\n");
+                free(context);
+                fclose(context_fp);
+                llama_free(embed_ctx);
+                llama_model_free(model);
+                llama_backend_free();
+                return NULL;
+            }
+            context = next;
+        }
+        memcpy(context + context_len, text, text_len);
+        context_len += text_len;
+        context[context_len++] = '\n';
+        context[context_len] = '\0';
+    }
+
+    fclose(context_fp);
+    llama_free(embed_ctx);
+    llama_model_free(model);
+    llama_backend_free();
+
+    return context;
+}
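Note that vdb_search() always returns the three nearest lines, however weak the match; relevance filtering is delegated entirely to the keyword-overlap guard. An alternative — not part of this patch, and it assumes cosine_similarity() were exported from vectordb.c rather than kept static — would be to drop weak hits by score before building the context:

    /* Hypothetical post-filter: discard hits below a similarity cutoff,
       so unrelated questions contribute no context at all. */
    #define MIN_SIMILARITY 0.35f   /* assumed value; needs tuning per model */

    static void drop_weak_hits(VectorDB *db, float *query, int *results, int top_k) {
        for (int i = 0; i < top_k; i++) {
            if (results[i] < 0) continue;
            float score = cosine_similarity(query, db->docs[results[i]].embedding, VDB_EMBED_SIZE);
            if (score < MIN_SIMILARITY) {
                results[i] = -1;   /* generate_context() already skips -1 entries */
            }
        }
    }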
+
+static void show_help(const char *prog) {
+    printf("Usage: %s [OPTIONS]\n", prog);
+    printf("Options:\n");
+    printf("  -m, --model    Specify model to use (default: first model)\n");
+    printf("  -p, --prompt   Specify prompt text (required)\n");
+    printf("  -c, --context  Specify context file (required)\n");
+    printf("  -v, --verbose  Enable verbose logging\n");
+    printf("  -h, --help     Show this help message\n");
+}
+
+int main(int argc, char **argv) {
+    const char *model_name = NULL;
+    const char *prompt = NULL;
+    const char *context_file = NULL;
+    int verbose = 0;
+
+    int n_predict = 64;
+
+    static struct option long_options[] = {
+        {"model", required_argument, 0, 'm'},
+        {"prompt", required_argument, 0, 'p'},
+        {"context", required_argument, 0, 'c'},
+        {"verbose", no_argument, 0, 'v'},
+        {"help", no_argument, 0, 'h'},
+        {0, 0, 0, 0}
+    };
+
+    int opt;
+    int option_index = 0;
+    while ((opt = getopt_long(argc, argv, "m:p:c:vh", long_options, &option_index)) != -1) {
+        switch (opt) {
+        case 'm':
+            model_name = optarg;
+            break;
+        case 'p':
+            prompt = optarg;
+            break;
+        case 'c':
+            context_file = optarg;
+            break;
+        case 'v':
+            verbose = 1;
+            break;
+        case 'h':
+            show_help(argv[0]);
+            return 0;
+        default:
+            fprintf(stderr, "Usage: %s [-m model] [-p prompt] [-c context] [-v] [-h]\n", argv[0]);
+            return 1;
+        }
+    }
+
+    if (verbose == 0) {
+        llama_log_set(llama_log_callback, NULL);
+    }
+
+    if (prompt == NULL) {
+        printf("Prompt must be provided. Exiting...\n");
+        return 1;
+    }
+
+    if (context_file == NULL) {
+        printf("Context file must be provided. Exiting...\n");
+        return 1;
+    }
+
+    char *context = generate_context(model_name, context_file, prompt);
+    if (context == NULL) {
+        return 1;
+    }
+
+    int rc = execute_prompt(model_name, prompt, context, n_predict);
+    free(context);
+    return rc;
+}
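The remaining two files implement the in-memory vector store that generate_context() drives. The whole API is five calls; used standalone it looks like this (a sketch, assuming embed_ctx is a llama_context created with embeddings = true, as above):

    VectorDB db;
    vdb_init(&db, embed_ctx);
    vdb_add_document(&db, "Gandalf is a wizard in The Lord of the Rings.");
    vdb_add_document(&db, "Frodo Baggins is a hobbit and Bilbo's nephew.");

    float query[VDB_EMBED_SIZE];
    int hits[3];
    vdb_embed_query(&db, "Who is Gandalf?", query);
    vdb_search(&db, query, 3, hits);   /* hits[j] indexes db.docs; entries are
                                          -1 when fewer than 3 documents exist */
    vdb_free(&db);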
diff --git a/vectordb.c b/vectordb.c
new file mode 100644
index 0000000..5e45cc4
--- /dev/null
+++ b/vectordb.c
@@ -0,0 +1,92 @@
+#include <stdio.h>
+#include <string.h>
+#include <math.h>
+
+#include "llama.h"
+#include "vectordb.h"
+
+static float cosine_similarity(float *a, float *b, int n) {
+    float dot = 0, normA = 0, normB = 0;
+    for (int i = 0; i < n; i++) {
+        dot += a[i] * b[i];
+        normA += a[i] * a[i];
+        normB += b[i] * b[i];
+    }
+    return dot / (sqrtf(normA) * sqrtf(normB) + 1e-8f);
+}
+
+static void embed_text(struct llama_context *ctx, const char *text, float *out) {
+    llama_token tokens[512];
+    const struct llama_model *model = llama_get_model(ctx);
+    const struct llama_vocab *vocab = llama_model_get_vocab(model);
+    int n_tokens = llama_tokenize(
+        vocab,
+        text,
+        strlen(text),
+        tokens,
+        512,
+        true,
+        true
+    );
+    if (n_tokens < 0) {
+        return;
+    }
+
+    struct llama_batch batch = llama_batch_get_one(tokens, n_tokens);
+    llama_decode(ctx, batch);
+
+    const float *emb = llama_get_embeddings(ctx);
+    memcpy(out, emb, sizeof(float) * VDB_EMBED_SIZE);
+
+}
+
+void vdb_init(VectorDB *db, struct llama_context *embed_ctx) {
+    memset(db, 0, sizeof(VectorDB));
+    db->embed_ctx = embed_ctx;
+}
+
+void vdb_free(VectorDB *db) {
+    (void)db; // nothing yet (future persistence etc.)
+}
+
+void vdb_add_document(VectorDB *db, const char *text) {
+    if (db->count >= VDB_MAX_DOCS) {
+        printf("VectorDB full!\n");
+        return;
+    }
+
+    VectorDoc *doc = &db->docs[db->count++];
+    strncpy(doc->text, text, VDB_MAX_TEXT - 1);
+    doc->text[VDB_MAX_TEXT - 1] = 0;
+
+    printf("Embedding doc %d...\n", db->count);
+    embed_text(db->embed_ctx, text, doc->embedding);
+}
+
+void vdb_embed_query(VectorDB *db, const char *text, float *out_embedding) {
+    embed_text(db->embed_ctx, text, out_embedding);
+}
+
+void vdb_search(VectorDB *db, float *query, int top_k, int *results) {
+    float best_scores[top_k];
+    for (int i = 0; i < top_k; i++) {
+        best_scores[i] = -1.0f;
+        results[i] = -1;
+    }
+
+    for (int i = 0; i < db->count; i++) {
+        float score = cosine_similarity(query, db->docs[i].embedding, VDB_EMBED_SIZE);
+
+        for (int j = 0; j < top_k; j++) {
+            if (score > best_scores[j]) {
+                for (int k = top_k - 1; k > j; k--) {
+                    best_scores[k] = best_scores[k - 1];
+                    results[k] = results[k - 1];
+                }
+                best_scores[j] = score;
+                results[j] = i;
+                break;
+            }
+        }
+    }
+}
diff --git a/vectordb.h b/vectordb.h
new file mode 100644
index 0000000..3b375bb
--- /dev/null
+++ b/vectordb.h
@@ -0,0 +1,29 @@
+#ifndef VECTORDB_H
+#define VECTORDB_H
+
+#include "llama.h"
+
+#define VDB_MAX_DOCS 1000
+#define VDB_EMBED_SIZE 768
+#define VDB_MAX_TEXT 1024
+
+typedef struct {
+    float embedding[VDB_EMBED_SIZE];
+    char text[VDB_MAX_TEXT];
+} VectorDoc;
+
+typedef struct {
+    VectorDoc docs[VDB_MAX_DOCS];
+    int count;
+    struct llama_context *embed_ctx;
+} VectorDB;
+
+void vdb_init(VectorDB *db, struct llama_context *embed_ctx);
+void vdb_free(VectorDB *db);
+
+void vdb_add_document(VectorDB *db, const char *text);
+
+void vdb_embed_query(VectorDB *db, const char *text, float *out_embedding);
+void vdb_search(VectorDB *db, float *query_embedding, int top_k, int *results);
+
+#endif
-- 
cgit v1.2.3
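With the patch applied, the pipeline can be exercised end to end (assuming the llama.cpp libraries have been built and models fetched via the existing Makefile targets):

    make prompt
    ./prompt -c context.txt -p "Who is Gandalf?"       # answers from retrieved facts
    ./prompt -c context.txt -p "Who is Harry Potter?"  # prints: I don't have that information.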