diff options
Diffstat (limited to 'Makefile')
| -rw-r--r-- | Makefile | 13 |
1 file changed, 9 insertions(+), 4 deletions(-)
@@ -10,9 +10,12 @@ LDFLAGS = -L$(LLAMA_DIR)/build/src -L$(LLAMA_DIR)/build/ggml/src \
 	-lpthread -lm -ldl -lstdc++ -g \
 	-lllama -lggml -lggml-cpu -lggml-base
 
+PROMPT_TXT := $(wildcard prompts/*.txt)
+PROMPT_HEADERS := $(PROMPT_TXT:.txt=.h)
+
 help: .help
 
-build/npc: run/system-prompt npc.c vectordb.c models.h # Build npc binary for testing
+build/npc: build/prompts npc.c vectordb.c models.h # Build npc binary for testing
 	$(CC) $(CFLAGS) npc.c vectordb.c -o npc $(LDFLAGS)
 
 build/context: context.c vectordb.c models.h # Build context binary for testing
@@ -24,6 +27,8 @@ build/llama.cpp: .assure # Build llama.cpp libraries
 	cmake ../ -DBUILD_SHARED_LIBS=OFF && \
 	make -j8
 
+build/prompts: $(PROMPT_HEADERS) # Generate C style header
+
 run/fetch-models: .assure # Fetch GGUF models
 	-mkdir -p models
 	cd models && wget -nc -i ../models.txt
@@ -32,10 +37,10 @@ run/docker: .assure # Runs npc in Docker container
 	docker build -t npcd .
 	docker run -it npcd
 
-run/system-prompt: .assure # Generate C style header
-	xxd -i system_prompt.txt > system_prompt.h
-
 run/clean: # Cleans up all the build artefacts
 	-rm -f npc
 	cd $(LLAMA_DIR)/build && make clean
 	-rm -Rf $(LLAMA_DIR)/build
+
+prompts/%.h: prompts/%.txt .assure
+	xxd -i $< > $@
