diff options
| author | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-18 13:29:28 +0100 |
|---|---|---|
| committer | Mitja Felicijan <mitja.felicijan@gmail.com> | 2026-02-18 13:29:28 +0100 |
| commit | 128c209108b1178daeef48247ff55dcecc1c51fe (patch) | |
| tree | a5769522c7bf352624488c8ab34a9a8479c22fec | |
| parent | a123d9f1ebebb6ad20155285c9457a6d6a5c7a61 (diff) | |
| download | llmnpc-128c209108b1178daeef48247ff55dcecc1c51fe.tar.gz | |
Cleanup and refactor
| -rw-r--r-- | context.c | 3 | ||||
| -rw-r--r-- | npc.c | 4 |
2 files changed, 2 insertions, 5 deletions
@@ -10,9 +10,6 @@
 #include <string.h>
 #include <getopt.h>
 
-#define MAX_TOKENS 512
-#define MAX_TOKEN_LEN 32
-
 static void llama_log_callback(enum ggml_log_level level, const char *text, void *user_data) {
     (void)level;
     (void)user_data;
@@ -152,8 +152,8 @@ static int execute_prompt_with_context(const ModelConfig *cfg, const char *promp
         batch = llama_batch_get_one(&decoder_start, 1);
     }
 
-    printf("------------ Prompt: %s\n", prompt);
-    printf("------------ Response: ");
+    printf(">> Prompt: %s\n", prompt);
+    printf(">> Response: ");
     fflush(stdout);
 
     int n_pos = 0;
