summaryrefslogtreecommitdiff
path: root/llama.cpp/src/llama-chat.h
diff options
context:
space:
mode:
Diffstat (limited to 'llama.cpp/src/llama-chat.h')
-rw-r--r--  llama.cpp/src/llama-chat.h  71
1 file changed, 71 insertions, 0 deletions
diff --git a/llama.cpp/src/llama-chat.h b/llama.cpp/src/llama-chat.h
new file mode 100644
index 0000000..9ed1db1
--- /dev/null
+++ b/llama.cpp/src/llama-chat.h
@@ -0,0 +1,71 @@
+#pragma once
+
+#include <string>
+#include <vector>
+#include <cstdint>
+
+// Identifiers for the built-in chat-prompt formats this module can render.
+// NOTE(review): enumerator order fixes the implicit integer values — append
+// new formats before LLM_CHAT_TEMPLATE_UNKNOWN rather than inserting in the
+// middle, so existing serialized/compared values stay stable.
+enum llm_chat_template {
+ LLM_CHAT_TEMPLATE_CHATML,
+ LLM_CHAT_TEMPLATE_LLAMA_2,
+ LLM_CHAT_TEMPLATE_LLAMA_2_SYS,
+ LLM_CHAT_TEMPLATE_LLAMA_2_SYS_BOS,
+ LLM_CHAT_TEMPLATE_LLAMA_2_SYS_STRIP,
+ LLM_CHAT_TEMPLATE_MISTRAL_V1,
+ LLM_CHAT_TEMPLATE_MISTRAL_V3,
+ LLM_CHAT_TEMPLATE_MISTRAL_V3_TEKKEN,
+ LLM_CHAT_TEMPLATE_MISTRAL_V7,
+ LLM_CHAT_TEMPLATE_MISTRAL_V7_TEKKEN,
+ LLM_CHAT_TEMPLATE_PHI_3,
+ LLM_CHAT_TEMPLATE_PHI_4,
+ LLM_CHAT_TEMPLATE_FALCON_3,
+ LLM_CHAT_TEMPLATE_ZEPHYR,
+ LLM_CHAT_TEMPLATE_MONARCH,
+ LLM_CHAT_TEMPLATE_GEMMA,
+ LLM_CHAT_TEMPLATE_ORION,
+ LLM_CHAT_TEMPLATE_OPENCHAT,
+ LLM_CHAT_TEMPLATE_VICUNA,
+ LLM_CHAT_TEMPLATE_VICUNA_ORCA,
+ LLM_CHAT_TEMPLATE_DEEPSEEK,
+ LLM_CHAT_TEMPLATE_DEEPSEEK_2,
+ LLM_CHAT_TEMPLATE_DEEPSEEK_3,
+ LLM_CHAT_TEMPLATE_COMMAND_R,
+ LLM_CHAT_TEMPLATE_LLAMA_3,
+ LLM_CHAT_TEMPLATE_CHATGLM_3,
+ LLM_CHAT_TEMPLATE_CHATGLM_4,
+ LLM_CHAT_TEMPLATE_GLMEDGE,
+ LLM_CHAT_TEMPLATE_MINICPM,
+ LLM_CHAT_TEMPLATE_EXAONE_3,
+ LLM_CHAT_TEMPLATE_EXAONE_4,
+ LLM_CHAT_TEMPLATE_EXAONE_MOE,
+ LLM_CHAT_TEMPLATE_RWKV_WORLD,
+ LLM_CHAT_TEMPLATE_GRANITE,
+ LLM_CHAT_TEMPLATE_GIGACHAT,
+ LLM_CHAT_TEMPLATE_MEGREZ,
+ LLM_CHAT_TEMPLATE_YANDEX,
+ LLM_CHAT_TEMPLATE_BAILING,
+ LLM_CHAT_TEMPLATE_BAILING_THINK,
+ LLM_CHAT_TEMPLATE_BAILING2,
+ LLM_CHAT_TEMPLATE_LLAMA4,
+ LLM_CHAT_TEMPLATE_SMOLVLM,
+ LLM_CHAT_TEMPLATE_DOTS1,
+ LLM_CHAT_TEMPLATE_HUNYUAN_MOE,
+ LLM_CHAT_TEMPLATE_OPENAI_MOE,
+ LLM_CHAT_TEMPLATE_HUNYUAN_DENSE,
+ LLM_CHAT_TEMPLATE_KIMI_K2,
+ LLM_CHAT_TEMPLATE_SEED_OSS,
+ LLM_CHAT_TEMPLATE_GROK_2,
+ LLM_CHAT_TEMPLATE_PANGU_EMBED,
+ LLM_CHAT_TEMPLATE_SOLAR_OPEN,
+ // Sentinel: presumably returned by llm_chat_detect_template /
+ // llm_chat_template_from_str when no known format matches — confirm
+ // against the implementation in llama-chat.cpp.
+ LLM_CHAT_TEMPLATE_UNKNOWN,
+};
+
+// Forward declaration; the full struct is part of the public C API
+// (presumably declared in llama.h — confirm include chain).
+struct llama_chat_message;
+
+// Map a template's registered name to its enum value.
+// NOTE(review): likely returns LLM_CHAT_TEMPLATE_UNKNOWN for an
+// unrecognized name — verify in llama-chat.cpp.
+llm_chat_template llm_chat_template_from_str(const std::string & name);
+
+// Identify which known format the template source text `tmpl` corresponds
+// to. NOTE(review): detection is presumably heuristic (substring matching
+// on the raw template string) — confirm in llama-chat.cpp.
+llm_chat_template llm_chat_detect_template(const std::string & tmpl);
+
+// Render the conversation `chat` into `dest` using template `tmpl`.
+// `add_ass` presumably appends the assistant-turn prefix so generation
+// continues as the assistant — confirm against the implementation.
+// Returns int32_t: likely the rendered length on success and a negative
+// value on failure — verify the exact contract in llama-chat.cpp.
+int32_t llm_chat_apply_template(
+ llm_chat_template tmpl,
+ const std::vector<const llama_chat_message *> & chat,
+ std::string & dest, bool add_ass);