1llama_add_compile_flags()
2
3function(llama_build source)
4 set(TEST_SOURCES ${source} ${ARGN})
5
6 if (DEFINED LLAMA_TEST_NAME)
7 set(TEST_TARGET ${LLAMA_TEST_NAME})
8 else()
9 get_filename_component(TEST_TARGET ${source} NAME_WE)
10 endif()
11
12 add_executable(${TEST_TARGET} ${TEST_SOURCES})
13 target_link_libraries(${TEST_TARGET} PRIVATE common)
14 if (LLAMA_TESTS_INSTALL)
15 install(TARGETS ${TEST_TARGET} RUNTIME)
16 endif()
17endfunction()
18
19function(llama_test target)
20 include(CMakeParseArguments)
21 set(options)
22 set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
23 set(multiValueArgs ARGS)
24 cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
25
26 if (NOT DEFINED LLAMA_TEST_LABEL)
27 set(LLAMA_TEST_LABEL "main")
28 endif()
29 if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
30 set(LLAMA_TEST_WORKING_DIRECTORY .)
31 endif()
32 if (DEFINED LLAMA_TEST_NAME)
33 set(TEST_NAME ${LLAMA_TEST_NAME})
34 else()
35 set(TEST_NAME ${target})
36 endif()
37
38 set(TEST_TARGET ${target})
39
40 add_test(
41 NAME ${TEST_NAME}
42 WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
43 COMMAND $<TARGET_FILE:${TEST_TARGET}>
44 ${LLAMA_TEST_ARGS})
45
46 set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
47endfunction()
48
49function(llama_test_cmd target)
50 include(CMakeParseArguments)
51 set(options)
52 set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
53 set(multiValueArgs ARGS)
54 cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
55
56 if (NOT DEFINED LLAMA_TEST_LABEL)
57 set(LLAMA_TEST_LABEL "main")
58 endif()
59 if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
60 set(LLAMA_TEST_WORKING_DIRECTORY .)
61 endif()
62 if (DEFINED LLAMA_TEST_NAME)
63 set(TEST_NAME ${LLAMA_TEST_NAME})
64 else()
65 set(TEST_NAME ${target})
66 endif()
67
68 add_test(
69 NAME ${TEST_NAME}
70 WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
71 COMMAND ${target}
72 ${LLAMA_TEST_ARGS})
73
74 set_property(TEST ${TEST_NAME} PROPERTY LABELS ${LLAMA_TEST_LABEL})
75endfunction()
76
77# Builds and runs a test source file.
78# Optional args:
79# - NAME: name of the executable & test target (defaults to the source file name without extension)
80# - LABEL: label for the test (defaults to main)
81# - ARGS: arguments to pass to the test executable
82# - WORKING_DIRECTORY
83function(llama_build_and_test source)
84 include(CMakeParseArguments)
85 set(options)
86 set(oneValueArgs NAME LABEL WORKING_DIRECTORY)
87 set(multiValueArgs ARGS)
88 cmake_parse_arguments(LLAMA_TEST "${options}" "${oneValueArgs}" "${multiValueArgs}" ${ARGN})
89
90 set(TEST_SOURCES ${source} ${LLAMA_TEST_UNPARSED_ARGUMENTS} get-model.cpp)
91
92 if (NOT DEFINED LLAMA_TEST_LABEL)
93 set(LLAMA_TEST_LABEL "main")
94 endif()
95 if (NOT DEFINED LLAMA_TEST_WORKING_DIRECTORY)
96 set(LLAMA_TEST_WORKING_DIRECTORY .)
97 endif()
98 if (DEFINED LLAMA_TEST_NAME)
99 set(TEST_TARGET ${LLAMA_TEST_NAME})
100 else()
101 get_filename_component(TEST_TARGET ${source} NAME_WE)
102 endif()
103
104 add_executable(${TEST_TARGET} ${TEST_SOURCES})
105 if (LLAMA_TESTS_INSTALL)
106 install(TARGETS ${TEST_TARGET} RUNTIME)
107 endif()
108 target_link_libraries(${TEST_TARGET} PRIVATE common)
109
110 add_test(
111 NAME ${TEST_TARGET}
112 WORKING_DIRECTORY ${LLAMA_TEST_WORKING_DIRECTORY}
113 COMMAND $<TARGET_FILE:${TEST_TARGET}>
114 ${LLAMA_TEST_ARGS})
115
116 set_property(TEST ${TEST_TARGET} PROPERTY LABELS ${LLAMA_TEST_LABEL})
117endfunction()
118
119# build test-tokenizer-0 target once and add many tests
120llama_build(test-tokenizer-0.cpp)
121
122llama_test(test-tokenizer-0 NAME test-tokenizer-0-bert-bge ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-bert-bge.gguf)
123llama_test(test-tokenizer-0 NAME test-tokenizer-0-command-r ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-command-r.gguf)
124llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-coder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-coder.gguf)
125llama_test(test-tokenizer-0 NAME test-tokenizer-0-deepseek-llm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-deepseek-llm.gguf)
126llama_test(test-tokenizer-0 NAME test-tokenizer-0-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
127llama_test(test-tokenizer-0 NAME test-tokenizer-0-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
128llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
129llama_test(test-tokenizer-0 NAME test-tokenizer-0-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
130llama_test(test-tokenizer-0 NAME test-tokenizer-0-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
131llama_test(test-tokenizer-0 NAME test-tokenizer-0-phi-3 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-phi-3.gguf)
132llama_test(test-tokenizer-0 NAME test-tokenizer-0-qwen2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-qwen2.gguf)
133llama_test(test-tokenizer-0 NAME test-tokenizer-0-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
134llama_test(test-tokenizer-0 NAME test-tokenizer-0-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)
135
136if (NOT WIN32)
137 llama_test_cmd(
138 ${CMAKE_CURRENT_SOURCE_DIR}/test-tokenizers-repo.sh
139 NAME test-tokenizers-ggml-vocabs
140 WORKING_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
141 ARGS https://huggingface.co/ggml-org/vocabs ${PROJECT_SOURCE_DIR}/models/ggml-vocabs
142 )
143endif()
144
145if (LLAMA_LLGUIDANCE)
146 llama_build_and_test(test-grammar-llguidance.cpp ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf)
147endif ()
148
149if (NOT WIN32 OR NOT BUILD_SHARED_LIBS)
150 # these tests are disabled on Windows because they use internal functions not exported with LLAMA_API (when building with shared libraries)
151 llama_build_and_test(test-sampling.cpp)
152 llama_build_and_test(test-grammar-parser.cpp)
153 llama_build_and_test(test-grammar-integration.cpp)
154 llama_build_and_test(test-llama-grammar.cpp)
155 llama_build_and_test(test-chat.cpp)
156 # TODO: disabled on loongarch64 because the ggml-ci node lacks Python 3.8
157 if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "loongarch64")
158 llama_build_and_test(test-json-schema-to-grammar.cpp WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
159 target_include_directories(test-json-schema-to-grammar PRIVATE ${PROJECT_SOURCE_DIR}/tools/server)
160 endif()
161
162 if (NOT GGML_BACKEND_DL)
163 llama_build(test-quantize-stats.cpp)
164 endif()
165
166 llama_build(test-gbnf-validator.cpp)
167
168 # build test-tokenizer-1-bpe target once and add many tests
169 llama_build(test-tokenizer-1-bpe.cpp)
170
171 # TODO: disabled due to slowness
172 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-aquila ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-aquila.gguf)
173 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-falcon ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-falcon.gguf)
174 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-2 ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-2.gguf)
175 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-gpt-neox ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-gpt-neox.gguf)
176 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-llama-bpe ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-bpe.gguf --ignore-merges)
177 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-mpt ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-mpt.gguf)
178 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-refact ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-refact.gguf)
179 #llama_test(test-tokenizer-1-bpe NAME test-tokenizer-1-starcoder ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-starcoder.gguf)
180
181 # build test-tokenizer-1-spm target once and add many tests
182 llama_build(test-tokenizer-1-spm.cpp)
183
184 llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-llama-spm ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-llama-spm.gguf)
185 #llama_test(test-tokenizer-1-spm NAME test-tokenizer-1-baichuan ARGS ${PROJECT_SOURCE_DIR}/models/ggml-vocab-baichuan.gguf)
186
187 # llama_build_and_test(test-double-float.cpp) # SLOW
188endif()
189
190llama_build_and_test(test-chat-parser.cpp)
191llama_build_and_test(test-chat-peg-parser.cpp peg-parser/simple-tokenize.cpp)
192llama_build_and_test(test-chat-template.cpp)
193llama_build_and_test(test-jinja.cpp)
194llama_test(test-jinja NAME test-jinja-py ARGS -py LABEL python)
195llama_build_and_test(test-json-partial.cpp)
196llama_build_and_test(test-log.cpp)
197llama_build_and_test(
198 test-peg-parser.cpp
199 peg-parser/simple-tokenize.cpp
200 peg-parser/test-basic.cpp
201 peg-parser/test-gbnf-generation.cpp
202 peg-parser/test-json-parser.cpp
203 peg-parser/test-json-serialization.cpp
204 peg-parser/test-unicode.cpp
205 peg-parser/tests.h
206)
207llama_build_and_test(test-regex-partial.cpp)
208
209if (NOT ${CMAKE_SYSTEM_PROCESSOR} MATCHES "s390x")
210 set(MODEL_NAME "tinyllamas/stories15M-q4_0.gguf")
211 set(MODEL_HASH "SHA256=66967fbece6dbe97886593fdbb73589584927e29119ec31f08090732d1861739")
212else()
213 set(MODEL_NAME "tinyllamas/stories15M-be.Q4_0.gguf")
214 set(MODEL_HASH "SHA256=9aec857937849d976f30397e97eb1cabb53eb9dcb1ce4611ba8247fb5f44c65d")
215endif()
216set(MODEL_DEST "${CMAKE_BINARY_DIR}/${MODEL_NAME}")
217
218add_test(NAME test-download-model COMMAND ${CMAKE_COMMAND}
219 -DDEST=${MODEL_DEST}
220 -DNAME=${MODEL_NAME}
221 -DHASH=${MODEL_HASH}
222 -P ${CMAKE_SOURCE_DIR}/cmake/download-models.cmake
223)
224set_tests_properties(test-download-model PROPERTIES FIXTURES_SETUP test-download-model)
225
226llama_build_and_test(test-thread-safety.cpp ARGS -m "${MODEL_DEST}" -ngl 99 -p "The meaning of life is" -n 128 -c 256 -ub 32 -np 4 -t 2)
227set_tests_properties(test-thread-safety PROPERTIES FIXTURES_REQUIRED test-download-model)
228
229llama_build_and_test(test-arg-parser.cpp)
230
231if (NOT LLAMA_SANITIZE_ADDRESS AND NOT GGML_SCHED_NO_REALLOC)
232 # TODO: repair known memory leaks
233 llama_build_and_test(test-opt.cpp)
234endif()
235llama_build_and_test(test-gguf.cpp)
236llama_build_and_test(test-backend-ops.cpp)
237
238llama_build_and_test(test-model-load-cancel.cpp LABEL "model")
239llama_build_and_test(test-autorelease.cpp LABEL "model")
240llama_build_and_test(test-backend-sampler.cpp LABEL "model")
241
242# Test for state restore with fragmented KV cache
243# Requires a model, uses same args pattern as test-thread-safety
244llama_build_and_test(test-state-restore-fragmented.cpp LABEL "model" ARGS -m "${MODEL_DEST}")
245set_tests_properties(test-state-restore-fragmented PROPERTIES FIXTURES_REQUIRED test-download-model)
246
247if (NOT GGML_BACKEND_DL)
248 # these tests use the backends directly and cannot be built with dynamic loading
249 llama_build_and_test(test-barrier.cpp)
250 llama_build_and_test(test-quantize-fns.cpp)
251 llama_build_and_test(test-quantize-perf.cpp)
252 llama_build_and_test(test-rope.cpp)
253endif()
254
255# libmtmd
256set(LLAMA_TEST_NAME test-mtmd-c-api)
257llama_build_and_test(test-mtmd-c-api.c)
258target_link_libraries(${LLAMA_TEST_NAME} PRIVATE mtmd)
259
260# dummy executable - not installed
261get_filename_component(TEST_TARGET test-c.c NAME_WE)
262add_executable(${TEST_TARGET} test-c.c)
263target_link_libraries(${TEST_TARGET} PRIVATE llama)
264
265llama_build_and_test(test-alloc.cpp)
266target_include_directories(test-alloc PRIVATE ${PROJECT_SOURCE_DIR}/ggml/src)