cmake_minimum_required(VERSION 3.14) # for add_link_options and implicit target directories.
project("llama.cpp" C CXX)
include(CheckIncludeFileCXX)

# warn on deprecated CMake behavior (currently disabled)
#set(CMAKE_WARN_DEPRECATED YES)
# warn about -D cache variables passed on the command line but never used
set(CMAKE_WARN_UNUSED_CLI YES)

# emit compile_commands.json for tooling (clangd, clang-tidy, ...)
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
  9
 10if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
 11    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
 12    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
 13endif()
 14
 15message("CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}")
 16
# Add path to modules
list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake/")

# collect all runtime artifacts and libraries under <build>/bin
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)
 22
# LLAMA_STANDALONE is ON when this is the top-level CMakeLists.txt and
# OFF when llama.cpp is consumed via add_subdirectory() from a parent project.
if (CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
    set(LLAMA_STANDALONE ON)

    # pull git metadata (module from cmake/ added to CMAKE_MODULE_PATH above)
    include(git-vars)

    # configure project version
    # TODO
else()
    set(LLAMA_STANDALONE OFF)
endif()
 33
option(LLAMA_USE_SYSTEM_GGML "Use system libggml" OFF)

option(LLAMA_WASM_MEM64 "llama: use 64-bit memory in WASM builds" ON)

# Emscripten builds: static libs only, plus WASM-specific link settings.
if (EMSCRIPTEN)
    set(BUILD_SHARED_LIBS_DEFAULT OFF)

    # Use 64-bit memory to support backend_get_memory queries
    # TODO: analyze performance impact, see https://spidermonkey.dev/blog/2025/01/15/is-memory64-actually-worth-using
    if (LLAMA_WASM_MEM64)
      # MEMORY64 must be passed at both compile and link time
      add_compile_options("-sMEMORY64=1")
      add_link_options("-sMEMORY64=1")
    endif()
    add_link_options("-sALLOW_MEMORY_GROWTH=1")

    option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" OFF)
    option(LLAMA_BUILD_HTML "llama: build HTML file" ON)
    if (LLAMA_BUILD_HTML)
        # produce .html launchers instead of bare .js executables
        set(CMAKE_EXECUTABLE_SUFFIX ".html")
    endif()
else()
    # MinGW defaults to static libs; everything else defaults to shared
    if (MINGW)
        set(BUILD_SHARED_LIBS_DEFAULT OFF)
    else()
        set(BUILD_SHARED_LIBS_DEFAULT ON)
    endif()
endif()
 61
# platform-dependent default computed in the EMSCRIPTEN/MINGW branch above
option(BUILD_SHARED_LIBS "build shared libraries" ${BUILD_SHARED_LIBS_DEFAULT})

if (WIN32)
    # silence MSVC CRT warnings about "insecure" C runtime functions
    add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
endif()

if (MSVC)
    # treat sources as UTF-8 and allow object files with many sections
    add_compile_options("$<$<COMPILE_LANGUAGE:C>:/utf-8>")
    add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/utf-8>")
    add_compile_options("$<$<COMPILE_LANGUAGE:C>:/bigobj>")
    add_compile_options("$<$<COMPILE_LANGUAGE:CXX>:/bigobj>")
endif()

if (LLAMA_STANDALONE)
    # enable parallel builds for msbuild
    list(APPEND CMAKE_VS_GLOBALS UseMultiToolTask=true)
    list(APPEND CMAKE_VS_GLOBALS EnforceProcessCountAcrossBuilds=true)
endif()
 80
# tools are not installed by default on iOS
if (CMAKE_SYSTEM_NAME STREQUAL "iOS")
    set(LLAMA_TOOLS_INSTALL_DEFAULT OFF)
else()
    set(LLAMA_TOOLS_INSTALL_DEFAULT ${LLAMA_STANDALONE})
endif()

#
# option list
#

# debug
option(LLAMA_ALL_WARNINGS           "llama: enable all compiler warnings"                   ON)
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)

# build
option(LLAMA_FATAL_WARNINGS "llama: enable -Werror flag" OFF)

# sanitizers
option(LLAMA_SANITIZE_THREAD    "llama: enable thread sanitizer"    OFF)
option(LLAMA_SANITIZE_ADDRESS   "llama: enable address sanitizer"   OFF)
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)

# utils
option(LLAMA_BUILD_COMMON "llama: build common utils library" ${LLAMA_STANDALONE})

# extra artifacts — all default ON for standalone builds, OFF when embedded
option(LLAMA_BUILD_TESTS    "llama: build tests"          ${LLAMA_STANDALONE})
option(LLAMA_BUILD_TOOLS    "llama: build tools"          ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES "llama: build examples"       ${LLAMA_STANDALONE})
option(LLAMA_BUILD_SERVER   "llama: build server example" ${LLAMA_STANDALONE})
option(LLAMA_TOOLS_INSTALL  "llama: install tools"        ${LLAMA_TOOLS_INSTALL_DEFAULT})
option(LLAMA_TESTS_INSTALL  "llama: install tests"        ON)

# 3rd party libs
option(LLAMA_HTTPLIB    "llama: httplib for downloading functionality" ON)
option(LLAMA_OPENSSL    "llama: use openssl to support HTTPS" ON)
option(LLAMA_LLGUIDANCE "llama-common: include LLGuidance library for structured output in common utils" OFF)

# deprecated — accepted for compatibility but has no effect
option(LLAMA_CURL "llama: use libcurl to download model from an URL" OFF)
if (LLAMA_CURL)
    message(WARNING "LLAMA_CURL option is deprecated and will be ignored")
endif()
124
# Required for relocatable CMake package
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/build-info.cmake)
include(${CMAKE_CURRENT_SOURCE_DIR}/cmake/common.cmake)

# allow an embedding project to pre-set the build number/commit; otherwise use
# BUILD_NUMBER/BUILD_COMMIT (presumably provided by build-info.cmake — verify)
if (NOT DEFINED LLAMA_BUILD_NUMBER)
    set(LLAMA_BUILD_NUMBER        ${BUILD_NUMBER})
endif()
if (NOT DEFINED LLAMA_BUILD_COMMIT)
    set(LLAMA_BUILD_COMMIT        ${BUILD_COMMIT})
endif()
set(LLAMA_INSTALL_VERSION 0.0.${LLAMA_BUILD_NUMBER})

# override ggml options
set(GGML_ALL_WARNINGS   ${LLAMA_ALL_WARNINGS})
set(GGML_FATAL_WARNINGS ${LLAMA_FATAL_WARNINGS})

# change the default for these ggml options (only when the user has not set them)
if (NOT DEFINED GGML_LLAMAFILE)
    set(GGML_LLAMAFILE_DEFAULT ON)
endif()

if (NOT DEFINED GGML_CUDA_GRAPHS)
    set(GGML_CUDA_GRAPHS_DEFAULT ON)
endif()
149
# transition helpers
#
# llama_option_depr(TYPE OLD NEW)
#   If the deprecated option OLD is truthy, emit a message of severity TYPE
#   (e.g. WARNING, or FATAL_ERROR to abort configuration) and enable the
#   replacement option NEW in the caller's scope.
#   Note: ${OLD} expands to the option's *name*, which if() then dereferences.
function (llama_option_depr TYPE OLD NEW)
    if (${OLD})
        message(${TYPE} "${OLD} is deprecated and will be removed in the future.\nUse ${NEW} instead\n")
        set(${NEW} ON PARENT_SCOPE)
    endif()
endfunction()
157
# map deprecated LLAMA_* backend options to their GGML_* replacements
llama_option_depr(FATAL_ERROR LLAMA_CUBLAS              GGML_CUDA)
llama_option_depr(WARNING     LLAMA_CUDA                GGML_CUDA)
llama_option_depr(WARNING     LLAMA_METAL               GGML_METAL)
llama_option_depr(WARNING     LLAMA_METAL_EMBED_LIBRARY GGML_METAL_EMBED_LIBRARY)
llama_option_depr(WARNING     LLAMA_NATIVE              GGML_NATIVE)
llama_option_depr(WARNING     LLAMA_RPC                 GGML_RPC)
llama_option_depr(WARNING     LLAMA_SYCL                GGML_SYCL)
llama_option_depr(WARNING     LLAMA_SYCL_F16            GGML_SYCL_F16)
llama_option_depr(WARNING     LLAMA_CANN                GGML_CANN)

# license helpers (see cmake/license.cmake); register the project license
include("cmake/license.cmake")
license_add_file("llama.cpp" "LICENSE")
170
#
# 3rd-party
#

if (LLAMA_USE_SYSTEM_GGML)
    message(STATUS "Using system-provided libggml, skipping ggml build")
    find_package(ggml REQUIRED)
    # expose the imported target under the plain name the rest of the build uses
    add_library(ggml ALIAS ggml::ggml)
endif()

if (NOT TARGET ggml AND NOT LLAMA_USE_SYSTEM_GGML)
    # build the vendored ggml, forwarding llama's build metadata
    set(GGML_BUILD_NUMBER ${LLAMA_BUILD_NUMBER})
    set(GGML_BUILD_COMMIT ${LLAMA_BUILD_COMMIT})
    add_subdirectory(ggml)
    # ... otherwise assume ggml is added by a parent CMakeLists.txt
endif()
187
#
# build the library
#

add_subdirectory(src)

#
# utils, programs, examples and tests
#

if (LLAMA_BUILD_COMMON)
    add_subdirectory(common)
    if (LLAMA_HTTPLIB)
        # vendored cpp-httplib (download functionality, see LLAMA_HTTPLIB option)
        add_subdirectory(vendor/cpp-httplib)
    endif()
endif()

# tests need common; skipped under cmake-js (CMAKE_JS_VERSION is set) builds
if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
    include(CTest)
    add_subdirectory(tests)
endif()

if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_EXAMPLES)
    add_subdirectory(examples)
    add_subdirectory(pocs)
endif()

if (LLAMA_BUILD_COMMON AND LLAMA_BUILD_TOOLS)
    add_subdirectory(tools)
endif()
218
# Automatically add all files from the 'licenses' directory.
# Use CMAKE_CURRENT_SOURCE_DIR (not CMAKE_SOURCE_DIR) so the correct directory
# is globbed when llama.cpp is built as a subproject of a larger build.
file(GLOB EXTRA_LICENSES "${CMAKE_CURRENT_SOURCE_DIR}/licenses/LICENSE-*")

foreach(FILE_PATH IN LISTS EXTRA_LICENSES)
    get_filename_component(FILE_NAME "${FILE_PATH}" NAME)
    # strip the "LICENSE-" prefix to obtain the component name
    string(REGEX REPLACE "^LICENSE-" "" NAME "${FILE_NAME}")
    license_add_file("${NAME}" "${FILE_PATH}")
endforeach()

if (LLAMA_BUILD_COMMON)
    license_generate(common)
endif()
231
#
# install
#

include(GNUInstallDirs)
include(CMakePackageConfigHelpers)

# install locations, overridable from the cache
set(LLAMA_INCLUDE_INSTALL_DIR ${CMAKE_INSTALL_INCLUDEDIR} CACHE PATH "Location of header  files")
set(LLAMA_LIB_INSTALL_DIR     ${CMAKE_INSTALL_LIBDIR}     CACHE PATH "Location of library files")
set(LLAMA_BIN_INSTALL_DIR     ${CMAKE_INSTALL_BINDIR}     CACHE PATH "Location of binary  files")

# public headers installed alongside the library via the PUBLIC_HEADER property
set(LLAMA_PUBLIC_HEADERS
    ${CMAKE_CURRENT_SOURCE_DIR}/include/llama.h
    ${CMAKE_CURRENT_SOURCE_DIR}/include/llama-cpp.h)

set_target_properties(llama
    PROPERTIES
        PUBLIC_HEADER "${LLAMA_PUBLIC_HEADERS}")

# destinations come from the GNUInstallDirs defaults
install(TARGETS llama LIBRARY PUBLIC_HEADER)

# relocatable CMake package: llama-config.cmake + llama-version.cmake
configure_package_config_file(
        ${CMAKE_CURRENT_SOURCE_DIR}/cmake/llama-config.cmake.in
        ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake
    INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama
    PATH_VARS LLAMA_INCLUDE_INSTALL_DIR
              LLAMA_LIB_INSTALL_DIR
              LLAMA_BIN_INSTALL_DIR )

write_basic_package_version_file(
        ${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake
    VERSION ${LLAMA_INSTALL_VERSION}
    COMPATIBILITY SameMajorVersion)

install(FILES ${CMAKE_CURRENT_BINARY_DIR}/llama-config.cmake
              ${CMAKE_CURRENT_BINARY_DIR}/llama-version.cmake
        DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/llama)

# install the conversion script with executable permissions
install(
    FILES convert_hf_to_gguf.py
    PERMISSIONS
        OWNER_READ
        OWNER_WRITE
        OWNER_EXECUTE
        GROUP_READ
        GROUP_EXECUTE
        WORLD_READ
        WORLD_EXECUTE
    DESTINATION ${CMAKE_INSTALL_BINDIR})

# pkg-config file; @ONLY keeps stray ${} in the template from being expanded
configure_file(cmake/llama.pc.in
        "${CMAKE_CURRENT_BINARY_DIR}/llama.pc"
        @ONLY)

install(FILES "${CMAKE_CURRENT_BINARY_DIR}/llama.pc"
        DESTINATION ${CMAKE_INSTALL_LIBDIR}/pkgconfig)