1- cmake_minimum_required (VERSION 3.12) # Don't bump this version for no reason
1+ # 3.13 is required to call target_link_libraries on targets defined in other directories.
2+ # Don't bump this version for no reason.
3+ cmake_minimum_required (VERSION 3.13)
4+
25project ("llama.cpp" C CXX)
36
47set (CMAKE_EXPORT_COMPILE_COMMANDS ON )
@@ -58,12 +61,12 @@ option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer"
5861option (LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF )
5962
6063# instruction set specific
61- option (LLAMA_AVX "llama: enable AVX" ON )
62- option (LLAMA_AVX2 "llama: enable AVX2" ON )
64+ option (LLAMA_AVX "llama: enable AVX" OFF )
65+ option (LLAMA_AVX2 "llama: enable AVX2" OFF )
6366option (LLAMA_AVX512 "llama: enable AVX512" OFF )
6467option (LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF )
6568option (LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF )
66- option (LLAMA_FMA "llama: enable FMA" ON )
69+ option (LLAMA_FMA "llama: enable FMA" OFF )
6770# in MSVC F16C is implied with AVX2/AVX512
6871if (NOT MSVC )
6972 option (LLAMA_F16C "llama: enable F16C" ON )
@@ -74,7 +77,7 @@ option(LLAMA_ACCELERATE "llama: enable Accelerate framework
7477option (LLAMA_BLAS "llama: use BLAS" OFF )
7578set (LLAMA_BLAS_VENDOR "Generic" CACHE STRING "llama: BLAS library vendor" )
7679option (LLAMA_CUBLAS "llama: use CUDA" OFF )
77- #option(LLAMA_CUDA_CUBLAS "llama: use cuBLAS for prompt processing" OFF)
80+ #option(LLAMA_CUDA_CUBLAS "llama: use cuBLAS for prompt processing" OFF)
7881option (LLAMA_CUDA_FORCE_DMMV "llama: use dmmv instead of mmvq CUDA kernels" OFF )
7982set (LLAMA_CUDA_DMMV_X "32" CACHE STRING "llama: x stride for dmmv CUDA kernels" )
8083set (LLAMA_CUDA_MMV_Y "1" CACHE STRING "llama: y block size for mmv CUDA kernels" )
@@ -83,16 +86,17 @@ set(LLAMA_CUDA_KQUANTS_ITER "2" CACHE STRING "llama: iters./thread per block for
8386set (LLAMA_CUDA_PEER_MAX_BATCH_SIZE "128" CACHE STRING
8487 "llama: max. batch size for using peer access" )
8588option (LLAMA_HIPBLAS "llama: use hipBLAS" OFF )
89+ option (LLAMA_MINGW_COMPAT "llama: use MinGW compatibility headers" OFF )
8690option (LLAMA_CLBLAST "llama: use CLBlast" OFF )
8791option (LLAMA_METAL "llama: use Metal" ${LLAMA_METAL_DEFAULT} )
8892option (LLAMA_METAL_NDEBUG "llama: disable Metal debugging" OFF )
8993option (LLAMA_MPI "llama: use MPI" OFF )
9094option (LLAMA_K_QUANTS "llama: use k-quants" ON )
9195option (LLAMA_QKK_64 "llama: use super-block size of 64 for k-quants" OFF )
9296
93- option (LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE} )
94- option (LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE} )
95- option (LLAMA_BUILD_SERVER "llama: build server example" ON )
97+ option (LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE} )
98+ option (LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE} )
99+ option (LLAMA_BUILD_SERVER "llama: build server example" ON )
96100
97101#
98102# Build info header
@@ -627,6 +631,9 @@ endif()
627631if (CMAKE_SYSTEM_NAME MATCHES "OpenBSD" )
628632 add_compile_definitions (_BSD_SOURCE)
629633endif ()
634+ if ((MINGW) AND (LLAMA_MINGW_COMPAT))
635+ add_compile_definitions (_USE_MINGW_COMPAT)
636+ endif ()
630637
631638#
632639# libraries
@@ -674,6 +681,10 @@ add_library(llama
674681 )
675682
676683target_include_directories (llama PUBLIC .)
684+ if (MINGW AND LLAMA_MINGW_COMPAT)
685+ target_include_directories (llama PRIVATE ${CMAKE_SOURCE_DIR}/compat/mingw)
686+ endif ()
687+
677688target_compile_features (llama PUBLIC cxx_std_11) # don't bump
678689target_link_libraries (llama PRIVATE
679690 ggml
@@ -728,10 +739,10 @@ set(GGML_PUBLIC_HEADERS "ggml.h"
728739 "${GGML_HEADERS_METAL} " "${GGML_HEADERS_MPI} " "${GGML_HEADERS_EXTRA} " )
729740
730741set_target_properties (ggml PROPERTIES PUBLIC_HEADER "${GGML_PUBLIC_HEADERS} " )
731- install (TARGETS ggml PUBLIC_HEADER )
742+ install (TARGETS ggml PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} )
732743
733744set_target_properties (llama PROPERTIES PUBLIC_HEADER ${CMAKE_CURRENT_SOURCE_DIR} /llama.h)
734- install (TARGETS llama LIBRARY PUBLIC_HEADER )
745+ install (TARGETS llama PUBLIC_HEADER DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} )
735746
736747install (
737748 FILES convert.py
0 commit comments