@@ -104,7 +104,7 @@ option(LLAMA_BUILD_SERVER "llama: build server example"
 # Compile flags
 #
 
-set(CMAKE_CXX_STANDARD 11)
+set(CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_STANDARD_REQUIRED true)
 set(CMAKE_C_STANDARD 11)
 set(CMAKE_C_STANDARD_REQUIRED true)
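
Note on this hunk: with CMAKE_CXX_STANDARD_REQUIRED left at true, the bump from C++11 to C++20 makes configuration fail outright on toolchains without C++20 support rather than silently falling back to an older standard. A guard along these lines (a sketch, not part of the commit; the version cutoff is approximate) would surface the requirement with a clearer message:

    if (CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 10)
        message(FATAL_ERROR "this tree now requires C++20; GCC 10 or newer is needed")
    endif()
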
@@ -230,7 +230,12 @@ if (LLAMA_BLAS)
 
         message(STATUS "BLAS found, Includes: ${BLAS_INCLUDE_DIRS}")
         add_compile_options(${BLAS_LINKER_FLAGS})
-        add_compile_definitions(GGML_USE_OPENBLAS)
+
+        # from https://github.com/NVIDIA/cutlass
+        make_directory("${PROJECT_BINARY_DIR}/nvcc_tmp")
+        set(cuda_flags --keep "SHELL:--keep-dir ${PROJECT_BINARY_DIR}/nvcc_tmp" ${cuda_flags})
+
+        # add_compile_definitions(GGML_USE_OPENBLAS)
         if (${BLAS_INCLUDE_DIRS} MATCHES "mkl" AND (${LLAMA_BLAS_VENDOR} MATCHES "Generic" OR ${LLAMA_BLAS_VENDOR} MATCHES "Intel"))
             add_compile_definitions(GGML_BLAS_USE_MKL)
         endif()
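
Note on this hunk: --keep tells nvcc to retain its intermediate files (.ii, .ptx, .cubin) and --keep-dir points them at a scratch directory, which is handy for inspecting generated PTX; the SHELL: prefix keeps the two-token --keep-dir option together instead of letting CMake de-duplicate or reorder its pieces. These flags are appended inside the LLAMA_BLAS block, though, so they only take effect when BLAS is enabled, and commenting out GGML_USE_OPENBLAS disables the BLAS code paths the block was for. If the intent is to always keep nvcc intermediates, gating them on the CUDA option instead would read more naturally (a sketch, assuming the LLAMA_CUBLAS option this tree defines):

    if (LLAMA_CUBLAS)
        make_directory("${PROJECT_BINARY_DIR}/nvcc_tmp")
        set(cuda_flags --keep "SHELL:--keep-dir ${PROJECT_BINARY_DIR}/nvcc_tmp" ${cuda_flags})
    endif()
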
@@ -312,7 +317,7 @@ if (LLAMA_MPI)
     if (MPI_C_FOUND)
         message(STATUS "MPI found")
         set(GGML_HEADERS_MPI ggml-mpi.h)
-        set(GGML_SOURCES_MPI ggml-mpi.c ggml-mpi.h)
+        set(GGML_SOURCES_MPI ggml-mpi.cpp ggml-mpi.h)
         add_compile_definitions(GGML_USE_MPI)
         add_compile_definitions(${MPI_C_COMPILE_DEFINITIONS})
         if (NOT MSVC)
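
Note on this hunk: ggml-mpi now compiles as C++, but the surrounding block still keys off MPI_C_FOUND and applies MPI_C_COMPILE_DEFINITIONS. FindMPI exposes a parallel CXX component that matches the new source language; a sketch of the corresponding check (assuming find_package(MPI) has already run above):

    if (MPI_CXX_FOUND)
        add_compile_definitions(${MPI_CXX_COMPILE_DEFINITIONS})
    endif()
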
@@ -438,6 +443,9 @@ if (NOT cuda_host_flags STREQUAL "")
     set(cuda_flags ${cuda_flags} -Xcompiler ${cuda_host_flags})
 endif()
 
+#
+set(cuda_flags --verbose -G ${cuda_flags})
+
 add_compile_options("$<$<COMPILE_LANGUAGE:CUDA>:${cuda_flags}>")
 
 if (WIN32)
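
Note on this hunk: -G enables device-side debug info and disables most device code optimization, and --verbose makes nvcc print each compilation phase it runs; as written, both apply to every CUDA compile unconditionally, which will noticeably slow GPU kernels in release builds. A sketch of a build-type gate, if that is not intended:

    if (CMAKE_BUILD_TYPE STREQUAL "Debug")
        set(cuda_flags --verbose -G ${cuda_flags})
    endif()
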
@@ -485,8 +493,10 @@ if (NOT MSVC)
             add_link_options(-static-libgcc -static-libstdc++)
         endif()
     endif()
+    add_link_options("-Wl,-Map=${TARGET}.map")
+
     if (LLAMA_GPROF)
-        add_compile_options(-pg)
+        add_compile_options(-pg)
     endif()
 endif()
 
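Note on this hunk: add_link_options applies to every target created after this point, and TARGET is not a variable CMake sets by itself, so unless the top-level lists file defines it, every binary writes its linker map to the same path (an empty TARGET yields a bare .map). A per-target name can be recovered with a generator expression, which the link step evaluates against each consuming target (a sketch):

    add_link_options("-Wl,-Map=$<TARGET_PROPERTY:NAME>.map")
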
@@ -645,13 +655,16 @@ if (GGML_USE_CPU_HBM)
 endif()
 
 add_library(ggml OBJECT
-            ggml.c
+            ggml.cpp
             ggml.h
-            ggml-alloc.c
+            print.hpp
+            ggml-internal.hpp
+            llama-internal.hpp
+            ggml-alloc.cpp
             ggml-alloc.h
-            ggml-backend.c
+            ggml-backend.cpp
             ggml-backend.h
-            ggml-quants.c
+            ggml-quants.cpp
             ggml-quants.h
             ${GGML_SOURCES_CUDA}   ${GGML_HEADERS_CUDA}
             ${GGML_SOURCES_OPENCL} ${GGML_HEADERS_OPENCL}
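
Note on this hunk: the rename from .c to .cpp is what moves these files from the C11 standard set above to the new C++20 one, since CMake picks a source file's language from its extension. The header entries (ggml.h, print.hpp, the two -internal.hpp files) are inert for compilation and only group the files in IDE views. Had the .c suffixes been kept, the same switch could have been made per file instead (a sketch of the alternative the commit did not take):

    set_source_files_properties(ggml.c ggml-alloc.c ggml-backend.c ggml-quants.c
                                PROPERTIES LANGUAGE CXX)
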
@@ -683,7 +696,7 @@ add_library(llama
             )
 
 target_include_directories(llama PUBLIC .)
-target_compile_features(llama PUBLIC cxx_std_11) # don't bump
+target_compile_features(llama PUBLIC cxx_std_20) # don't bump
 target_link_libraries(llama PRIVATE
     ggml
     ${LLAMA_EXTRA_LIBS}
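
Note on this hunk: the trailing "# don't bump" comment guarded the old cxx_std_11 value and now contradicts the change; it should be dropped or updated. Because the compile feature is PUBLIC, the C++20 requirement also propagates to every consumer of the library (a sketch with a hypothetical downstream target):

    add_executable(my_app main.cpp)            # hypothetical consumer
    target_link_libraries(my_app PRIVATE llama)
    # my_app now inherits the cxx_std_20 requirement from llama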