cmake_minimum_required(VERSION 3.12) # Don't bump this version for no reason

project("llama.cpp" C CXX)

# Emit compile_commands.json so external tooling (clangd, clang-tidy) can consume it.
set(CMAKE_EXPORT_COMPILE_COMMANDS ON)
# Default to a Release build when the user did not pick a build type.
# Skipped for Xcode/MSVC: those are multi-config generators where
# CMAKE_BUILD_TYPE is not meaningful at configure time.
if (NOT XCODE AND NOT MSVC AND NOT CMAKE_BUILD_TYPE)
    set(CMAKE_BUILD_TYPE Release CACHE STRING "Build type" FORCE)
    # Populate the drop-down shown by cmake-gui/ccmake.
    set_property(CACHE CMAKE_BUILD_TYPE PROPERTY STRINGS "Debug" "Release" "MinSizeRel" "RelWithDebInfo")
endif()
# Collect all runtime artifacts under a single bin/ directory.
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/bin)

# LLAMA_STANDALONE is ON when this project is the top-level build and OFF
# when it is consumed via add_subdirectory() by a parent project.
if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
    set(LLAMA_STANDALONE ON)

    # configure project version
    # TODO
else()
    set(LLAMA_STANDALONE OFF)
endif()
# Pick a platform-appropriate default for BUILD_SHARED_LIBS:
# shared libraries are disabled for Emscripten (WASM) and MinGW builds.
if (EMSCRIPTEN)
    set(BUILD_SHARED_LIBS_DEFAULT OFF)

    option(LLAMA_WASM_SINGLE_FILE "llama: embed WASM inside the generated llama.js" ON)
else()
    if (MINGW)
        set(BUILD_SHARED_LIBS_DEFAULT OFF)
    else()
        set(BUILD_SHARED_LIBS_DEFAULT ON)
    endif()
endif()
#
# Option list
#

# general
option(LLAMA_STATIC "llama: static link libraries" OFF)
option(LLAMA_NATIVE "llama: enable -march=native flag" OFF)
option(LLAMA_LTO "llama: enable link time optimization" OFF)

# debug
option(LLAMA_ALL_WARNINGS "llama: enable all compiler warnings" ON)
option(LLAMA_ALL_WARNINGS_3RD_PARTY "llama: enable all compiler warnings in 3rd party libs" OFF)
option(LLAMA_GPROF "llama: enable gprof" OFF)

# sanitizers
option(LLAMA_SANITIZE_THREAD "llama: enable thread sanitizer" OFF)
option(LLAMA_SANITIZE_ADDRESS "llama: enable address sanitizer" OFF)
option(LLAMA_SANITIZE_UNDEFINED "llama: enable undefined sanitizer" OFF)

# instruction set specific
option(LLAMA_AVX "llama: enable AVX" ON)
option(LLAMA_AVX2 "llama: enable AVX2" ON)
option(LLAMA_AVX512 "llama: enable AVX512" OFF)
option(LLAMA_AVX512_VBMI "llama: enable AVX512-VBMI" OFF)
option(LLAMA_AVX512_VNNI "llama: enable AVX512-VNNI" OFF)
option(LLAMA_FMA "llama: enable FMA" ON)
# in MSVC F16C is implied with AVX2/AVX512
if (NOT MSVC)
    option(LLAMA_F16C "llama: enable F16C" ON)
endif()

# 3rd party libs
option(LLAMA_ACCELERATE "llama: enable Accelerate framework" ON)
option(LLAMA_OPENBLAS "llama: use OpenBLAS" OFF)
option(LLAMA_CUBLAS "llama: use cuBLAS" OFF)

# tests/examples are built by default only when this is the top-level project
option(LLAMA_BUILD_TESTS "llama: build tests" ${LLAMA_STANDALONE})
option(LLAMA_BUILD_EXAMPLES "llama: build examples" ${LLAMA_STANDALONE})
#
# Compile flags
#

# C11 / C++11, no compiler extensions implied; REQUIRED fails the configure
# instead of silently falling back to an older standard.
set(CMAKE_CXX_STANDARD 11)
set(CMAKE_CXX_STANDARD_REQUIRED true)
set(CMAKE_C_STANDARD 11)
set(CMAKE_C_STANDARD_REQUIRED true)

# Prefer -pthread over -lpthread where both are available.
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
# Sanitizers (GCC/Clang only). Each -fsanitize flag must be passed to both
# the compiler and the linker, hence the paired add_compile_options/link_libraries.
if (NOT MSVC)
    if (LLAMA_SANITIZE_THREAD)
        add_compile_options(-fsanitize=thread)
        link_libraries(-fsanitize=thread)
    endif()

    if (LLAMA_SANITIZE_ADDRESS)
        add_compile_options(-fsanitize=address -fno-omit-frame-pointer)
        link_libraries(-fsanitize=address)
    endif()

    if (LLAMA_SANITIZE_UNDEFINED)
        add_compile_options(-fsanitize=undefined)
        link_libraries(-fsanitize=undefined)
    endif()
endif()
# Apple Accelerate framework (BLAS backend on macOS).
if (APPLE AND LLAMA_ACCELERATE)
    find_library(ACCELERATE_FRAMEWORK Accelerate)
    if (ACCELERATE_FRAMEWORK)
        message(STATUS "Accelerate framework found")

        add_compile_definitions(GGML_USE_ACCELERATE)
        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} ${ACCELERATE_FRAMEWORK})
    else()
        # Missing framework is non-fatal: build continues without Accelerate.
        message(WARNING "Accelerate framework not found")
    endif()
endif()
# OpenBLAS backend (optional). Uses the standard FindBLAS module with the
# vendor pinned to OpenBLAS.
if (LLAMA_OPENBLAS)
    if (LLAMA_STATIC)
        set(BLA_STATIC ON)
    endif()

    set(BLA_VENDOR OpenBLAS)
    find_package(BLAS)
    if (BLAS_FOUND)
        message(STATUS "OpenBLAS found")

        add_compile_definitions(GGML_USE_OPENBLAS)
        add_link_options(${BLAS_LIBRARIES})
        set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} openblas)

        # find header file
        # FindBLAS does not locate cblas.h, so search the usual install
        # prefixes plus the OpenBLAS_HOME environment variable manually.
        set(OPENBLAS_INCLUDE_SEARCH_PATHS
            /usr/include
            /usr/include/openblas
            /usr/include/openblas-base
            /usr/local/include
            /usr/local/include/openblas
            /usr/local/include/openblas-base
            /opt/OpenBLAS/include
            $ENV{OpenBLAS_HOME}
            $ENV{OpenBLAS_HOME}/include
            )
        find_path(OPENBLAS_INC NAMES cblas.h PATHS ${OPENBLAS_INCLUDE_SEARCH_PATHS})
        add_compile_options(-I${OPENBLAS_INC})
    else()
        # Non-fatal: the build falls back to the built-in implementation.
        message(WARNING "OpenBLAS not found")
    endif()
endif()
# cuBLAS backend (optional).
if (LLAMA_CUBLAS)
    # FindCUDAToolkit requires CMake 3.17.
    cmake_minimum_required(VERSION 3.17)

    find_package(CUDAToolkit)
    if (CUDAToolkit_FOUND)
        message(STATUS "cuBLAS found")

        enable_language(CUDA)

        # Extra sources compiled into ggml when CUDA is available.
        set(GGML_CUDA_SOURCES ggml-cuda.cu ggml-cuda.h)

        add_compile_definitions(GGML_USE_CUBLAS)

        if (LLAMA_STATIC)
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart_static CUDA::cublas_static CUDA::cublasLt_static)
        else()
            set(LLAMA_EXTRA_LIBS ${LLAMA_EXTRA_LIBS} CUDA::cudart CUDA::cublas CUDA::cublasLt)
        endif()

    else()
        # Non-fatal: the build continues CPU-only.
        message(WARNING "cuBLAS not found")
    endif()
endif()
# Warning flags, kept separate per language because some warnings
# (e.g. -Wstrict-prototypes) are C-only.
if (LLAMA_ALL_WARNINGS)
    if (NOT MSVC)
        set(c_flags
            -Wall
            -Wextra
            -Wpedantic
            -Wcast-qual
            -Wdouble-promotion
            -Wshadow
            -Wstrict-prototypes
            -Wpointer-arith
        )
        set(cxx_flags
            -Wall
            -Wextra
            -Wpedantic
            -Wcast-qual
            -Wno-unused-function
            -Wno-multichar
        )
    else()
        # todo : msvc
    endif()

    # COMPILE_LANGUAGE generator expressions apply each list only to the
    # matching language in this mixed C/C++ project.
    add_compile_options(
            "$<$<COMPILE_LANGUAGE:C>:${c_flags}>"
            "$<$<COMPILE_LANGUAGE:CXX>:${cxx_flags}>"
    )
endif()
|
|
|
|
Rewrite loading code to try to satisfy everyone:
- Support all three formats (ggml, ggmf, ggjt). (However, I didn't
include the hack needed to support GPT4All files without conversion.
Those can still be used after converting them with convert.py from my
other PR.)
- Support both mmap and read (mmap is used by default, but can be
disabled with `--no-mmap`, and is automatically disabled for pre-ggjt
files or on platforms where mmap is not supported).
- Support multi-file models like before, but automatically determine the
number of parts rather than requiring `--n_parts`.
- Improve validation and error checking.
- Stop using the per-file type field (f16) entirely in favor of just
relying on the per-tensor type/size fields. This has no immediate
benefit, but makes it easier to experiment with different formats, and
should make it easier to support the new GPTQ-for-LLaMa models in the
future (I have some work in progress on that front).
- Support VirtualLock on Windows (using the same `--mlock` option as on
Unix).
- Indicate loading progress when using mmap + mlock. (Which led me
to the interesting observation that on my Linux machine, with a
warm file cache, mlock actually takes some time, whereas mmap
without mlock starts almost instantly...)
- To help implement this, move mlock support from ggml to the
loading code.
- madvise/PrefetchVirtualMemory support (based on #740)
- Switch from ifstream to the `fopen` family of functions to avoid
unnecessary copying and, when mmap is enabled, allow reusing the same
file descriptor for both metadata reads and mmap (whereas the existing
implementation opens the file a second time to mmap).
- Quantization now produces a single-file output even with multi-file
inputs (not really a feature as much as 'it was easier this way').
Implementation notes:
I tried to factor the code into more discrete pieces than before.
Regarding code style: I tried to follow the code style, but I'm naughty
and used a few advanced C++ features repeatedly:
- Destructors to make it easier to ensure everything gets cleaned up.
- Exceptions. I don't even usually use exceptions when writing C++, and
I can remove them if desired... but here they make the loading code
much more succinct while still properly handling a variety of errors,
ranging from API calls failing to integer overflow and allocation
failure. The exceptions are converted to error codes at the
API boundary.)
Co-authored-by: Pavol Rusnak <pavol@rusnak.io> (for the bit I copied from #740)
2023-04-08 19:24:37 +00:00
|
|
|
if (MSVC)
|
|
|
|
add_compile_definitions(_CRT_SECURE_NO_WARNINGS)
|
2023-04-22 08:18:20 +00:00
|
|
|
|
|
|
|
if (BUILD_SHARED_LIBS)
|
|
|
|
set(CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS ON)
|
|
|
|
endif()
|
Rewrite loading code to try to satisfy everyone:
- Support all three formats (ggml, ggmf, ggjt). (However, I didn't
include the hack needed to support GPT4All files without conversion.
Those can still be used after converting them with convert.py from my
other PR.)
- Support both mmap and read (mmap is used by default, but can be
disabled with `--no-mmap`, and is automatically disabled for pre-ggjt
files or on platforms where mmap is not supported).
- Support multi-file models like before, but automatically determine the
number of parts rather than requiring `--n_parts`.
- Improve validation and error checking.
- Stop using the per-file type field (f16) entirely in favor of just
relying on the per-tensor type/size fields. This has no immediate
benefit, but makes it easier to experiment with different formats, and
should make it easier to support the new GPTQ-for-LLaMa models in the
future (I have some work in progress on that front).
- Support VirtualLock on Windows (using the same `--mlock` option as on
Unix).
- Indicate loading progress when using mmap + mlock. (Which led me
to the interesting observation that on my Linux machine, with a
warm file cache, mlock actually takes some time, whereas mmap
without mlock starts almost instantly...)
- To help implement this, move mlock support from ggml to the
loading code.
- madvise/PrefetchVirtualMemory support (based on #740)
- Switch from ifstream to the `fopen` family of functions to avoid
unnecessary copying and, when mmap is enabled, allow reusing the same
file descriptor for both metadata reads and mmap (whereas the existing
implementation opens the file a second time to mmap).
- Quantization now produces a single-file output even with multi-file
inputs (not really a feature as much as 'it was easier this way').
Implementation notes:
I tried to factor the code into more discrete pieces than before.
Regarding code style: I tried to follow the code style, but I'm naughty
and used a few advanced C++ features repeatedly:
- Destructors to make it easier to ensure everything gets cleaned up.
- Exceptions. I don't even usually use exceptions when writing C++, and
I can remove them if desired... but here they make the loading code
much more succinct while still properly handling a variety of errors,
ranging from API calls failing to integer overflow and allocation
failure. The exceptions are converted to error codes at the
API boundary.)
Co-authored-by: Pavol Rusnak <pavol@rusnak.io> (for the bit I copied from #740)
2023-04-08 19:24:37 +00:00
|
|
|
endif()
|
|
|
|
|
# Link-time optimization: probe toolchain support before enabling IPO globally.
if (LLAMA_LTO)
    include(CheckIPOSupported)
    check_ipo_supported(RESULT result OUTPUT output)
    if (result)
        set(CMAKE_INTERPROCEDURAL_OPTIMIZATION TRUE)
    else()
        message(WARNING "IPO is not supported: ${output}")
    endif()
endif()
|
|
|
# Architecture specific
|
|
|
|
# TODO: probably these flags need to be tweaked on some architectures
|
|
|
|
# feel free to update the Makefile for your architecture and send a pull request or issue
|
2023-03-13 17:12:33 +00:00
|
|
|
message(STATUS "CMAKE_SYSTEM_PROCESSOR: ${CMAKE_SYSTEM_PROCESSOR}")
|
2023-03-21 00:37:16 +00:00
|
|
|
if (NOT MSVC)
|
|
|
|
if (LLAMA_STATIC)
|
|
|
|
add_link_options(-static)
|
|
|
|
if (MINGW)
|
|
|
|
add_link_options(-static-libgcc -static-libstdc++)
|
|
|
|
endif()
|
|
|
|
endif()
|
|
|
|
if (LLAMA_GPROF)
|
|
|
|
add_compile_options(-pg)
|
|
|
|
endif()
|
|
|
|
if (LLAMA_NATIVE)
|
|
|
|
add_compile_options(-march=native)
|
|
|
|
endif()
|
|
|
|
endif()
|
2023-03-13 17:12:33 +00:00
|
|
|
|
|
|
|
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "arm" OR ${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
|
|
|
|
message(STATUS "ARM detected")
|
2023-03-21 00:37:16 +00:00
|
|
|
if (MSVC)
|
|
|
|
# TODO: arm msvc?
|
|
|
|
else()
|
|
|
|
if (${CMAKE_SYSTEM_PROCESSOR} MATCHES "aarch64")
|
|
|
|
add_compile_options(-mcpu=native)
|
|
|
|
endif()
|
|
|
|
# TODO: armv6,7,8 version specific flags
|
|
|
|
endif()
|
|
|
|
elseif (${CMAKE_SYSTEM_PROCESSOR} MATCHES "^(x86_64|i686|AMD64)$")
|
2023-03-13 17:12:33 +00:00
|
|
|
message(STATUS "x86 detected")
|
|
|
|
if (MSVC)
|
2023-03-25 21:38:11 +00:00
|
|
|
if (LLAMA_AVX512)
|
2023-04-20 01:14:14 +00:00
|
|
|
add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX512>)
|
|
|
|
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX512>)
|
2023-04-17 13:10:57 +00:00
|
|
|
# MSVC has no compile-time flags enabling specific
|
|
|
|
# AVX512 extensions, neither it defines the
|
|
|
|
# macros corresponding to the extensions.
|
|
|
|
# Do it manually.
|
|
|
|
if (LLAMA_AVX512_VBMI)
|
2023-04-20 01:14:14 +00:00
|
|
|
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VBMI__>)
|
|
|
|
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VBMI__>)
|
2023-04-17 13:10:57 +00:00
|
|
|
endif()
|
|
|
|
if (LLAMA_AVX512_VNNI)
|
2023-04-20 01:14:14 +00:00
|
|
|
add_compile_definitions($<$<COMPILE_LANGUAGE:C>:__AVX512VNNI__>)
|
|
|
|
add_compile_definitions($<$<COMPILE_LANGUAGE:CXX>:__AVX512VNNI__>)
|
2023-04-17 13:10:57 +00:00
|
|
|
endif()
|
2023-03-25 21:38:11 +00:00
|
|
|
elseif (LLAMA_AVX2)
|
2023-04-20 01:14:14 +00:00
|
|
|
add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX2>)
|
|
|
|
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX2>)
|
2023-03-21 00:37:16 +00:00
|
|
|
elseif (LLAMA_AVX)
|
2023-04-20 01:14:14 +00:00
|
|
|
add_compile_options($<$<COMPILE_LANGUAGE:C>:/arch:AVX>)
|
|
|
|
add_compile_options($<$<COMPILE_LANGUAGE:CXX>:/arch:AVX>)
|
2023-03-21 00:37:16 +00:00
|
|
|
endif()
|
2023-03-13 17:12:33 +00:00
|
|
|
else()
|
2023-04-13 12:48:21 +00:00
|
|
|
if (LLAMA_F16C)
|
|
|
|
add_compile_options(-mf16c)
|
|
|
|
endif()
|
2023-03-21 00:37:16 +00:00
|
|
|
if (LLAMA_FMA)
|
|
|
|
add_compile_options(-mfma)
|
2023-03-13 17:12:33 +00:00
|
|
|
endif()
|
2023-03-21 00:37:16 +00:00
|
|
|
if (LLAMA_AVX)
|
|
|
|
add_compile_options(-mavx)
|
2023-03-13 17:12:33 +00:00
|
|
|
endif()
|
2023-03-21 00:37:16 +00:00
|
|
|
if (LLAMA_AVX2)
|
|
|
|
add_compile_options(-mavx2)
|
2023-03-13 17:12:33 +00:00
|
|
|
endif()
|
2023-03-25 21:38:11 +00:00
|
|
|
if (LLAMA_AVX512)
|
|
|
|
add_compile_options(-mavx512f)
|
2023-04-17 13:10:57 +00:00
|
|
|
add_compile_options(-mavx512bw)
|
|
|
|
endif()
|
|
|
|
if (LLAMA_AVX512_VBMI)
|
|
|
|
add_compile_options(-mavx512vbmi)
|
|
|
|
endif()
|
|
|
|
if (LLAMA_AVX512_VNNI)
|
|
|
|
add_compile_options(-mavx512vnni)
|
2023-03-25 21:38:11 +00:00
|
|
|
endif()
|
2023-03-13 17:12:33 +00:00
|
|
|
endif()
|
2023-03-21 00:37:16 +00:00
|
|
|
else()
|
|
|
|
# TODO: support PowerPC
|
|
|
|
message(STATUS "Unknown architecture")
|
2023-03-13 17:12:33 +00:00
|
|
|
endif()
|
|
|
|
|
#
# Build libraries
#

# ggml is an OBJECT library: its objects are linked directly into llama
# instead of producing a standalone archive/shared library.
add_library(ggml OBJECT
            ggml.c
            ggml.h
            ${GGML_CUDA_SOURCES})

target_include_directories(ggml PUBLIC .)
target_compile_features(ggml PUBLIC c_std_11) # don't bump
target_link_libraries(ggml PUBLIC Threads::Threads ${LLAMA_EXTRA_LIBS})

if (BUILD_SHARED_LIBS)
    # Object files must be PIC when they end up inside a shared library.
    set_target_properties(ggml PROPERTIES POSITION_INDEPENDENT_CODE ON)
endif()
|
2023-03-22 16:37:10 +00:00
|
|
|
add_library(llama
|
2023-03-22 05:32:36 +00:00
|
|
|
llama.cpp
|
Rewrite loading code to try to satisfy everyone:
- Support all three formats (ggml, ggmf, ggjt). (However, I didn't
include the hack needed to support GPT4All files without conversion.
Those can still be used after converting them with convert.py from my
other PR.)
- Support both mmap and read (mmap is used by default, but can be
disabled with `--no-mmap`, and is automatically disabled for pre-ggjt
files or on platforms where mmap is not supported).
- Support multi-file models like before, but automatically determine the
number of parts rather than requiring `--n_parts`.
- Improve validation and error checking.
- Stop using the per-file type field (f16) entirely in favor of just
relying on the per-tensor type/size fields. This has no immediate
benefit, but makes it easier to experiment with different formats, and
should make it easier to support the new GPTQ-for-LLaMa models in the
future (I have some work in progress on that front).
- Support VirtualLock on Windows (using the same `--mlock` option as on
Unix).
- Indicate loading progress when using mmap + mlock. (Which led me
to the interesting observation that on my Linux machine, with a
warm file cache, mlock actually takes some time, whereas mmap
without mlock starts almost instantly...)
- To help implement this, move mlock support from ggml to the
loading code.
- madvise/PrefetchVirtualMemory support (based on #740)
- Switch from ifstream to the `fopen` family of functions to avoid
unnecessary copying and, when mmap is enabled, allow reusing the same
file descriptor for both metadata reads and mmap (whereas the existing
implementation opens the file a second time to mmap).
- Quantization now produces a single-file output even with multi-file
inputs (not really a feature as much as 'it was easier this way').
Implementation notes:
I tried to factor the code into more discrete pieces than before.
Regarding code style: I tried to follow the code style, but I'm naughty
and used a few advanced C++ features repeatedly:
- Destructors to make it easier to ensure everything gets cleaned up.
- Exceptions. I don't even usually use exceptions when writing C++, and
I can remove them if desired... but here they make the loading code
much more succinct while still properly handling a variety of errors,
ranging from API calls failing to integer overflow and allocation
failure. The exceptions are converted to error codes at the
API boundary.)
Co-authored-by: Pavol Rusnak <pavol@rusnak.io> (for the bit I copied from #740)
2023-04-08 19:24:37 +00:00
|
|
|
llama.h
|
|
|
|
llama_util.h)
|
2023-03-22 05:32:36 +00:00
|
|
|
|
|
|
|
target_include_directories(llama PUBLIC .)
|
|
|
|
target_compile_features(llama PUBLIC cxx_std_11) # don't bump
|
2023-03-25 18:26:40 +00:00
|
|
|
target_link_libraries(llama PRIVATE ggml ${LLAMA_EXTRA_LIBS})
|
2023-04-22 13:31:56 +00:00
|
|
|
|
2023-03-23 20:16:48 +00:00
|
|
|
if (BUILD_SHARED_LIBS)
|
|
|
|
set_target_properties(llama PROPERTIES POSITION_INDEPENDENT_CODE ON)
|
|
|
|
target_compile_definitions(llama PRIVATE LLAMA_SHARED LLAMA_BUILD)
|
|
|
|
endif()
|
2023-03-21 00:37:16 +00:00
|
|
|
|
2023-04-20 01:14:14 +00:00
|
|
|
if (GGML_CUDA_SOURCES)
|
|
|
|
message(STATUS "GGML CUDA sources found, configuring CUDA architecture")
|
|
|
|
set_property(TARGET ggml PROPERTY CUDA_ARCHITECTURES OFF)
|
|
|
|
set_property(TARGET ggml PROPERTY CUDA_SELECT_NVCC_ARCH_FLAGS "Auto")
|
|
|
|
set_property(TARGET llama PROPERTY CUDA_ARCHITECTURES OFF)
|
|
|
|
endif()
|
|
|
|
|
|
|
|
|
2023-03-21 15:29:41 +00:00
|
|
|
#
|
|
|
|
# programs, examples and tests
|
|
|
|
#
|
|
|
|
|
|
|
|
if (LLAMA_BUILD_TESTS AND NOT CMAKE_JS_VERSION)
|
2023-03-30 17:56:59 +00:00
|
|
|
include(CTest)
|
2023-03-21 15:29:41 +00:00
|
|
|
add_subdirectory(tests)
|
|
|
|
endif ()
|
|
|
|
|
2023-03-25 18:26:40 +00:00
|
|
|
if (LLAMA_BUILD_EXAMPLES)
|
|
|
|
add_subdirectory(examples)
|
2023-04-18 19:00:14 +00:00
|
|
|
add_subdirectory(pocs)
|
2023-03-25 18:26:40 +00:00
|
|
|
endif()
|