FP16 is supported in CM=6.0 (#2177)
* FP16 is supported in CM=6.0
* Building PTX code for both 60 and 61

Co-authored-by: Johannes Gäßler <johannesg@5d6.de>
parent 2b5eb72e10
commit 4e7464ef88

1 changed file with 1 addition and 1 deletion
CMakeLists.txt

@@ -272,7 +272,7 @@ if (LLAMA_CUBLAS)
     if (NOT DEFINED CMAKE_CUDA_ARCHITECTURES)
         if (LLAMA_CUDA_DMMV_F16)
-            set(CMAKE_CUDA_ARCHITECTURES "61") # needed for f16 CUDA intrinsics
+            set(CMAKE_CUDA_ARCHITECTURES "60;61") # needed for f16 CUDA intrinsics
         else()
             set(CMAKE_CUDA_ARCHITECTURES "52;61") # lowest CUDA 12 standard + lowest for integer intrinsics
         endif()
     endif()
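For background on the one-line change above: CUDA's half-precision arithmetic intrinsics are usable on compute capability 6.0 hardware, not only 6.1, so the f16 build can emit device code for both Pascal variants. The following is a minimal, illustrative CUDA sketch, not code from this commit (the kernel name and the guard value are assumptions), showing the kind of f16 intrinsic use that this architecture floor enables:

// Minimal illustrative kernel (hypothetical, not from llama.cpp): native f16
// arithmetic such as __hadd is compiled only when the target architecture
// supports it, which is what makes building PTX for CC 6.0 worthwhile.
#include <cuda_fp16.h>

__global__ void vec_add_f16(const half *a, const half *b, half *c, int n) {
    const int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= n) {
        return;
    }
#if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 600
    c[i] = __hadd(a[i], b[i]); // native half add on CC >= 6.0
#else
    // fallback: promote to float, add, convert back to half
    c[i] = __float2half(__half2float(a[i]) + __half2float(b[i]));
#endif
}

With the new default of CMAKE_CUDA_ARCHITECTURES "60;61", a build configured with -DLLAMA_CUBLAS=ON -DLLAMA_CUDA_DMMV_F16=ON (and no explicit CMAKE_CUDA_ARCHITECTURES override) produces code for both architectures, so compute capability 6.0 devices such as the Tesla P100 can use the f16 path instead of being excluded by a 6.1-only build.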
|