Mirror of https://git.adityakumar.xyz/llama.cpp.git, synced 2024-11-09 15:29:43 +00:00
Commit f5a77a629b:

* Major refactoring - introduce C-style API
* Clean up
* Add <cassert>
* Add <iterator>
* Add <algorithm>
* ....
* Fix timing reporting and accumulation
* Measure eval time only for single-token calls
* Change llama_tokenize return meaning
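The last bullet refers to the return value of llama_tokenize in the newly introduced C-style API. Below is a minimal, non-authoritative sketch of driving that API, assuming the llama.h declarations of this era (llama_context_default_params, llama_init_from_file, llama_tokenize returning an int token count); the exact signatures may differ in other revisions.

// Sketch only: exercises the C-style API introduced by this commit.
// Signatures (llama_context_default_params, llama_init_from_file,
// llama_tokenize, llama_free) are assumed from llama.h of this era.
#include <cstdio>
#include <vector>

#include "llama.h"

int main(int argc, char ** argv) {
    if (argc < 2) {
        fprintf(stderr, "usage: %s <vocab-file>\n", argv[0]);
        return 1;
    }

    // Load only the vocabulary; a tokenizer test does not need the weights.
    llama_context_params params = llama_context_default_params();
    params.vocab_only = true;

    llama_context * ctx = llama_init_from_file(argv[1], params);
    if (ctx == nullptr) {
        fprintf(stderr, "failed to load vocab from '%s'\n", argv[1]);
        return 1;
    }

    // With the changed return meaning, a non-negative result is assumed to be
    // the number of tokens written, and a negative result indicates failure
    // (e.g. the output buffer was too small).
    std::vector<llama_token> tokens(64);
    const int n = llama_tokenize(ctx, "Hello world", tokens.data(), (int) tokens.size(), true);
    if (n < 0) {
        fprintf(stderr, "tokenization failed\n");
    } else {
        printf("tokenized into %d tokens\n", n);
    }

    llama_free(ctx);
    return 0;
}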
4 lines
268 B
CMake
set(TEST_TARGET test-tokenizer-0)
add_executable(${TEST_TARGET} ${TEST_TARGET}.cpp)
target_link_libraries(${TEST_TARGET} PRIVATE llama ggml utils)
add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
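In the add_test line, $<TARGET_FILE:${TEST_TARGET}> is a CMake generator expression that resolves to the path of the built test executable, and ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin is passed to it as the single command-line argument (the vocab file the tokenizer test loads). After configuring and building, the test should be runnable from the build directory with ctest -R test-tokenizer-0, assuming the vocab file is present at that relative location in the source tree.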