# CTest test registrations for the llama project.
# llama_add_test(<source> [arg...])
#
# Registers a CTest test built from a single source file:
#   - the target/test name is <source> with its extension stripped (NAME_WE)
#   - the executable links PRIVATE against the `llama` library target
#   - any extra arguments (ARGN) are forwarded to the test's command line
#
# Example:
#   llama_add_test(test-foo.cpp extra-arg)
function(llama_add_test source)
    get_filename_component(TEST_TARGET ${source} NAME_WE)
    add_executable(${TEST_TARGET} ${source})
    target_link_libraries(${TEST_TARGET} PRIVATE llama)
    # $<TARGET_FILE:...> resolves to the built binary's path at generate time,
    # so the test command is correct for every generator and configuration.
    add_test(NAME ${TEST_TARGET} COMMAND $<TARGET_FILE:${TEST_TARGET}> ${ARGN})
endfunction()
|
|
|
|
|
# test-double-float.c is disabled by default because it takes a long time to run.
# llama_add_test(test-double-float.c) # SLOW
llama_add_test(test-quantize-fns.cpp)
llama_add_test(test-quantize-perf.cpp)
|
llama_add_test(test-sampling.cpp)
# The tokenizer test takes the vocabulary file path as a command-line
# argument (forwarded via ARGN by llama_add_test).
llama_add_test(test-tokenizer-0.cpp ${CMAKE_CURRENT_SOURCE_DIR}/../models/ggml-vocab.bin)
|