mirror of
https://git.adityakumar.xyz/llama.cpp.git
synced 2024-09-19 19:13:05 +00:00
llama : fix --mtest option (close #1414)
This commit is contained in:
parent
773ee249fb
commit
fb62f92433
1 changed file with 1 addition and 1 deletion
```diff
@@ -121,7 +121,7 @@ int main(int argc, char ** argv) {
     // uncomment the "used_mem" line in llama.cpp to see the results
     if (params.mem_test) {
         {
-            const std::vector<llama_token> tmp(params.n_batch, 0);
+            const std::vector<llama_token> tmp(params.n_batch, llama_token_bos());
             llama_eval(ctx, tmp.data(), tmp.size(), 0, params.n_threads);
         }
```
Loading…
Reference in a new issue