mirror of https://git.adityakumar.xyz/llama.cpp.git (synced 2024-11-09 15:29:43 +00:00)
Init llama_context_params properly from CLI (#370)

parent 56817b1f88
commit 928480ef5b
2 changed files with 8 additions and 1 deletion
llama.cpp

@@ -1398,6 +1398,10 @@ struct llama_context * llama_init_from_file(
 
     llama_context * ctx = new llama_context;
 
+    if (params.seed <= 0) {
+        params.seed = time(NULL);
+    }
+
     ctx->rng = std::mt19937(params.seed);
     ctx->logits_all = params.logits_all;
 
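The llama.cpp hunk above makes llama_init_from_file pick a time-based seed whenever the caller passes a non-positive one. A minimal, self-contained sketch of that behaviour (a standalone program, not the library code itself; the variable names are illustrative):

#include <ctime>
#include <iostream>
#include <random>

int main() {
    int seed = -1;                                   // <= 0 means "choose a seed for me"
    if (seed <= 0) {
        seed = static_cast<int>(std::time(nullptr)); // same fallback as the patch: current time
    }
    std::mt19937 rng(seed);                          // the patch seeds ctx->rng the same way
    std::cout << "seed = " << seed << ", first draw = " << rng() << "\n";
    return 0;
}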
main.cpp (5 changes)

@@ -194,7 +194,10 @@ int main(int argc, char ** argv) {
     {
         auto lparams = llama_context_default_params();
 
-        lparams.f16_kv = params.memory_f16;
+        lparams.n_ctx      = params.n_ctx;
+        lparams.n_parts    = params.n_parts;
+        lparams.seed       = params.seed;
+        lparams.f16_kv     = params.memory_f16;
         lparams.logits_all = params.perplexity;
 
         ctx = llama_init_from_file(params.model.c_str(), lparams);
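The main.cpp hunk wires the CLI-derived parameters into llama_context_params before the context is created. A hedged sketch of the resulting call pattern, using only the API names visible in the diff; the wrapper function, its arguments, and the header path are assumptions for illustration:

// Builds a llama_context the way the patched main.cpp does, but takes the
// CLI-derived values as plain arguments instead of the program's params struct.
#include "llama.h"   // assumed header; declares llama_init_from_file and friends
#include <string>

llama_context * init_ctx(const std::string & model_path,
                         int n_ctx, int n_parts, int seed,
                         bool memory_f16, bool perplexity) {
    auto lparams = llama_context_default_params();

    lparams.n_ctx      = n_ctx;       // context window size
    lparams.n_parts    = n_parts;     // number of model file parts
    lparams.seed       = seed;        // <= 0 now falls back to time(NULL) inside the library
    lparams.f16_kv     = memory_f16;  // keep the KV cache in fp16
    lparams.logits_all = perplexity;  // keep logits for every token (perplexity mode)

    return llama_init_from_file(model_path.c_str(), lparams);
}

Before this change, n_ctx, n_parts, and seed were left at their llama_context_default_params() values, so the corresponding command-line options had no effect on the created context.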