Mirror of https://git.adityakumar.xyz/llama.cpp.git, synced 2024-11-09 15:29:43 +00:00
llama : do not allocate KV cache for "vocab_only == true" (#682)
Fixes sanitizer CI
This commit is contained in:
parent c4f89d8d73
commit 81040f10aa
1 changed file with 1 addition and 1 deletion
@@ -1608,7 +1608,7 @@ struct llama_context * llama_init_from_file(
     }
 
     // reserve memory for context buffers
-    {
+    if (!params.vocab_only) {
         if (!kv_cache_init(ctx->model.hparams, ctx->model.kv_self, memory_type, ctx->model.hparams.n_ctx)) {
             fprintf(stderr, "%s: kv_cache_init() failed for self-attention cache\n", __func__);
             llama_free(ctx);
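For illustration, a minimal sketch of a caller that exercises the new behavior, assuming the llama.h C API of this period (llama_context_default_params, llama_init_from_file, llama_n_vocab, llama_free); the program and the model path are hypothetical and not part of this commit. With vocab_only set, the loader reads only the vocabulary and, after this change, skips kv_cache_init(), so no self-attention KV cache memory is reserved.

    // vocab_only_demo.cpp - hypothetical example, not part of the commit
    #include <cstdio>

    #include "llama.h"

    int main(int argc, char ** argv) {
        // placeholder path; pass a real model file on the command line
        const char * model_path = argc > 1 ? argv[1] : "models/7B/ggml-model.bin";

        llama_context_params params = llama_context_default_params();
        params.vocab_only = true; // load only the vocabulary; after #682 no KV cache is allocated

        llama_context * ctx = llama_init_from_file(model_path, params);
        if (ctx == NULL) {
            fprintf(stderr, "failed to load model from '%s'\n", model_path);
            return 1;
        }

        // the vocabulary is available even though no inference buffers were reserved
        printf("n_vocab = %d\n", llama_n_vocab(ctx));

        llama_free(ctx);
        return 0;
    }

This mirrors why the sanitizer CI was failing: a vocab_only context never runs inference, so reserving (and touching) the KV cache for it was unnecessary work that the patch removes.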